mempool: Decouple mining-specific logic.

This splits the mining-specific code that deals with sorting blocks
according to the number of votes available for them in the mempool out
into mining.go.

This is being done for a couple of reasons:

1) Mining-specific code does not belong directly in the mempool
2) It is required in order to split the mempool into a separate
   package, which an upcoming upstream sync does

In order to accomplish this, a new function named VotesForBlocks has been
exposed on the mempool and the SortParentsByVotes code has been moved to
mining.go and modified to no longer return errors.  The reasoning for
this is the same as for the recent change to VoteHashesForBlock: it is
not the responsibility of a sorting function to dictate caller-specific
block eligibility logic.
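
To make the caller-facing effect of that contract change concrete, here is a
minimal, hypothetical sketch. The hash type, vote counts, and helper below are
made-up stand-ins for illustration only and do not match the real dcrd types
or signatures; the point is that "not enough voters" is now signaled by an
empty result instead of an ErrNotEnoughVoters error:

package main

import "fmt"

// hash is a stand-in for chainhash.Hash.
type hash [32]byte

// sortParentsByVotes mimics the relocated sorting helper: blocks below the
// majority threshold are filtered out, and "not enough voters" is signaled
// by an empty result rather than a sentinel error.
func sortParentsByVotes(votes map[hash]int, blocks []hash, majority int) []hash {
    var eligible []hash
    for _, h := range blocks {
        if votes[h] >= majority {
            eligible = append(eligible, h)
        }
    }
    return eligible
}

func main() {
    blockA, blockB := hash{0xaa}, hash{0xbb}
    votes := map[hash]int{blockA: 3, blockB: 1}

    // New caller pattern: check for an empty slice rather than
    // type-asserting a MiningRuleError with code ErrNotEnoughVoters.
    eligible := sortParentsByVotes(votes, []hash{blockA, blockB}, 3)
    if len(eligible) == 0 {
        fmt.Println("too few voters on any tip; recycle a parent block")
        return
    }
    fmt.Printf("building on block %x\n", eligible[0][:4])
}

This is the pattern both NewBlockTemplate and OnGetMiningState adopt in the
diff below.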
Dave Collins 2016-11-21 11:16:08 -06:00
parent bfb9ca95b7
commit 3ff0cf8afe
3 changed files with 137 additions and 120 deletions

mempool.go

@@ -11,7 +11,6 @@ import (
"fmt"
"math"
"math/big"
"sort"
"sync"
"sync/atomic"
"time"
@@ -243,100 +242,26 @@ func (mp *txMemPool) VoteHashesForBlock(blockHash chainhash.Hash) []chainhash.Hash
return hashes
}
// TODO Pruning of the votes map DECRED
// blockWithLenVotes is a block with the number of votes currently present
// for that block. Just used for sorting.
type blockWithLenVotes struct {
Block chainhash.Hash
Votes uint16
}
// ByNumberOfVotes defines the methods needed to satisfy sort.Interface to
// sort a slice of Blocks by their number of votes.
type ByNumberOfVotes []*blockWithLenVotes
func (b ByNumberOfVotes) Len() int { return len(b) }
func (b ByNumberOfVotes) Less(i, j int) bool { return b[i].Votes < b[j].Votes }
func (b ByNumberOfVotes) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
// sortParentsByVotes takes a list of block header hashes and sorts them
// by the number of votes currently available for them in the votes map of
// mempool. It then returns all blocks that are eligible to be used (have
// at least a majority number of votes) sorted by number of votes, descending.
func (mp *txMemPool) sortParentsByVotes(currentTopBlock chainhash.Hash,
blocks []chainhash.Hash) ([]chainhash.Hash, error) {
lenBlocks := len(blocks)
if lenBlocks == 0 {
return nil, fmt.Errorf("no blocks to sort")
}
bwlvs := make([]*blockWithLenVotes, 0, lenBlocks)
for _, blockHash := range blocks {
bwlvs = append(bwlvs, &blockWithLenVotes{
blockHash,
uint16(len(mp.votes[blockHash])),
})
}
// Blocks with the most votes appear at the top of the list.
sort.Sort(sort.Reverse(ByNumberOfVotes(bwlvs)))
var sortedUsefulBlocks []chainhash.Hash
minimumVotesRequired := uint16((mp.cfg.ChainParams.TicketsPerBlock / 2) + 1)
for _, bwlv := range bwlvs {
if bwlv.Votes >= minimumVotesRequired {
sortedUsefulBlocks = append(sortedUsefulBlocks, bwlv.Block)
}
}
if sortedUsefulBlocks == nil {
return nil, miningRuleError(ErrNotEnoughVoters,
"no block had enough votes to build on top of")
}
// Make sure we don't reorganize the chain needlessly if the top block has
// the same amount of votes as the current leader after the sort. After this
// point, all blocks listed in sortedUsefulBlocks definitely also have the
// minimum number of votes required.
topBlockVotes := mp.votes[currentTopBlock]
if bwlvs[0].Votes == uint16(len(topBlockVotes)) {
if !bwlvs[0].Block.IsEqual(&currentTopBlock) {
// Find our block in the list.
pos := 0
for i, bwlv := range bwlvs {
if bwlv.Block.IsEqual(&currentTopBlock) {
pos = i
break
}
}
if pos == 0 { // Should never happen...
return nil, fmt.Errorf("couldn't find top block in list")
}
// Swap the top block into the first position. We directly access
// sortedUsefulBlocks here with the assumption that since the values
// were accumulated from bwlvs, they should be in the same positions
// and we shouldn't be able to access anything out of bounds.
sortedUsefulBlocks[0], sortedUsefulBlocks[pos] =
sortedUsefulBlocks[pos], sortedUsefulBlocks[0]
}
}
return sortedUsefulBlocks, nil
}
// SortParentsByVotes is the concurrency safe exported version of
// sortParentsByVotes.
func (mp *txMemPool) SortParentsByVotes(currentTopBlock chainhash.Hash, blocks []chainhash.Hash) ([]chainhash.Hash, error) {
mp.votesMtx.Lock()
defer mp.votesMtx.Unlock()
return mp.sortParentsByVotes(currentTopBlock, blocks)
}
// VotesForBlocks returns the vote metadata for all votes on the provided
// block hashes that are currently available in the mempool.
//
// This function is safe for concurrent access.
func (mp *txMemPool) VotesForBlocks(hashes []chainhash.Hash) [][]*VoteTx {
result := make([][]*VoteTx, 0, len(hashes))
mp.votesMtx.Lock()
for _, hash := range hashes {
votes := mp.votes[hash]
votesCopy := make([]*VoteTx, len(votes))
copy(votesCopy, votes)
result = append(result, votesCopy)
}
mp.votesMtx.Unlock()
return result
}
// TODO Pruning of the votes map DECRED
// Ensure the txMemPool type implements the mining.TxSource interface.
var _ mining.TxSource = (*txMemPool)(nil)
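
The key detail of the new VotesForBlocks API is that it hands back copies of
the vote slices taken under votesMtx, so callers can iterate the results
without holding any mempool lock. A self-contained sketch of that
copy-under-lock pattern, with simplified stand-in types rather than the real
txMemPool and VoteTx:

package main

import (
    "fmt"
    "sync"
)

type voteTx struct{ ticket string }

type pool struct {
    votesMtx sync.Mutex
    votes    map[string][]*voteTx
}

// votesForBlocks returns per-block copies of the vote slices so callers can
// use them after the mutex has been released.
func (p *pool) votesForBlocks(hashes []string) [][]*voteTx {
    result := make([][]*voteTx, 0, len(hashes))
    p.votesMtx.Lock()
    for _, h := range hashes {
        votes := p.votes[h]
        votesCopy := make([]*voteTx, len(votes))
        copy(votesCopy, votes)
        result = append(result, votesCopy)
    }
    p.votesMtx.Unlock()
    return result
}

func main() {
    p := &pool{votes: map[string][]*voteTx{
        "aa": {{ticket: "t1"}, {ticket: "t2"}},
    }}
    // Unknown hashes simply yield empty slices, one entry per input hash.
    for i, votes := range p.votesForBlocks([]string{"aa", "bb"}) {
        fmt.Printf("block %d has %d vote(s)\n", i, len(votes))
    }
}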

mining.go

@@ -11,6 +11,7 @@ import (
"encoding/binary"
"fmt"
"math"
"sort"
"time"
"github.com/decred/dcrd/blockchain"
@@ -279,6 +280,109 @@ func containsTxIns(txs []*dcrutil.Tx, tx *dcrutil.Tx) bool {
return false
}
// blockWithNumVotes is a block with the number of votes currently present
// for that block. Just used for sorting.
type blockWithNumVotes struct {
Hash chainhash.Hash
NumVotes uint16
}
// byNumberOfVotes implements sort.Interface to sort a slice of blocks by their
// number of votes.
type byNumberOfVotes []*blockWithNumVotes
// Len returns the number of elements in the slice. It is part of the
// sort.Interface implementation.
func (b byNumberOfVotes) Len() int {
return len(b)
}
// Swap swaps the elements at the passed indices. It is part of the
// sort.Interface implementation.
func (b byNumberOfVotes) Swap(i, j int) {
b[i], b[j] = b[j], b[i]
}
// Less returns whether the block with index i should sort before the block with
// index j. It is part of the sort.Interface implementation.
func (b byNumberOfVotes) Less(i, j int) bool {
return b[i].NumVotes < b[j].NumVotes
}
// SortParentsByVotes takes a list of block header hashes and sorts them
// by the number of votes currently available for them in the votes map of
// mempool. It then returns all blocks that are eligible to be used (have
// at least a majority number of votes) sorted by number of votes, descending.
//
// This function is safe for concurrent access.
func SortParentsByVotes(mp *txMemPool, currentTopBlock chainhash.Hash, blocks []chainhash.Hash, params *chaincfg.Params) []chainhash.Hash {
// Return now when no blocks were provided.
lenBlocks := len(blocks)
if lenBlocks == 0 {
return nil
}
// Fetch the vote metadata for the provided block hashes from the
// mempool and filter out any blocks that do not have the minimum
// required number of votes.
minVotesRequired := (params.TicketsPerBlock / 2) + 1
voteMetadata := mp.VotesForBlocks(blocks)
filtered := make([]*blockWithNumVotes, 0, lenBlocks)
for i := range blocks {
numVotes := uint16(len(voteMetadata[i]))
if numVotes >= minVotesRequired {
filtered = append(filtered, &blockWithNumVotes{
Hash: blocks[i],
NumVotes: numVotes,
})
}
}
// Return now if there are no blocks with enough votes to be eligible to
// build on top of.
if len(filtered) == 0 {
return nil
}
// Blocks with the most votes appear at the top of the list.
sort.Sort(sort.Reverse(byNumberOfVotes(filtered)))
sortedUsefulBlocks := make([]chainhash.Hash, 0, len(filtered))
for _, bwnv := range filtered {
sortedUsefulBlocks = append(sortedUsefulBlocks, bwnv.Hash)
}
// Make sure we don't reorganize the chain needlessly if the top block has
// the same amount of votes as the current leader after the sort. After this
// point, all blocks listed in sortedUsefulBlocks definitely also have the
// minimum number of votes required.
numTopBlockVotes := uint16(len(mp.VotesForBlocks(
[]chainhash.Hash{currentTopBlock})[0]))
if filtered[0].NumVotes == numTopBlockVotes && filtered[0].Hash !=
currentTopBlock {
// Attempt to find the position of the current block being built
// from in the list.
pos := 0
for i, bwnv := range filtered {
if bwnv.Hash == currentTopBlock {
pos = i
break
}
}
// Swap the top block into the first position. We directly access
// sortedUsefulBlocks here with the assumption that since the values
// were accumulated from filtered, they should be in the same positions
// and we shouldn't be able to access anything out of bounds.
if pos != 0 {
sortedUsefulBlocks[0], sortedUsefulBlocks[pos] =
sortedUsefulBlocks[pos], sortedUsefulBlocks[0]
}
}
return sortedUsefulBlocks
}
// BlockTemplate houses a block that has yet to be solved along with additional
// details about the fees and the number of signature operations for each
// transaction in the block.
@@ -1158,17 +1262,13 @@ func NewBlockTemplate(policy *mining.Policy, server *server,
// Get the list of blocks that we can actually build on top of. If we're
// not currently on the block that has the most votes, switch to that
// block.
eligibleParents, err := mempool.SortParentsByVotes(*prevHash, children)
if err != nil {
if err.(MiningRuleError).GetCode() == ErrNotEnoughVoters {
minrLog.Debugf("Too few voters found on any HEAD block, " +
"recycling a parent block to mine on")
return handleTooFewVoters(subsidyCache, nextBlockHeight,
payToAddress, server.blockManager)
}
minrLog.Errorf("unexpected error while sorting eligible "+
"parents: %v", err.Error())
return nil, err
eligibleParents := SortParentsByVotes(mempool, *prevHash, children,
blockManager.server.chainParams)
if len(eligibleParents) == 0 {
minrLog.Debugf("Too few voters found on any HEAD block, " +
"recycling a parent block to mine on")
return handleTooFewVoters(subsidyCache, nextBlockHeight,
payToAddress, server.blockManager)
}
minrLog.Debugf("Found eligible parent %v with enough votes to build "+
@@ -1177,7 +1277,7 @@ func NewBlockTemplate(policy *mining.Policy, server *server,
// Force a reorganization to the parent with the most votes if we need
// to.
if !eligibleParents[0].IsEqual(prevHash) {
if eligibleParents[0] != *prevHash {
for _, newHead := range eligibleParents {
err := blockManager.ForceReorganization(*prevHash, newHead)
if err != nil {

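A runnable toy version of the relocated eligibility logic may help here: it
reproduces the majority filter, the descending sort, and the tie check that
keeps the current top block first to avoid a needless reorganization. The
vote counts and the five-tickets-per-block parameter are invented for the
example; only the structure mirrors the SortParentsByVotes code above.

package main

import (
    "fmt"
    "sort"
)

type blockWithNumVotes struct {
    Hash     string
    NumVotes uint16
}

type byNumberOfVotes []*blockWithNumVotes

func (b byNumberOfVotes) Len() int           { return len(b) }
func (b byNumberOfVotes) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byNumberOfVotes) Less(i, j int) bool { return b[i].NumVotes < b[j].NumVotes }

func main() {
    const ticketsPerBlock = 5
    minVotesRequired := uint16(ticketsPerBlock/2 + 1) // majority = 3

    votes := map[string]uint16{"top": 5, "sibling": 5, "weak": 2}
    currentTopBlock := "top"

    // Filter out blocks below the majority threshold ("weak" is dropped).
    var filtered []*blockWithNumVotes
    for h, n := range votes {
        if n >= minVotesRequired {
            filtered = append(filtered, &blockWithNumVotes{h, n})
        }
    }

    // Blocks with the most votes appear at the top of the list.
    sort.Sort(sort.Reverse(byNumberOfVotes(filtered)))

    // Avoid a needless reorg: on a vote-count tie, swap the current top
    // block back into the first position.
    if len(filtered) > 1 && filtered[0].Hash != currentTopBlock &&
        filtered[0].NumVotes == votes[currentTopBlock] {
        for i := range filtered {
            if filtered[i].Hash == currentTopBlock {
                filtered[0], filtered[i] = filtered[i], filtered[0]
                break
            }
        }
    }

    for _, b := range filtered {
        fmt.Printf("%s: %d votes\n", b.Hash, b.NumVotes)
    }
}

Whatever order the tie-broken sort produces, "top" always ends up first, which
is exactly the no-needless-reorg guarantee the real function documents.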
server.go

@@ -502,25 +502,17 @@ func (sp *serverPeer) OnGetMiningState(p *peer.Peer, msg *wire.MsgGetMiningState) {
return
}
// Get the list of blocks that we can actually build on top of.
blockHashes, err := mp.SortParentsByVotes(*newest, children)
if err != nil {
// We couldn't find enough voters for any block, so just return now.
if err.(MiningRuleError).GetCode() == ErrNotEnoughVoters {
return
}
peerLog.Warnf("unexpected mempool error while sorting eligible "+
"parents for mining state request: %v", err.Error())
}
// Nothing to send, abort.
if len(blockHashes) == 0 {
return
}
// Construct the set of block hashes to send.
if len(blockHashes) > wire.MaxMSBlocksAtHeadPerMsg {
// Get the list of blocks that are eligible to build on and limit the
// list to the maximum number of allowed eligible block hashes per
// mining state message. There is nothing to send when there are no
// eligible blocks.
blockHashes := SortParentsByVotes(mp, *newest, children,
bm.server.chainParams)
numBlocks := len(blockHashes)
if numBlocks == 0 {
return
}
if numBlocks > wire.MaxMSBlocksAtHeadPerMsg {
blockHashes = blockHashes[:wire.MaxMSBlocksAtHeadPerMsg]
}
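
The only other behavioral detail in this hunk is the cap on the reply size. A
tiny sketch of that truncation, with maxBlocksPerMsg standing in for
wire.MaxMSBlocksAtHeadPerMsg (whose actual value is defined in dcrd's wire
package):

package main

import "fmt"

const maxBlocksPerMsg = 8 // illustrative value only

// capBlockHashes limits the eligible-parent list to the per-message maximum,
// mirroring the truncation in OnGetMiningState above.
func capBlockHashes(blockHashes []string) []string {
    if len(blockHashes) > maxBlocksPerMsg {
        blockHashes = blockHashes[:maxBlocksPerMsg]
    }
    return blockHashes
}

func main() {
    hashes := make([]string, 12)
    for i := range hashes {
        hashes[i] = fmt.Sprintf("block-%d", i)
    }
    fmt.Println(len(capBlockHashes(hashes))) // prints 8
}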