Replace the ticket database with an efficient, atomic implementation

The legacy ticket database, which was GOB-serialized and stored on
shutdown, has been removed.  Ticket state information is now held in
a stake node, which acts as a modular "black box" containing all
information about the state of the stake system.  Stake nodes are now
a component of the blockchain blockNode struct and are updated along
with it.

Stake nodes, like their internal treap primitives, are immutable
objects that are created through their connect and disconnect node
functions.  All information about the stake state of the best node is
now stored in the block database, and the blockchain assumes that the
stake state of the best node is known at any given time.  If the state
of a former block or a side chain must be evaluated, this can be
achieved by iterating backwards along the blockchain from the best node
and then connecting stake nodes iteratively as needed, as in the sketch
below.
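
This recovery pattern can be sketched as follows.  This is only an
illustrative sketch, not code from this commit: it assumes the blockNode
fields introduced in the diff below (parent, stakeNode, ticketsSpent,
ticketsRevoked, newTickets), and the exact ConnectNode signature on
stake.Node is assumed for illustration.  The real recovery logic lives
in the chain's fetchStakeNode helper.

// historicalStakeNode walks back from the target block node to the
// nearest ancestor whose stake node is still held in memory, then
// connects the immutable stake nodes forward one block at a time until
// the target's state is rebuilt.
func historicalStakeNode(target *blockNode) (*stake.Node, error) {
	// Collect the path back to the nearest cached stake node.
	var path []*blockNode
	n := target
	for n != nil && n.stakeNode == nil {
		path = append(path, n)
		n = n.parent
	}
	if n == nil {
		return nil, fmt.Errorf("no cached stake state found")
	}

	// Connect stake nodes forward along the collected path.
	sn := n.stakeNode
	for i := len(path) - 1; i >= 0; i-- {
		p := path[i]
		var err error
		// ConnectNode signature assumed for illustration.
		sn, err = sn.ConnectNode(p.header, p.ticketsSpent,
			p.ticketsRevoked, p.newTickets)
		if err != nil {
			return nil, err
		}
	}
	return sn, nil
}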

The performance improvements from this new module are dramatic.  The
long delays on startup and shutdown are gone, and blockchain
synchronization time is improved approximately 5-10x on the mainnet
chain.  The database state is updated atomically, so unexpected
shutdowns should no longer be able to corrupt the chain state; a
compressed sketch of the pattern follows.
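
The atomicity comes from committing everything inside a single database
transaction.  The fragment below is a compressed sketch of the pattern
used in connectBlock further down in this diff; the helper names
(db.Update, dbPutBestState, stake.WriteConnectedBestNode) are taken
from that code, and surrounding error handling is omitted.

// Sketch of the atomic connect pattern from connectBlock: the best
// chain state and the stake database state are written in the same
// database transaction, so a crash cannot leave them on different
// heads.
err := b.db.Update(func(dbTx database.Tx) error {
	// Update best block state.
	if err := dbPutBestState(dbTx, state, node.workSum); err != nil {
		return err
	}

	// Insert the block's stake state in the same transaction.
	return stake.WriteConnectedBestNode(dbTx, stakeNode, node.hash)
})
if err != nil {
	return err
}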

An upgrade path has been added for version 1 blockchain databases.
Clients with a version 1 database will upgrade it automatically on
startup.
C Jepson 2015-08-26 04:54:55 -05:00 committed by cjepson
parent af40801d40
commit d98fc8319f
51 changed files with 5383 additions and 4840 deletions

View File

@ -12,6 +12,7 @@ import (
"time"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/txscript"
"github.com/decred/dcrutil"
)
@ -172,6 +173,32 @@ func (b *BlockChain) checkBlockContext(block *dcrutil.Block, prevNode *blockNode
return nil
}
// ticketsSpentInBlock fetches a list of tickets that were spent in the
// block.
func ticketsSpentInBlock(bl *dcrutil.Block) []chainhash.Hash {
var tickets []chainhash.Hash
for _, stx := range bl.STransactions() {
if stake.DetermineTxType(stx) == stake.TxTypeSSGen {
tickets = append(tickets, stx.MsgTx().TxIn[1].PreviousOutPoint.Hash)
}
}
return tickets
}
// ticketsRevokedInBlock fetches a list of tickets that were revoked in the
// block.
func ticketsRevokedInBlock(bl *dcrutil.Block) []chainhash.Hash {
var tickets []chainhash.Hash
for _, stx := range bl.STransactions() {
if stake.DetermineTxType(stx) == stake.TxTypeSSRtx {
tickets = append(tickets, stx.MsgTx().TxIn[0].PreviousOutPoint.Hash)
}
}
return tickets
}
// maybeAcceptBlock potentially accepts a block into the memory block chain.
// It performs several validation checks which depend on its position within
// the block chain before adding it. The block is expected to have already gone
@ -209,9 +236,14 @@ func (b *BlockChain) maybeAcceptBlock(block *dcrutil.Block,
return false, err
}
// Prune block nodes which are no longer needed before creating
// a new node.
// Prune stake nodes and block nodes which are no longer needed before
// creating a new node.
if !dryRun {
err = b.pruneStakeNodes()
if err != nil {
return false, err
}
err = b.pruneBlockNodes()
if err != nil {
return false, err
@ -221,21 +253,23 @@ func (b *BlockChain) maybeAcceptBlock(block *dcrutil.Block,
// Create a new block node for the block and add it to the in-memory
// block chain (could be either a side chain or the main chain).
blockHeader := &block.MsgBlock().Header
var voteBitsStake []uint16
for _, stx := range block.STransactions() {
if is, _ := stake.IsSSGen(stx); is {
vb := stake.SSGenVoteBits(stx)
voteBitsStake = append(voteBitsStake, vb)
}
}
newNode := newBlockNode(blockHeader, block.Sha(), blockHeight, voteBitsStake)
newNode := newBlockNode(blockHeader, block.Sha(), blockHeight, ticketsSpentInBlock(block), ticketsRevokedInBlock(block))
if prevNode != nil {
newNode.parent = prevNode
newNode.height = blockHeight
newNode.workSum.Add(prevNode.workSum, newNode.workSum)
}
// Fetching a stake node could enable a new DoS vector, so restrict
// this only to blocks that are recent in history.
if newNode.height < b.bestNode.height-minMemoryNodes {
newNode.stakeNode, err = b.fetchStakeNode(newNode)
if err != nil {
return false, err
}
newNode.stakeUndoData = newNode.stakeNode.UndoData()
}
// Connect the passed block to the chain while respecting proper chain
// selection according to the chain with the most proof of work. This
// also handles validation of the transaction scripts.

View File

@ -122,7 +122,7 @@ func (b *BlockChain) blockLocatorFromHash(hash *chainhash.Hash) BlockLocator {
iterNode = iterNode.parent
}
if iterNode != nil && iterNode.height == blockHeight {
locator = append(locator, iterNode.hash)
locator = append(locator, &iterNode.hash)
}
continue
}
@ -174,7 +174,7 @@ func (b *BlockChain) BlockLocatorFromHash(hash *chainhash.Hash) BlockLocator {
// This function is safe for concurrent access.
func (b *BlockChain) LatestBlockLocator() (BlockLocator, error) {
b.chainLock.RLock()
locator := b.blockLocatorFromHash(b.bestNode.hash)
locator := b.blockLocatorFromHash(&b.bestNode.hash)
b.chainLock.RUnlock()
return locator, nil
}

View File

@ -30,12 +30,25 @@ const (
// minMemoryNodes is the minimum number of consecutive nodes needed
// in memory in order to perform all necessary validation. It is used
// to determine when it's safe to prune nodes from memory without
// causing constant dynamic reloading.
minMemoryNodes = 4096
// causing constant dynamic reloading. This value should be larger than
// that for minMemoryStakeNodes.
minMemoryNodes = 2880
// searchDepth is the distance in blocks to search down the blockchain
// to find some parent.
searchDepth = 2048
// minMemoryStakeNodes is the maximum height to keep stake nodes
// in memory in their respective nodes. Beyond this height,
// they will need to be manually recalculated. This value should
// be at least the stake retarget interval.
minMemoryStakeNodes = 288
// mainchainBlockCacheSize is the number of mainchain blocks to
// keep in memory, by height from the tip of the mainchain.
mainchainBlockCacheSize = 12
// searchDepth is the distance in block nodes to search down the
// blockchain to find some parent, loading block nodes from the
// database if necessary. Reorganizations longer than this distance
// may fail.
searchDepth = 2880
)
// blockNode represents a block within the block chain and is primarily used to
@ -51,7 +64,7 @@ type blockNode struct {
children []*blockNode
// hash is the double sha 256 of the block.
hash *chainhash.Hash
hash chainhash.Hash
// height is the position in the block chain.
height int64
@ -65,34 +78,37 @@ type blockNode struct {
// ancestor when switching chains.
inMainChain bool
// outputAmtsTotal is amount of fees in the tx tree regular of the parent plus
// the value of the coinbase, which may or may not be given to the child node
// depending on the voters. Doesn't get set until you actually attempt to
// connect the block and calculate the fees/reward for it.
// DECRED TODO: Is this actually used anywhere? If not prune it.
outputAmtsTotal int64
// Decred: Keep the full block header.
// header is the full block header.
header wire.BlockHeader
// VoteBits for the stake voters.
voteBits []uint16
// stakeNode contains all the consensus information required for the
// staking system. The node also caches information required to add or
// remove stake nodes, so that the stake node itself may be pruneable
// to save memory while maintaining high throughput efficiency for the
// evaluation of sidechains.
stakeDataLock sync.Mutex
stakeNode *stake.Node
newTickets []chainhash.Hash
stakeUndoData stake.UndoTicketDataSlice
ticketsSpent []chainhash.Hash
ticketsRevoked []chainhash.Hash
}
// newBlockNode returns a new block node for the given block header. It is
// completely disconnected from the chain and the workSum value is just the work
// for the passed block. The work sum is updated accordingly when the node is
// inserted into a chain.
func newBlockNode(blockHeader *wire.BlockHeader, blockSha *chainhash.Hash,
height int64, voteBits []uint16) *blockNode {
func newBlockNode(blockHeader *wire.BlockHeader, blockSha *chainhash.Hash, height int64, ticketsSpent []chainhash.Hash, ticketsRevoked []chainhash.Hash) *blockNode {
// Make a copy of the hash so the node doesn't keep a reference to part
// of the full block/block header preventing it from being garbage
// collected.
node := blockNode{
hash: blockSha,
workSum: CalcWork(blockHeader.Bits),
height: height,
header: *blockHeader,
hash: *blockSha,
workSum: CalcWork(blockHeader.Bits),
height: height,
header: *blockHeader,
ticketsSpent: ticketsSpent,
ticketsRevoked: ticketsRevoked,
}
return &node
}
@ -120,7 +136,7 @@ func removeChildNode(children []*blockNode, node *blockNode) []*blockNode {
// does not reevaluate the slice on each iteration nor does it adjust
// the index for the modified slice.
for i := 0; i < len(children); i++ {
if children[i].hash.IsEqual(node.hash) {
if children[i].hash == node.hash {
copy(children[i:], children[i+1:])
children[len(children)-1] = nil
return children[:len(children)-1]
@ -151,7 +167,7 @@ type BestState struct {
// newBestState returns a new best stats instance for the given parameters.
func newBestState(node *blockNode, blockSize, numTxns, totalTxns uint64, totalSubsidy int64) *BestState {
return &BestState{
Hash: node.hash,
Hash: &node.hash,
Height: node.height,
Bits: node.header.Bits,
BlockSize: blockSize,
@ -172,7 +188,6 @@ type BlockChain struct {
checkpointsByHeight map[int64]*chaincfg.Checkpoint
db database.DB
dbInfo *databaseInfo
tmdb *stake.TicketDB
chainParams *chaincfg.Params
notifications NotificationCallback
sigCache *txscript.SigCache
@ -249,9 +264,9 @@ func (b *BlockChain) DisableVerify(disable bool) {
//
// This function is safe for concurrent access.
func (b *BlockChain) TotalSubsidy() int64 {
b.chainLock.Lock()
b.chainLock.RLock()
ts := b.BestSnapshot().TotalSubsidy
b.chainLock.Unlock()
b.chainLock.RUnlock()
return ts
}
@ -263,122 +278,6 @@ func (b *BlockChain) FetchSubsidyCache() *SubsidyCache {
return b.subsidyCache
}
// LiveTickets returns all currently live tickets from the stake database.
//
// This function is NOT safe for concurrent access.
func (b *BlockChain) LiveTickets() ([]*chainhash.Hash, error) {
live, err := b.tmdb.DumpAllLiveTicketHashes()
if err != nil {
return nil, err
}
return live, nil
}
// MissedTickets returns all currently missed tickets from the stake database.
//
// This function is NOT safe for concurrent access.
func (b *BlockChain) MissedTickets() (stake.SStxMemMap, error) {
missed, err := b.tmdb.DumpMissedTickets()
if err != nil {
return nil, err
}
return missed, nil
}
// TicketsWithAddress returns a slice of ticket hashes that are currently live
// corresponding to the given address.
//
// This function is NOT safe for concurrent access.
func (b *BlockChain) TicketsWithAddress(address dcrutil.Address) ([]chainhash.Hash,
error) {
tickets, err := b.tmdb.DumpAllLiveTicketHashes()
if err != nil {
return nil, err
}
var ticketsWithAddr []chainhash.Hash
err = b.db.View(func(dbTx database.Tx) error {
var err error
for _, hash := range tickets {
utxo, err := dbFetchUtxoEntry(dbTx, hash)
if err != nil {
return err
}
_, addrs, _, err :=
txscript.ExtractPkScriptAddrs(txscript.DefaultScriptVersion,
utxo.PkScriptByIndex(0), b.chainParams)
if addrs[0].EncodeAddress() == address.EncodeAddress() {
ticketsWithAddr = append(ticketsWithAddr, *hash)
}
}
return err
})
if err != nil {
return nil, err
}
return ticketsWithAddr, nil
}
// CheckLiveTicket returns whether or not a ticket exists in the live ticket
// map of the stake database.
//
// This function is NOT safe for concurrent access.
func (b *BlockChain) CheckLiveTicket(hash *chainhash.Hash) (bool, error) {
return b.tmdb.CheckLiveTicket(*hash)
}
// CheckLiveTickets returns whether or not a slice of tickets exist in the live
// ticket map of the stake database.
//
// This function is NOT safe for concurrent access.
func (b *BlockChain) CheckLiveTickets(hashes []*chainhash.Hash) ([]bool, error) {
var err error
existsSlice := make([]bool, len(hashes))
for i, hash := range hashes {
existsSlice[i], err = b.tmdb.CheckLiveTicket(*hash)
if err != nil {
return nil, err
}
}
return existsSlice, nil
}
// TicketPoolValue returns the current value of all the locked funds in the
// ticket pool.
//
// This function is safe for concurrent access. All live tickets are at least
// 256 blocks deep on mainnet, so the UTXO set should generally always have
// the asked for transactions.
func (b *BlockChain) TicketPoolValue() (dcrutil.Amount, error) {
tickets, err := b.tmdb.DumpAllLiveTicketHashes()
if err != nil {
return 0, err
}
var amt int64
err = b.db.View(func(dbTx database.Tx) error {
var err error
for _, hash := range tickets {
utxo, err := dbFetchUtxoEntry(dbTx, hash)
if err != nil {
return err
}
amt += utxo.sparseOutputs[0].amount
}
return err
})
if err != nil {
return 0, err
}
return dcrutil.Amount(amt), nil
}
// HaveBlock returns whether or not the chain instance has the block represented
// by the passed hash. This includes checking the various places a block can
// be like part of the main chain, on a side chain, or in the orphan pool.
@ -532,7 +431,7 @@ func (b *BlockChain) addOrphanBlock(block *dcrutil.Block) {
// getGeneration gets a generation of blocks who all have the same parent by
// taking a hash as input, locating its parent node, and then returning all
// children for that parent node including the hash passed. This can then be
// children for that parent node including the hash passed. This can then be
// used by the mempool downstream to locate all potential block template
// parents.
func (b *BlockChain) getGeneration(h chainhash.Hash) ([]chainhash.Hash, error) {
@ -540,7 +439,7 @@ func (b *BlockChain) getGeneration(h chainhash.Hash) ([]chainhash.Hash, error) {
// This typically happens because the main chain has recently
// reorganized and the block the miner is looking at is on
// a fork. Usually it corrects itself after failure.
// a fork. Usually it corrects itself after failure.
if err != nil {
return nil, fmt.Errorf("couldn't find block node in best chain: %v",
err.Error())
@ -559,7 +458,7 @@ func (b *BlockChain) getGeneration(h chainhash.Hash) ([]chainhash.Hash, error) {
lenChildren := len(p.children)
allChildren := make([]chainhash.Hash, lenChildren, lenChildren)
for i := 0; i < lenChildren; i++ {
allChildren[i] = *p.children[i].hash
allChildren[i] = p.children[i].hash
}
return allChildren, nil
@ -579,14 +478,14 @@ func (b *BlockChain) GetGeneration(hash chainhash.Hash) ([]chainhash.Hash, error
// The database transaction may be read-only.
func (b *BlockChain) loadBlockNode(dbTx database.Tx,
hash *chainhash.Hash) (*blockNode, error) {
blockHeader, err := dbFetchHeaderByHash(dbTx, hash)
block, err := dbFetchBlockByHash(dbTx, hash)
if err != nil {
return nil, err
}
var voteBitsStake []uint16
node := newBlockNode(blockHeader, hash,
int64(blockHeader.Height), voteBitsStake)
blockHeader := block.MsgBlock().Header
node := newBlockNode(&blockHeader, hash, int64(blockHeader.Height),
ticketsSpentInBlock(block), ticketsRevokedInBlock(block))
node.inMainChain = true
prevHash := &blockHeader.PrevBlock
@ -616,11 +515,22 @@ func (b *BlockChain) loadBlockNode(dbTx database.Tx,
node.children = append(node.children, childNode)
}
} else {
// Case 3 -- The node doesn't have a parent and is not the
// parent of another node. This means an arbitrary orphan block
// is trying to be loaded which is not allowed.
str := "loadBlockNode: attempt to insert orphan block %v"
return nil, AssertError(fmt.Sprintf(str, hash))
// Case 3 -- The node doesn't have a parent in the node cache
// and is not the parent of another node. This means an arbitrary
// orphan block is trying to be loaded which is not allowed.
// Before we return, check and make sure there isn't a parent
// further down the line in the blockchain to which the block
// could be attached, for example if the node had been pruned from
// the index.
foundParent, err := b.findNode(&node.header.PrevBlock)
if err == nil {
node.workSum = node.workSum.Add(foundParent.workSum, node.workSum)
foundParent.children = append(foundParent.children, node)
node.parent = foundParent
} else {
str := "loadBlockNode: attempt to insert orphan block %v"
return nil, AssertError(fmt.Sprintf(str, hash))
}
}
// Add the new node to the indices for faster lookups.
@ -632,6 +542,7 @@ func (b *BlockChain) loadBlockNode(dbTx database.Tx,
// findNode finds the node scaling backwards from best chain or return an
// error.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) findNode(nodeHash *chainhash.Hash) (*blockNode, error) {
var node *blockNode
@ -639,7 +550,7 @@ func (b *BlockChain) findNode(nodeHash *chainhash.Hash) (*blockNode, error) {
// Most common case; we're checking a block that wants to be connected
// on top of the current main chain.
distance := 0
if nodeHash.IsEqual(b.bestNode.hash) {
if *nodeHash == b.bestNode.hash {
node = b.bestNode
} else {
// Look backwards in our blockchain and try to find it in the
@ -656,10 +567,10 @@ func (b *BlockChain) findNode(nodeHash *chainhash.Hash) (*blockNode, error) {
break
}
last := foundPrev.header.PrevBlock
foundPrev = foundPrev.parent
if foundPrev == nil {
parent, err := b.loadBlockNode(dbTx,
&foundPrev.header.PrevBlock)
parent, err := b.loadBlockNode(dbTx, &last)
if err != nil {
return err
}
@ -746,35 +657,11 @@ func (b *BlockChain) getPrevNodeFromNode(node *blockNode) (*blockNode, error) {
return prevBlockNode, err
}
// GetNodeAtHeightFromTopNode goes backwards through a node until it a reaches
// the node with a desired block height; it returns this block. The benefit is
// this works for both the main chain and the side chain.
func (b *BlockChain) getNodeAtHeightFromTopNode(node *blockNode,
toTraverse int64) (*blockNode, error) {
oldNode := node
var err error
for i := 0; i < int(toTraverse); i++ {
// Get the previous block node.
oldNode, err = b.getPrevNodeFromNode(oldNode)
if err != nil {
return nil, err
}
if oldNode == nil {
return nil, fmt.Errorf("unable to obtain previous node; " +
"ancestor is genesis block")
}
}
return oldNode, nil
}
// getBlockFromHash searches the internal chain block stores and the database in
// an attempt to find the block. If it finds the block, it returns it.
// fetchBlockFromHash searches the internal chain block stores and the database in
// an attempt to find the block. If it finds the block, it returns it.
//
// This function is NOT safe for concurrent access.
func (b *BlockChain) getBlockFromHash(hash *chainhash.Hash) (*dcrutil.Block,
func (b *BlockChain) fetchBlockFromHash(hash *chainhash.Hash) (*dcrutil.Block,
error) {
// Check side chain block cache
b.blockCacheLock.RLock()
@ -795,11 +682,10 @@ func (b *BlockChain) getBlockFromHash(hash *chainhash.Hash) (*dcrutil.Block,
// Check main chain
b.mainchainBlockCacheLock.RLock()
block, ok := b.mainchainBlockCache[*hash]
b.mainchainBlockCacheLock.RUnlock()
if ok {
b.mainchainBlockCacheLock.RUnlock()
return block, nil
}
b.mainchainBlockCacheLock.RUnlock()
var blockMainchain *dcrutil.Block
errFetchMainchain := b.db.View(func(dbTx database.Tx) error {
@ -816,19 +702,20 @@ func (b *BlockChain) getBlockFromHash(hash *chainhash.Hash) (*dcrutil.Block,
"side chain cache, orphan cache, and main chain db", hash)
}
// GetBlockFromHash is the generalized and exported version of getBlockFromHash.
func (b *BlockChain) GetBlockFromHash(hash *chainhash.Hash) (*dcrutil.Block,
// FetchBlockFromHash is the generalized and exported version of
// fetchBlockFromHash. It is safe for concurrent access.
func (b *BlockChain) FetchBlockFromHash(hash *chainhash.Hash) (*dcrutil.Block,
error) {
b.chainLock.RLock()
defer b.chainLock.RUnlock()
return b.getBlockFromHash(hash)
return b.fetchBlockFromHash(hash)
}
// GetTopBlock returns the current block at HEAD on the blockchain. Needed
// GetTopBlock returns the current block at HEAD on the blockchain. Needed
// for mining in the daemon.
func (b *BlockChain) GetTopBlock() (dcrutil.Block, error) {
block, err := b.getBlockFromHash(b.bestNode.hash)
return *block, err
func (b *BlockChain) GetTopBlock() (*dcrutil.Block, error) {
block, err := b.fetchBlockFromHash(&b.bestNode.hash)
return block, err
}
// removeBlockNode removes the passed block node from the memory chain by
@ -844,7 +731,7 @@ func (b *BlockChain) removeBlockNode(node *blockNode) error {
}
// Remove the node from the node index.
delete(b.index, *node.hash)
delete(b.index, node.hash)
// Unlink all of the node's children.
for _, child := range node.children {
@ -906,9 +793,61 @@ func (b *BlockChain) pruneBlockNodes() error {
// the dependency index, and remove it from the node index.
for e := deleteNodes.Front(); e != nil; e = e.Next() {
node := e.Value.(*blockNode)
err := b.removeBlockNode(node)
if err != nil {
return err
// Do not attempt to prune if the node should already have been pruned,
// for example if you're adding an old side chain block.
if node.height > b.bestNode.height-minMemoryNodes {
err := b.removeBlockNode(node)
if err != nil {
return err
}
}
}
return nil
}
// pruneStakeNodes removes references to old stake nodes which should no
// longer be held in memory so as to keep the maximum memory usage down.
// It proceeds from the bestNode back to the determined minimum height node,
// finds all the relevant children, and then drops the stake nodes from
// them by assigning nil and allowing the memory to be recovered by GC.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) pruneStakeNodes() error {
// Find the height to prune to.
pruneToNode := b.bestNode
for i := int64(0); i < minMemoryStakeNodes-1 && pruneToNode != nil; i++ {
pruneToNode = pruneToNode.parent
}
// Nothing to do if there are not enough nodes.
if pruneToNode == nil || pruneToNode.parent == nil {
return nil
}
// Push the nodes to delete on a list in reverse order since it's easier
// to prune them going forwards than it is backwards. This will
// typically end up being a single node since pruning is currently done
// just before each new node is created. However, that might be tuned
// later to only prune at intervals, so the code needs to account for
// the possibility of multiple nodes.
deleteNodes := list.New()
for node := pruneToNode.parent; node != nil; node = node.parent {
deleteNodes.PushFront(node)
}
// Loop through each node to prune, unlink its children, remove it from
// the dependency index, and remove it from the node index.
for e := deleteNodes.Front(); e != nil; e = e.Next() {
node := e.Value.(*blockNode)
// Do not attempt to prune if the node should already have been pruned,
// for example if you're adding an old side chain block.
if node.height > b.bestNode.height-minMemoryNodes {
node.stakeNode = nil
node.stakeUndoData = nil
node.newTickets = nil
node.ticketsSpent = nil
node.ticketsRevoked = nil
}
}
@ -1027,12 +966,12 @@ func (b *BlockChain) CalcPastMedianTime() (time.Time, error) {
// passed node is not on a side chain.
//
// This function MUST be called with the chain state lock held (for reads).
func (b *BlockChain) getReorganizeNodes(node *blockNode) (*list.List, *list.List) {
func (b *BlockChain) getReorganizeNodes(node *blockNode) (*list.List, *list.List, error) {
// Nothing to detach or attach if there is no node.
attachNodes := list.New()
detachNodes := list.New()
if node == nil {
return detachNodes, attachNodes
return detachNodes, attachNodes, nil
}
// Find the fork point (if any) adding each block to the list of nodes
@ -1056,14 +995,23 @@ func (b *BlockChain) getReorganizeNodes(node *blockNode) (*list.List, *list.List
// Start from the end of the main chain and work backwards until the
// common ancestor adding each block to the list of nodes to detach from
// the main chain.
for n := b.bestNode; n != nil && n.parent != nil; n = n.parent {
if n.hash.IsEqual(ancestor.hash) {
for n := b.bestNode; n != nil; n = n.parent {
if n.parent == nil {
var err error
n.parent, err = b.findNode(&n.header.PrevBlock)
if err != nil {
return nil, nil, err
}
}
if n.hash == ancestor.hash {
break
}
detachNodes.PushBack(n)
}
return detachNodes, attachNodes
return detachNodes, attachNodes, nil
}
// pushMainChainBlockCache pushes a block onto the main chain block cache,
@ -1109,14 +1057,14 @@ func dbMaybeStoreBlock(dbTx database.Tx, block *dcrutil.Block) error {
func (b *BlockChain) connectBlock(node *blockNode, block *dcrutil.Block,
view *UtxoViewpoint, stxos []spentTxOut) error {
// Make sure it's extending the end of the best chain.
prevHash := &block.MsgBlock().Header.PrevBlock
if !prevHash.IsEqual(b.bestNode.hash) {
prevHash := block.MsgBlock().Header.PrevBlock
if prevHash != b.bestNode.hash {
return AssertError("connectBlock must be called with a block " +
"that extends the main chain")
}
// Sanity check the correct number of stxos are provided.
parent, err := b.getBlockFromHash(node.parent.hash)
parent, err := b.fetchBlockFromHash(&node.parent.hash)
if err != nil {
return err
}
@ -1143,34 +1091,13 @@ func (b *BlockChain) connectBlock(node *blockNode, block *dcrutil.Block,
state := newBestState(node, blockSize, numTxns, curTotalTxns+numTxns,
curTotalSubsidy+subsidy)
// Insert block into ticket database if we're the point where tickets begin to
// mature. Note that if the block is inserted into tmdb and then insertion
// into DB fails, the two database will be on different HEADs. This needs
// to be handled correctly in the near future.
if node.height >= b.chainParams.StakeEnabledHeight {
spentAndMissedTickets, newTickets, _, err :=
b.tmdb.InsertBlock(block, parent)
if err != nil {
return err
}
nextStakeDiff, err := b.calcNextRequiredStakeDifficulty(node)
if err != nil {
return err
}
// Notify of spent and missed tickets
b.sendNotification(NTSpentAndMissedTickets,
&TicketNotificationsData{*node.hash,
node.height,
nextStakeDiff,
spentAndMissedTickets})
// Notify of new tickets
b.sendNotification(NTNewTickets,
&TicketNotificationsData{*node.hash,
node.height,
nextStakeDiff,
newTickets})
// Get the stake node for this node, filling in any data that
// may not yet have been filled in. In all cases this
// should simply give a pointer to data already prepared, but
// run this anyway to be safe.
stakeNode, err := b.fetchStakeNode(node)
if err != nil {
return err
}
// Atomically insert info into the database.
@ -1209,6 +1136,12 @@ func (b *BlockChain) connectBlock(node *blockNode, block *dcrutil.Block,
return err
}
// Insert the block into the stake database.
err = stake.WriteConnectedBestNode(dbTx, stakeNode, node.hash)
if err != nil {
return err
}
// Allow the index manager to call each of the currently active
// optional indexes with the block being connected so they can
// update themselves accordingly.
@ -1222,16 +1155,6 @@ func (b *BlockChain) connectBlock(node *blockNode, block *dcrutil.Block,
return nil
})
if err != nil {
log.Errorf("Failed to insert block %v: %s", node.hash, err.Error())
// Attempt to restore TicketDb if this fails.
if node.height >= b.chainParams.StakeEnabledHeight {
_, _, _, errRemove := b.tmdb.RemoveBlockToHeight(node.height - 1)
if errRemove != nil {
return errRemove
}
}
return err
}
@ -1242,8 +1165,8 @@ func (b *BlockChain) connectBlock(node *blockNode, block *dcrutil.Block,
// Add the new node to the memory main chain indices for faster
// lookups.
node.inMainChain = true
b.index[*node.hash] = node
b.depNodes[*prevHash] = append(b.depNodes[*prevHash], node)
b.index[node.hash] = node
b.depNodes[prevHash] = append(b.depNodes[prevHash], node)
// This node is now the end of the best chain.
b.bestNode = node
@ -1257,6 +1180,35 @@ func (b *BlockChain) connectBlock(node *blockNode, block *dcrutil.Block,
b.stateSnapshot = state
b.stateLock.Unlock()
// Send stake notifications about the new block.
if node.height >= b.chainParams.StakeEnabledHeight {
nextStakeDiff, err := b.calcNextRequiredStakeDifficulty(node)
if err != nil {
return err
}
// Notify of spent and missed tickets
b.sendNotification(NTSpentAndMissedTickets,
&TicketNotificationsData{
Hash: node.hash,
Height: node.height,
StakeDifficulty: nextStakeDiff,
TicketsSpent: node.stakeNode.SpentByBlock(),
TicketsMissed: node.stakeNode.MissedByBlock(),
TicketsNew: []chainhash.Hash{},
})
// Notify of new tickets
b.sendNotification(NTNewTickets,
&TicketNotificationsData{
Hash: node.hash,
Height: node.height,
StakeDifficulty: nextStakeDiff,
TicketsSpent: []chainhash.Hash{},
TicketsMissed: []chainhash.Hash{},
TicketsNew: node.stakeNode.NewTickets(),
})
}
// Assemble the current block and the parent into a slice.
blockAndParent := []*dcrutil.Block{block, parent}
@ -1267,6 +1219,16 @@ func (b *BlockChain) connectBlock(node *blockNode, block *dcrutil.Block,
b.sendNotification(NTBlockConnected, blockAndParent)
b.chainLock.Lock()
// Optimization: Before checkpoints, immediately dump the parent's stake
// node because we no longer need it.
if node.height < b.chainParams.LatestCheckpointHeight() {
b.bestNode.parent.stakeNode = nil
b.bestNode.parent.stakeUndoData = nil
b.bestNode.parent.newTickets = nil
b.bestNode.parent.ticketsSpent = nil
b.bestNode.parent.ticketsRevoked = nil
}
b.pushMainChainBlockCache(block)
return nil
@ -1287,7 +1249,7 @@ func (b *BlockChain) dropMainChainBlockCache(block *dcrutil.Block) {
func (b *BlockChain) disconnectBlock(node *blockNode, block *dcrutil.Block,
view *UtxoViewpoint) error {
// Make sure the node being disconnected is the end of the best chain.
if !node.hash.IsEqual(b.bestNode.hash) {
if node.hash != b.bestNode.hash {
return AssertError("disconnectBlock must be called with the " +
"block at the end of the main chain")
}
@ -1296,25 +1258,13 @@ func (b *BlockChain) disconnectBlock(node *blockNode, block *dcrutil.Block,
// accessing node.parent directly as it will dynamically create previous
// block nodes as needed. This helps allow only the pieces of the chain
// that are needed to remain in memory.
// Remove from ticket database.
maturityHeight := int64(b.chainParams.TicketMaturity) +
int64(b.chainParams.CoinbaseMaturity)
if node.height-1 >= maturityHeight {
_, _, _, err := b.tmdb.RemoveBlockToHeight(node.height - 1)
if err != nil {
return err
}
}
// if we're above the point in which the stake db is enabled.
prevNode, err := b.getPrevNodeFromNode(node)
if err != nil {
return err
}
// Load the previous block since some details for it are needed below.
parent, err := b.getBlockFromHash(prevNode.hash)
parent, err := b.fetchBlockFromHash(&prevNode.hash)
if err != nil {
return err
}
@ -1339,6 +1289,17 @@ func (b *BlockChain) disconnectBlock(node *blockNode, block *dcrutil.Block,
state := newBestState(prevNode, parentBlockSize, numTxns, newTotalTxns,
newTotalSubsidy)
// Prepare the information required to update the stake database
// contents.
childStakeNode, err := b.fetchStakeNode(node)
if err != nil {
return err
}
parentStakeNode, err := b.fetchStakeNode(node.parent)
if err != nil {
return err
}
err = b.db.Update(func(dbTx database.Tx) error {
// Update best block state.
err := dbPutBestState(dbTx, state, node.workSum)
@ -1368,6 +1329,12 @@ func (b *BlockChain) disconnectBlock(node *blockNode, block *dcrutil.Block,
return err
}
err = stake.WriteDisconnectedBestNode(dbTx, parentStakeNode,
node.parent.hash, childStakeNode.UndoData())
if err != nil {
return err
}
// Allow the index manager to call each of the currently active
// optional indexes with the block being disconnected so they
// can update themselves accordingly.
@ -1391,7 +1358,7 @@ func (b *BlockChain) disconnectBlock(node *blockNode, block *dcrutil.Block,
// Put block in the side chain cache.
node.inMainChain = false
b.blockCacheLock.Lock()
b.blockCache[*node.hash] = block
b.blockCache[node.hash] = block
b.blockCacheLock.Unlock()
// This node's parent is now the end of the best chain.
@ -1483,7 +1450,7 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List,
for e := attachNodes.Front(); e != nil; e = e.Next() {
b.blockCacheLock.RLock()
n := e.Value.(*blockNode)
if _, exists := b.blockCache[*n.hash]; !exists {
if _, exists := b.blockCache[n.hash]; !exists {
return AssertError(fmt.Sprintf("block %v is missing "+
"from the side chain block cache", n.hash))
}
@ -1503,7 +1470,7 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List,
// database and using that information to unspend all of the spent txos
// and remove the utxos created by the blocks.
view := NewUtxoViewpoint()
view.SetBestHash(formerBestHash)
view.SetBestHash(&formerBestHash)
view.SetStakeViewpoint(ViewpointPrevValidInitial)
i := 0
for e := detachNodes.Front(); e != nil; e = e.Next() {
@ -1511,11 +1478,11 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List,
var block *dcrutil.Block
var parent *dcrutil.Block
var err error
block, err = b.getBlockFromHash(n.hash)
block, err = b.fetchBlockFromHash(&n.hash)
if err != nil {
return err
}
parent, err = b.getBlockFromHash(&n.header.PrevBlock)
parent, err = b.fetchBlockFromHash(&n.header.PrevBlock)
if err != nil {
return err
}
@ -1567,7 +1534,7 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List,
for e := attachNodes.Front(); e != nil; e = e.Next() {
n := e.Value.(*blockNode)
b.blockCacheLock.RLock()
block := b.blockCache[*n.hash]
block := b.blockCache[n.hash]
b.blockCacheLock.RUnlock()
// Notice the spent txout details are not requested here and
@ -1593,9 +1560,9 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List,
// Send a notification that a blockchain reorganization is in progress.
reorgData := &ReorganizationNtfnsData{
*formerBestHash,
formerBestHash,
formerBestHeight,
*newHash,
newHash,
newHeight,
}
b.chainLock.Unlock()
@ -1608,14 +1575,14 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List,
// view to be valid from the viewpoint of each block being connected or
// disconnected.
view = NewUtxoViewpoint()
view.SetBestHash(formerBestHash)
view.SetBestHash(&formerBestHash)
view.SetStakeViewpoint(ViewpointPrevValidInitial)
// Disconnect blocks from the main chain.
for i, e := 0, detachNodes.Front(); e != nil; i, e = i+1, e.Next() {
n := e.Value.(*blockNode)
block := detachBlocks[i]
parent, err := b.getBlockFromHash(&n.header.PrevBlock)
parent, err := b.fetchBlockFromHash(&n.header.PrevBlock)
if err != nil {
return err
}
@ -1646,10 +1613,10 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List,
for e := attachNodes.Front(); e != nil; e = e.Next() {
n := e.Value.(*blockNode)
b.blockCacheLock.RLock()
block := b.blockCache[*n.hash]
block := b.blockCache[n.hash]
b.blockCacheLock.RUnlock()
parent, err := b.getBlockFromHash(&n.header.PrevBlock)
parent, err := b.fetchBlockFromHash(&n.header.PrevBlock)
if err != nil {
return err
}
@ -1669,7 +1636,7 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List,
if err != nil {
return err
}
delete(b.blockCache, *n.hash)
delete(b.blockCache, n.hash)
}
// Log the point where the chain forked.
@ -1723,7 +1690,7 @@ func (b *BlockChain) forceHeadReorganization(formerBest chainhash.Hash,
"common parent for forced reorg")
}
newBestBlock, err := b.getBlockFromHash(&newBest)
newBestBlock, err := b.fetchBlockFromHash(&newBest)
if err != nil {
return err
}
@ -1733,11 +1700,11 @@ func (b *BlockChain) forceHeadReorganization(formerBest chainhash.Hash,
view.SetBestHash(&b.bestNode.header.PrevBlock)
view.SetStakeViewpoint(ViewpointPrevValidInitial)
formerBestBlock, err := b.getBlockFromHash(&formerBest)
formerBestBlock, err := b.fetchBlockFromHash(&formerBest)
if err != nil {
return err
}
commonParentBlock, err := b.getBlockFromHash(formerBestNode.parent.hash)
commonParentBlock, err := b.fetchBlockFromHash(&formerBestNode.parent.hash)
if err != nil {
return err
}
@ -1776,7 +1743,7 @@ func (b *BlockChain) forceHeadReorganization(formerBest chainhash.Hash,
return err
}
attach, detach := b.getReorganizeNodes(newBestNode)
attach, detach, err := b.getReorganizeNodes(newBestNode)
if err != nil {
return err
}
@ -1816,11 +1783,11 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *dcrutil.Block,
// We are extending the main (best) chain with a new block. This is the
// most common case.
if node.header.PrevBlock.IsEqual(b.bestNode.hash) {
if node.header.PrevBlock == b.bestNode.hash {
// Fetch the best block, now the parent, to be able to
// connect the txTreeRegular if needed.
// TODO optimize by not fetching if not needed?
parent, err := b.getBlockFromHash(&node.header.PrevBlock)
parent, err := b.fetchBlockFromHash(&node.header.PrevBlock)
if err != nil {
return false, err
}
@ -1897,9 +1864,9 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *dcrutil.Block,
log.Debugf("Adding block %v to side chain cache", node.hash)
}
b.blockCacheLock.Lock()
b.blockCache[*node.hash] = block
b.blockCache[node.hash] = block
b.blockCacheLock.Unlock()
b.index[*node.hash] = node
b.index[node.hash] = node
// Connect the parent node to this node.
node.inMainChain = false
@ -1913,9 +1880,9 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *dcrutil.Block,
children = removeChildNode(children, node)
node.parent.children = children
delete(b.index, *node.hash)
delete(b.index, node.hash)
b.blockCacheLock.Lock()
delete(b.blockCache, *node.hash)
delete(b.blockCache, node.hash)
b.blockCacheLock.Unlock()
}()
}
@ -1942,7 +1909,7 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *dcrutil.Block,
}
// Log information about how the block is forking the chain.
if fork.hash.IsEqual(node.parent.hash) {
if fork.hash == node.parent.hash {
log.Infof("FORK: Block %v (height %v) forks the chain at height "+
"%d/block %v, but does not cause a reorganize",
node.hash,
@ -1969,14 +1936,17 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *dcrutil.Block,
// blocks that form the (now) old fork from the main chain, and attach
// the blocks that form the new chain to the main chain starting at the
// common ancestor (the point where the chain forked).
detachNodes, attachNodes := b.getReorganizeNodes(node)
detachNodes, attachNodes, err := b.getReorganizeNodes(node)
if err != nil {
return false, err
}
// Reorganize the chain.
if !dryRun {
log.Infof("REORGANIZE: Block %v is causing a reorganize.",
node.hash)
}
err := b.reorganizeChain(detachNodes, attachNodes, flags)
err = b.reorganizeChain(detachNodes, attachNodes, flags)
if err != nil {
return false, err
}
@ -2051,9 +2021,6 @@ type Config struct {
// This field is required.
DB database.DB
// tmdb
TMDB *stake.TicketDB
// ChainParams identifies which chain parameters the chain is associated
// with.
//
@ -2110,7 +2077,6 @@ func New(config *Config) (*BlockChain, error) {
b := BlockChain{
checkpointsByHeight: checkpointsByHeight,
db: config.DB,
tmdb: config.TMDB,
chainParams: params,
notifications: config.Notifications,
sigCache: config.SigCache,
@ -2122,7 +2088,7 @@ func New(config *Config) (*BlockChain, error) {
prevOrphans: make(map[chainhash.Hash][]*orphanBlock),
blockCache: make(map[chainhash.Hash]*dcrutil.Block),
mainchainBlockCache: make(map[chainhash.Hash]*dcrutil.Block),
mainchainBlockCacheSize: int(params.CoinbaseMaturity) + 1,
mainchainBlockCacheSize: mainchainBlockCacheSize,
}
// Initialize the chain state from the passed database. When the db
@ -2140,12 +2106,17 @@ func New(config *Config) (*BlockChain, error) {
}
}
// Apply any upgrades as needed.
if err := b.upgrade(); err != nil {
return nil, err
}
b.subsidyCache = NewSubsidyCache(b.bestNode.height, b.chainParams)
log.Infof("Blockchain database version %v loaded successfully",
log.Infof("Blockchain database version %v loaded",
b.dbInfo.version)
log.Infof("Chain state (height %d, hash %v, total transactions %d, work %v)",
log.Infof("Chain state: height %d, hash %v, total transactions %d, work %v",
b.bestNode.height, b.bestNode.hash, b.stateSnapshot.TotalTxns,
b.bestNode.workSum)

View File

@ -11,7 +11,7 @@ import (
"sort"
"time"
"github.com/decred/dcrd/blockchain/dbnamespace"
"github.com/decred/dcrd/blockchain/internal/dbnamespace"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/database"
@ -27,7 +27,7 @@ const (
// currentDatabaseVersion indicates what the current database
// version is.
currentDatabaseVersion = 1
currentDatabaseVersion = 2
)
// errNotInMainChain signifies that a block hash or height that is not in the
@ -1094,11 +1094,11 @@ func serializeDatabaseInfo(dbi *databaseInfo) []byte {
// information.
func dbPutDatabaseInfo(dbTx database.Tx, dbi *databaseInfo) error {
meta := dbTx.Metadata()
subsidyBucket := meta.Bucket(dbnamespace.BlockChainDbInfoBucketName)
bucket := meta.Bucket(dbnamespace.BlockChainDbInfoBucketName)
val := serializeDatabaseInfo(dbi)
// Store the current best chain state into the database.
return subsidyBucket.Put(dbnamespace.BlockChainDbInfoBucketName, val)
return bucket.Put(dbnamespace.BlockChainDbInfoBucketName, val)
}
// deserializeDatabaseInfo deserializes a database information struct.
@ -1160,7 +1160,6 @@ func dbFetchDatabaseInfo(dbTx database.Tx) (*databaseInfo, error) {
// block height uint32 4 bytes
// total txns uint64 8 bytes
// total subsidy int64 8 bytes
// ticket pool value int64 8 bytes
// work sum length uint32 4 bytes
// work sum big.Int work sum length
// -----------------------------------------------------------------------------
@ -1271,12 +1270,13 @@ func (b *BlockChain) createChainState() error {
// Create a new node from the genesis block and set it as the best node.
genesisBlock := dcrutil.NewBlock(b.chainParams.GenesisBlock)
header := &genesisBlock.MsgBlock().Header
node := newBlockNode(header, genesisBlock.Sha(), 0, []uint16{})
node := newBlockNode(header, genesisBlock.Sha(), 0, []chainhash.Hash{},
[]chainhash.Hash{})
node.inMainChain = true
b.bestNode = node
// Add the new node to the index which is used for faster lookups.
b.index[*node.hash] = node
b.index[node.hash] = node
// Initialize the state related to the best block.
numTxns := uint64(len(genesisBlock.MsgBlock().Transactions))
@ -1336,7 +1336,7 @@ func (b *BlockChain) createChainState() error {
// Add the genesis block hash to height and height to hash
// mappings to the index.
err = dbPutBlockIndex(dbTx, b.bestNode.hash, b.bestNode.height)
err = dbPutBlockIndex(dbTx, &b.bestNode.hash, b.bestNode.height)
if err != nil {
return err
}
@ -1347,6 +1347,13 @@ func (b *BlockChain) createChainState() error {
return err
}
// Initialize the stake buckets in the database, along with
// the best state for the stake database.
b.bestNode.stakeNode, err = stake.InitDatabaseState(dbTx, b.chainParams)
if err != nil {
return err
}
// Store the genesis block into the database.
return dbTx.StoreBlock(genesisBlock)
})
@ -1382,14 +1389,14 @@ func (b *BlockChain) initChainState() error {
// of the database. In the future we can add upgrade path before this
// to ensure that the database is upgraded to the current version
// before hitting this.
if dbInfo.version != currentDatabaseVersion {
if dbInfo.version > currentDatabaseVersion {
return fmt.Errorf("The blockchain database's version is %v "+
"but the current version of the software is %v",
dbInfo.version, currentDatabaseVersion)
}
// Die here if we're not on the current compression version, too.
if dbInfo.compVer != currentCompressionVersion {
if dbInfo.compVer > currentCompressionVersion {
return fmt.Errorf("The blockchain database's compression "+
"version is %v but the current version of the software is %v",
dbInfo.version, currentDatabaseVersion)
@ -1424,18 +1431,32 @@ func (b *BlockChain) initChainState() error {
// Create a new node and set it as the best node. The preceding
// nodes will be loaded on demand as needed.
// TODO CJ Get vote bits from db
header := &block.Header
node := newBlockNode(header, &state.hash, int64(state.height),
[]uint16{})
ticketsSpentInBlock(dcrutil.NewBlock(&block)),
ticketsRevokedInBlock(dcrutil.NewBlock(&block)))
node.inMainChain = true
node.workSum = state.workSum
// Exception for version 1 blockchains: skip loading the stake
// node, as the upgrade path handles ensuring this is correctly
// set.
if dbInfo.version >= 2 {
node.stakeNode, err = stake.LoadBestNode(dbTx, uint32(node.height),
node.hash, node.header, b.chainParams)
if err != nil {
return err
}
node.stakeUndoData = node.stakeNode.UndoData()
node.newTickets = node.stakeNode.NewTickets()
}
b.bestNode = node
// Add the new node to the indices for faster lookups.
prevHash := &node.header.PrevBlock
b.index[*node.hash] = node
b.depNodes[*prevHash] = append(b.depNodes[*prevHash], node)
prevHash := node.header.PrevBlock
b.index[node.hash] = node
b.depNodes[prevHash] = append(b.depNodes[prevHash], node)
// Initialize the state related to the best block.
blockSize := uint64(len(blockBytes))
@ -1642,7 +1663,10 @@ func (b *BlockChain) BlockByHeight(blockHeight int64) (*dcrutil.Block, error) {
//
// This function is safe for concurrent access.
func (b *BlockChain) BlockByHash(hash *chainhash.Hash) (*dcrutil.Block, error) {
return b.getBlockFromHash(hash)
b.chainLock.RLock()
defer b.chainLock.RUnlock()
return b.fetchBlockFromHash(hash)
}
// HeightRange returns a range of block hashes for the given start and end
@ -1704,3 +1728,40 @@ func (b *BlockChain) HeightRange(startHeight, endHeight int64) ([]chainhash.Hash
})
return hashList, err
}
// DumpBlockChain dumps the blockchain to a map of height --> serialized bytes.
// Mainly used for generating tests.
func DumpBlockChain(db database.DB, height int64) (map[int64][]byte, error) {
blockchain := make(map[int64][]byte)
var hash chainhash.Hash
err := db.View(func(dbTx database.Tx) error {
for i := int64(0); i <= height; i++ {
// Fetch blocks and put them in the map
var serializedHeight [4]byte
dbnamespace.ByteOrder.PutUint32(serializedHeight[:], uint32(i))
meta := dbTx.Metadata()
heightIndex := meta.Bucket(dbnamespace.HeightIndexBucketName)
hashBytes := heightIndex.Get(serializedHeight[:])
if hashBytes == nil {
return fmt.Errorf("no block at height %d exists", i)
}
copy(hash[:], hashBytes)
blockBLocal, err := dbTx.FetchBlock(&hash)
if err != nil {
return err
}
blockB := make([]byte, len(blockBLocal))
copy(blockB, blockBLocal)
blockchain[i] = blockB
}
return nil
})
if err != nil {
return nil, err
}
return blockchain, err
}

View File

@ -1030,22 +1030,22 @@ func TestDatabaseInfoSerialization(t *testing.T) {
{
name: "not upgrade",
info: databaseInfo{
version: currentDatabaseVersion,
compVer: currentCompressionVersion,
version: 2,
compVer: 1,
date: time.Unix(int64(0x57acca95), 0),
upgradeStarted: false,
},
serialized: hexToBytes("010000000100000095caac57"),
serialized: hexToBytes("020000000100000095caac57"),
},
{
name: "upgrade",
info: databaseInfo{
version: currentDatabaseVersion,
compVer: currentCompressionVersion,
version: 2,
compVer: 1,
date: time.Unix(int64(0x57acca95), 0),
upgradeStarted: true,
},
serialized: hexToBytes("010000800100000095caac57"),
serialized: hexToBytes("020000800100000095caac57"),
},
}

View File

@ -2,7 +2,6 @@ package blockchain
import (
"bytes"
"encoding/binary"
"fmt"
"sort"
@ -37,7 +36,8 @@ func (b *BlockChain) DoStxoTest() error {
}
if int(ntx) != len(stxos) {
fmt.Printf("bad number of stxos calculated at height %v, got %v expected %v\n",
return fmt.Errorf("bad number of stxos calculated at "+
"height %v, got %v expected %v",
i, len(stxos), int(ntx))
}
}
@ -318,173 +318,6 @@ func DebugMsgTxString(msgTx *wire.MsgTx) string {
return buffer.String()
}
// DebugTicketDataString writes the contents of a ticket data struct
// as a string.
func DebugTicketDataString(td *stake.TicketData) string {
var buffer bytes.Buffer
str := fmt.Sprintf("SStxHash: %v\n", td.SStxHash)
buffer.WriteString(str)
str = fmt.Sprintf("SpendHash: %v\n", td.SpendHash)
buffer.WriteString(str)
str = fmt.Sprintf("BlockHeight: %v\n", td.BlockHeight)
buffer.WriteString(str)
str = fmt.Sprintf("Prefix: %v\n", td.Prefix)
buffer.WriteString(str)
str = fmt.Sprintf("Missed: %v\n", td.Missed)
buffer.WriteString(str)
str = fmt.Sprintf("Expired: %v\n", td.Expired)
buffer.WriteString(str)
return buffer.String()
}
// DebugTicketDBLiveString prints out the number of tickets in each
// bucket of the ticket database as a string.
func DebugTicketDBLiveString(tmdb *stake.TicketDB,
chainParams *chaincfg.Params) (string, error) {
var buffer bytes.Buffer
buffer.WriteString("\n")
for i := 0; i < stake.BucketsSize; i++ {
bucketTickets, err := tmdb.DumpLiveTickets(uint8(i))
if err != nil {
return "", err
}
str := fmt.Sprintf("%v: %v\t", i, len(bucketTickets))
buffer.WriteString(str)
// Add newlines.
if (i+1)%4 == 0 {
buffer.WriteString("\n")
}
}
return buffer.String(), nil
}
// DebugTicketDBLiveBucketString returns a string containing the ticket hashes
// found in a specific bucket of the live ticket database. If the verbose flag
// is called, it dumps the contents of the ticket data as well.
func DebugTicketDBLiveBucketString(tmdb *stake.TicketDB, bucket uint8,
verbose bool) (string, error) {
var buffer bytes.Buffer
str := fmt.Sprintf("Contents of live ticket bucket %v:\n", bucket)
buffer.WriteString(str)
bucketTickets, err := tmdb.DumpLiveTickets(bucket)
if err != nil {
return "", err
}
for hash, td := range bucketTickets {
str = fmt.Sprintf("%v\n", hash)
buffer.WriteString(str)
if verbose {
str = fmt.Sprintf("%v\n", DebugTicketDataString(td))
buffer.WriteString(str)
}
}
return buffer.String(), nil
}
// DebugTicketDBSpentBucketString prints the contents of the spent tickets
// database bucket indicated to a string that is returned. If the verbose
// flag is indicated, the contents of each ticket are printed as well.
func DebugTicketDBSpentBucketString(tmdb *stake.TicketDB, height int64,
verbose bool) (string, error) {
var buffer bytes.Buffer
str := fmt.Sprintf("Contents of spent ticket bucket height %v:\n", height)
buffer.WriteString(str)
bucketTickets, err := tmdb.DumpSpentTickets(height)
if err != nil {
return "", err
}
for hash, td := range bucketTickets {
missedStr := ""
if td.Missed {
missedStr = "Missed"
} else {
missedStr = "Spent"
}
str = fmt.Sprintf("%v (%v)\n", hash, missedStr)
buffer.WriteString(str)
if verbose {
str = fmt.Sprintf("%v\n", DebugTicketDataString(td))
buffer.WriteString(str)
}
}
return buffer.String(), nil
}
// DebugTicketDBMissedString prints out the contents of the missed ticket
// database to a string. If verbose is indicated, the ticket data itself
// is printed along with the ticket hashes.
func DebugTicketDBMissedString(tmdb *stake.TicketDB, verbose bool) (string,
error) {
var buffer bytes.Buffer
str := fmt.Sprintf("Contents of missed ticket database:\n")
buffer.WriteString(str)
bucketTickets, err := tmdb.DumpMissedTickets()
if err != nil {
return "", err
}
for hash, td := range bucketTickets {
str = fmt.Sprintf("%v\n", hash)
buffer.WriteString(str)
if verbose {
str = fmt.Sprintf("%v\n", DebugTicketDataString(td))
buffer.WriteString(str)
}
}
return buffer.String(), nil
}
// writeTicketDataToBuf writes some ticket data into a buffer as serialized
// data.
func writeTicketDataToBuf(buf *bytes.Buffer, td *stake.TicketData) {
buf.Write(td.SStxHash[:])
buf.Write(td.SpendHash[:])
// OK for our purposes.
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(td.BlockHeight))
buf.Write(b)
buf.Write([]byte{byte(td.Prefix)})
if td.Missed {
buf.Write([]byte{0x01})
} else {
buf.Write([]byte{0x00})
}
if td.Expired {
buf.Write([]byte{0x01})
} else {
buf.Write([]byte{0x00})
}
}
// DebugUtxoEntryData returns a string containing information about the data
// stored in the given UtxoEntry.
func DebugUtxoEntryData(hash chainhash.Hash, utx *UtxoEntry) string {
@ -637,99 +470,3 @@ func DebugStxosData(stxs []spentTxOut) string {
return buffer.String()
}
// TicketDbThumbprint takes all the tickets in the respective ticket db,
// sorts them, hashes their contents into a list, and then hashes that list.
// The resultant hash is the thumbprint of the ticket database, and should
// be the same across all clients that are synced to the same block. Returns
// an array of hashes len 3, containing (1) live tickets (2) spent tickets
// and (3) missed tickets.
// Do NOT use on mainnet or in production. For debug use only! Make sure
// the blockchain is frozen when you call this function.
func TicketDbThumbprint(tmdb *stake.TicketDB,
chainParams *chaincfg.Params) ([]*chainhash.Hash, error) {
// Container for the three master hashes to go into.
dbThumbprints := make([]*chainhash.Hash, 3, 3)
// (1) Live tickets.
allLiveTickets := stake.NewTicketDataSliceEmpty()
for i := 0; i < stake.BucketsSize; i++ {
bucketTickets, err := tmdb.DumpLiveTickets(uint8(i))
if err != nil {
return nil, err
}
for _, td := range bucketTickets {
allLiveTickets = append(allLiveTickets, td)
}
}
// Sort by the number data hash, since we already have this implemented
// and it's also unique.
sort.Sort(allLiveTickets)
// Create a buffer, dump all the data into it, and hash.
var buf bytes.Buffer
for _, td := range allLiveTickets {
writeTicketDataToBuf(&buf, td)
}
liveHash := chainhash.HashFunc(buf.Bytes())
liveThumbprint, err := chainhash.NewHash(liveHash[:])
if err != nil {
return nil, err
}
dbThumbprints[0] = liveThumbprint
// (2) Spent tickets.
height := tmdb.GetTopBlock()
allSpentTickets := stake.NewTicketDataSliceEmpty()
for i := int64(chainParams.StakeEnabledHeight); i <= height; i++ {
bucketTickets, err := tmdb.DumpSpentTickets(i)
if err != nil {
return nil, err
}
for _, td := range bucketTickets {
allSpentTickets = append(allSpentTickets, td)
}
}
sort.Sort(allSpentTickets)
buf.Reset() // Flush buffer
for _, td := range allSpentTickets {
writeTicketDataToBuf(&buf, td)
}
spentHash := chainhash.HashFunc(buf.Bytes())
spentThumbprint, err := chainhash.NewHash(spentHash[:])
if err != nil {
return nil, err
}
dbThumbprints[1] = spentThumbprint
// (3) Missed tickets.
allMissedTickets := stake.NewTicketDataSliceEmpty()
missedTickets, err := tmdb.DumpMissedTickets()
if err != nil {
return nil, err
}
for _, td := range missedTickets {
allMissedTickets = append(allMissedTickets, td)
}
sort.Sort(allMissedTickets)
buf.Reset() // Flush buffer
missedHash := chainhash.HashFunc(buf.Bytes())
missedThumbprint, err := chainhash.NewHash(missedHash[:])
if err != nil {
return nil, err
}
dbThumbprints[2] = missedThumbprint
return dbThumbprints, nil
}

View File

@ -15,7 +15,6 @@ import (
"strings"
"github.com/decred/dcrd/blockchain"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/database"
@ -68,8 +67,6 @@ func chainSetup(dbName string, params *chaincfg.Params) (*blockchain.BlockChain,
// Handle memory database specially since it doesn't need the disk
// specific handling.
var db database.DB
tmdb := new(stake.TicketDB)
var teardown func()
if testDbType == "memdb" {
ndb, err := database.Create(testDbType)
@ -81,7 +78,6 @@ func chainSetup(dbName string, params *chaincfg.Params) (*blockchain.BlockChain,
// Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing.
teardown = func() {
tmdb.Close()
db.Close()
}
} else {
@ -106,7 +102,6 @@ func chainSetup(dbName string, params *chaincfg.Params) (*blockchain.BlockChain,
// Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing.
teardown = func() {
tmdb.Close()
db.Close()
os.RemoveAll(dbPath)
os.RemoveAll(testDbRoot)
@ -116,7 +111,6 @@ func chainSetup(dbName string, params *chaincfg.Params) (*blockchain.BlockChain,
// Create the main chain instance.
chain, err := blockchain.New(&blockchain.Config{
DB: db,
TMDB: tmdb,
ChainParams: params,
})
@ -125,12 +119,6 @@ func chainSetup(dbName string, params *chaincfg.Params) (*blockchain.BlockChain,
err := fmt.Errorf("failed to create chain instance: %v", err)
return nil, nil, err
}
// Start the ticket database.
tmdb.Initialize(params, db)
err = tmdb.RescanTicketDB()
if err != nil {
return nil, nil, err
}
return chain, teardown, nil
}

View File

@ -824,7 +824,7 @@ func (b *BlockChain) estimateNextStakeDifficulty(curNode *blockNode,
emptyHeader.FreshStake = freshStake
// Connect the header.
emptyHeader.PrevBlock = *topNode.hash
emptyHeader.PrevBlock = topNode.hash
// Make up a node hash.
hB, err := emptyHeader.Bytes()
@ -835,7 +835,7 @@ func (b *BlockChain) estimateNextStakeDifficulty(curNode *blockNode,
thisNode := new(blockNode)
thisNode.header = *emptyHeader
thisNode.hash = &emptyHeaderHash
thisNode.hash = emptyHeaderHash
thisNode.height = i
thisNode.parent = topNode
topNode = thisNode

View File

@ -12,7 +12,6 @@ import (
"path/filepath"
"github.com/decred/dcrd/blockchain"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/database"
_ "github.com/decred/dcrd/database/ffldb"
@ -40,15 +39,12 @@ func ExampleBlockChain_ProcessBlock() {
defer os.RemoveAll(dbPath)
defer db.Close()
var tmdb *stake.TicketDB
// Create a new BlockChain instance using the underlying database for
// the main bitcoin network. This example does not demonstrate some
// of the other available configuration options such as specifying a
// notification callback and signature cache.
chain, err := blockchain.New(&blockchain.Config{
DB: db,
TMDB: tmdb,
ChainParams: &chaincfg.MainNetParams,
})
if err != nil {

View File

@ -847,7 +847,6 @@ func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block, parent *dcrutil.Bloc
// Add all of the index entries for each address.
stakeIdxsStart := len(parentTxLocs)
allTxLocs := append(parentTxLocs, blockStxLocs...)
//offsetStakeIdxStartBlock := len(block.Transactions())
addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
for addrKey, txIdxs := range addrsToTxns {
for _, txIdx := range txIdxs {

View File

@ -10,6 +10,7 @@ import (
"fmt"
"github.com/decred/dcrd/blockchain"
"github.com/decred/dcrd/blockchain/internal/progresslog"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/chaincfg/chainhash"
@ -392,7 +393,7 @@ func (m *Manager) Init(chain *blockchain.BlockChain) error {
}
// Create a progress logger for the indexing process below.
progressLogger := newBlockProgressLogger("Indexed", log)
progressLogger := progresslog.NewBlockProgressLogger("Indexed", log)
// At this point, one or more indexes are behind the current best chain
// tip and need to be caught up, so log the details and loop through

View File

@ -3,7 +3,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
package progresslog
import (
"sync"
@ -14,10 +14,10 @@ import (
"github.com/decred/dcrutil"
)
// blockProgressLogger provides periodic logging for other services in order
// BlockProgressLogger provides periodic logging for other services in order
// to show users progress of certain "actions" involving some or all current
// blocks. Ex: syncing to best chain, indexing all blocks, etc.
type blockProgressLogger struct {
type BlockProgressLogger struct {
receivedLogBlocks int64
receivedLogTx int64
lastBlockLogTime time.Time
@ -27,12 +27,12 @@ type blockProgressLogger struct {
sync.Mutex
}
// newBlockProgressLogger returns a new block progress logger.
// NewBlockProgressLogger returns a new block progress logger.
// The progress message is templated as follows:
// {progressAction} {numProcessed} {blocks|block} in the last {timePeriod}
// ({numTxs}, height {lastBlockHeight}, {lastBlockTimeStamp})
func newBlockProgressLogger(progressMessage string, logger btclog.Logger) *blockProgressLogger {
return &blockProgressLogger{
func NewBlockProgressLogger(progressMessage string, logger btclog.Logger) *BlockProgressLogger {
return &BlockProgressLogger{
lastBlockLogTime: time.Now(),
progressAction: progressMessage,
subsystemLogger: logger,
@ -42,7 +42,7 @@ func newBlockProgressLogger(progressMessage string, logger btclog.Logger) *block
// LogBlockHeight logs a new block height as an information message to show
// progress to the user. In order to prevent spam, it limits logging to one
// message every 10 seconds with duration and totals included.
func (b *blockProgressLogger) LogBlockHeight(block, parent *dcrutil.Block) {
func (b *BlockProgressLogger) LogBlockHeight(block, parent *dcrutil.Block) {
b.Lock()
defer b.Unlock()
b.receivedLogBlocks++
@ -73,8 +73,9 @@ func (b *blockProgressLogger) LogBlockHeight(block, parent *dcrutil.Block) {
txStr = "transaction"
}
b.subsystemLogger.Infof("%s %d %s in the last %s (%d %s, height %d, %s)",
b.progressAction, b.receivedLogBlocks, blockStr, tDuration, b.receivedLogTx,
txStr, block.Height(), block.MsgBlock().Header.Timestamp)
b.progressAction, b.receivedLogBlocks, blockStr, tDuration,
b.receivedLogTx, txStr, block.Height(),
block.MsgBlock().Header.Timestamp)
b.receivedLogBlocks = 0
b.receivedLogTx = 0

View File

@ -8,7 +8,6 @@ package blockchain
import (
"fmt"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrutil"
)
@ -89,7 +88,9 @@ type TicketNotificationsData struct {
Hash chainhash.Hash
Height int64
StakeDifficulty int64
TicketMap stake.SStxMemMap
TicketsSpent []chainhash.Hash
TicketsMissed []chainhash.Hash
TicketsNew []chainhash.Hash
}
// Notification defines notification that is sent to the caller via the callback

View File

@ -186,6 +186,7 @@ func (b *BlockChain) ProcessBlock(block *dcrutil.Block,
blockHeader.Timestamp, checkpointTime)
return false, false, ruleError(ErrCheckpointTimeTooOld, str)
}
if !fastAdd {
// Even though the checks prior to now have already ensured the
// proof of work exceeds the claimed amount, the claimed amount

View File

@ -371,9 +371,6 @@ func reorgTestForced(t *testing.T) {
// reorganization to test the block chain handling code.
func TestReorganization(t *testing.T) {
reorgTestLong(t)
// This can take a while, do not enable it by default.
// reorgTestShort(t)
reorgTestShort(t)
reorgTestForced(t)
}

View File

@ -1,129 +0,0 @@
// Copyright (c) 2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package stake_test
import (
"fmt"
"os"
"path/filepath"
"github.com/decred/dcrd/blockchain"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/database"
_ "github.com/decred/dcrd/database/ffldb"
"github.com/decred/dcrd/wire"
)
const (
// testDbType is the database backend type to use for the tests.
testDbType = "ffldb"
// testDbRoot is the root directory used to create all test databases.
testDbRoot = "testdbs"
// blockDataNet is the expected network in the test block data.
blockDataNet = wire.MainNet
)
// filesExists returns whether or not the named file or directory exists.
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
// isSupportedDbType returns whether or not the passed database type is
// currently supported.
func isSupportedDbType(dbType string) bool {
supportedDrivers := database.SupportedDrivers()
for _, driver := range supportedDrivers {
if dbType == driver {
return true
}
}
return false
}
// chainSetup is used to create a new db and chain instance with the genesis
// block already inserted. In addition to the new chain instnce, it returns
// a teardown function the caller should invoke when done testing to clean up.
func chainSetup(dbName string, params *chaincfg.Params) (*blockchain.BlockChain, func(), error) {
if !isSupportedDbType(testDbType) {
return nil, nil, fmt.Errorf("unsupported db type %v", testDbType)
}
// Handle memory database specially since it doesn't need the disk
// specific handling.
var db database.DB
tmdb := new(stake.TicketDB)
var teardown func()
if testDbType == "memdb" {
ndb, err := database.Create(testDbType)
if err != nil {
return nil, nil, fmt.Errorf("error creating db: %v", err)
}
db = ndb
// Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing.
teardown = func() {
tmdb.Close()
db.Close()
}
} else {
// Create the root directory for test databases.
if !fileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
err := fmt.Errorf("unable to create test db "+
"root: %v", err)
return nil, nil, err
}
}
// Create a new database to store the accepted blocks into.
dbPath := filepath.Join(testDbRoot, dbName)
_ = os.RemoveAll(dbPath)
ndb, err := database.Create(testDbType, dbPath, blockDataNet)
if err != nil {
return nil, nil, fmt.Errorf("error creating db: %v", err)
}
db = ndb
// Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing.
teardown = func() {
tmdb.Close()
db.Close()
os.RemoveAll(dbPath)
os.RemoveAll(testDbRoot)
}
}
// Create the main chain instance.
chain, err := blockchain.New(&blockchain.Config{
DB: db,
TMDB: tmdb,
ChainParams: params,
})
if err != nil {
teardown()
err := fmt.Errorf("failed to create chain instance: %v", err)
return nil, nil, err
}
// Start the ticket database.
tmdb.Initialize(params, db)
err = tmdb.RescanTicketDB()
if err != nil {
return nil, nil, err
}
return chain, teardown, nil
}

View File

@ -147,6 +147,35 @@ const (
// ErrVerifyOutPkhs indicates that the recipient of the P2PKH or P2SH
// script was different from that indicated in the SStx input.
ErrVerifyOutPkhs
// ErrDatabaseCorrupt indicates a database inconsistency.
ErrDatabaseCorrupt
// ErrMissingDatabaseTx indicates that a node disconnection failed to
// pass a database transaction when attempting to remove a very old
// node.
ErrMissingDatabaseTx
// ErrMemoryCorruption indicates that memory has somehow become corrupt,
// for example invalid block header serialization from an in-memory
// struct.
ErrMemoryCorruption
// ErrFindTicketIdxs indicates a failure to find the selected ticket
// indexes from the block header.
ErrFindTicketIdxs
// ErrMissingTicket indicates that a ticket was missing in one of the
// ticket treaps when it was attempted to be fetched.
ErrMissingTicket
// ErrDuplicateTicket indicates that a duplicate ticket was attempted
// to be inserted into a ticket treap or the database.
ErrDuplicateTicket
// ErrUnknownTicketSpent indicates that an unknown ticket was spent by
// the block.
ErrUnknownTicketSpent
)
// Map of ErrorCode values back to their constant names for pretty printing.
@ -183,6 +212,13 @@ var errorCodeStrings = map[ErrorCode]string{
ErrVerifySpendTooMuch: "ErrVerifySpendTooMuch",
ErrVerifyOutputAmt: "ErrVerifyOutputAmt",
ErrVerifyOutPkhs: "ErrVerifyOutPkhs",
ErrDatabaseCorrupt: "ErrDatabaseCorrupt",
ErrMissingDatabaseTx: "ErrMissingDatabaseTx",
ErrMemoryCorruption: "ErrMemoryCorruption",
ErrFindTicketIdxs: "ErrFindTicketIdxs",
ErrMissingTicket: "ErrMissingTicket",
ErrDuplicateTicket: "ErrDuplicateTicket",
ErrUnknownTicketSpent: "ErrUnknownTicketSpent",
}
// String returns the ErrorCode as a human-readable name.

View File

@ -8,52 +8,54 @@ package stake_test
import (
"testing"
"github.com/decred/dcrd/blockchain"
"github.com/decred/dcrd/blockchain/stake"
)
// TestErrorCodeStringer tests the stringized output for the ErrorCode type.
func TestErrorCodeStringer(t *testing.T) {
tests := []struct {
in blockchain.ErrorCode
in stake.ErrorCode
want string
}{
{blockchain.ErrDuplicateBlock, "ErrDuplicateBlock"},
{blockchain.ErrBlockTooBig, "ErrBlockTooBig"},
{blockchain.ErrBlockVersionTooOld, "ErrBlockVersionTooOld"},
{blockchain.ErrInvalidTime, "ErrInvalidTime"},
{blockchain.ErrTimeTooOld, "ErrTimeTooOld"},
{blockchain.ErrTimeTooNew, "ErrTimeTooNew"},
{blockchain.ErrDifficultyTooLow, "ErrDifficultyTooLow"},
{blockchain.ErrUnexpectedDifficulty, "ErrUnexpectedDifficulty"},
{blockchain.ErrHighHash, "ErrHighHash"},
{blockchain.ErrBadMerkleRoot, "ErrBadMerkleRoot"},
{blockchain.ErrBadCheckpoint, "ErrBadCheckpoint"},
{blockchain.ErrForkTooOld, "ErrForkTooOld"},
{blockchain.ErrCheckpointTimeTooOld, "ErrCheckpointTimeTooOld"},
{blockchain.ErrNoTransactions, "ErrNoTransactions"},
{blockchain.ErrTooManyTransactions, "ErrTooManyTransactions"},
{blockchain.ErrNoTxInputs, "ErrNoTxInputs"},
{blockchain.ErrNoTxOutputs, "ErrNoTxOutputs"},
{blockchain.ErrTxTooBig, "ErrTxTooBig"},
{blockchain.ErrBadTxOutValue, "ErrBadTxOutValue"},
{blockchain.ErrDuplicateTxInputs, "ErrDuplicateTxInputs"},
{blockchain.ErrBadTxInput, "ErrBadTxInput"},
{blockchain.ErrBadCheckpoint, "ErrBadCheckpoint"},
{blockchain.ErrMissingTx, "ErrMissingTx"},
{blockchain.ErrUnfinalizedTx, "ErrUnfinalizedTx"},
{blockchain.ErrDuplicateTx, "ErrDuplicateTx"},
{blockchain.ErrOverwriteTx, "ErrOverwriteTx"},
{blockchain.ErrImmatureSpend, "ErrImmatureSpend"},
{blockchain.ErrDoubleSpend, "ErrDoubleSpend"},
{blockchain.ErrSpendTooHigh, "ErrSpendTooHigh"},
{blockchain.ErrBadFees, "ErrBadFees"},
{blockchain.ErrTooManySigOps, "ErrTooManySigOps"},
{blockchain.ErrFirstTxNotCoinbase, "ErrFirstTxNotCoinbase"},
{blockchain.ErrMultipleCoinbases, "ErrMultipleCoinbases"},
{blockchain.ErrBadCoinbaseScriptLen, "ErrBadCoinbaseScriptLen"},
{blockchain.ErrBadCoinbaseValue, "ErrBadCoinbaseValue"},
{blockchain.ErrScriptMalformed, "ErrScriptMalformed"},
{blockchain.ErrScriptValidation, "ErrScriptValidation"},
{stake.ErrSStxTooManyInputs, "ErrSStxTooManyInputs"},
{stake.ErrSStxTooManyOutputs, "ErrSStxTooManyOutputs"},
{stake.ErrSStxNoOutputs, "ErrSStxNoOutputs"},
{stake.ErrSStxInvalidInputs, "ErrSStxInvalidInputs"},
{stake.ErrSStxInvalidOutputs, "ErrSStxInvalidOutputs"},
{stake.ErrSStxInOutProportions, "ErrSStxInOutProportions"},
{stake.ErrSStxBadCommitAmount, "ErrSStxBadCommitAmount"},
{stake.ErrSStxBadChangeAmts, "ErrSStxBadChangeAmts"},
{stake.ErrSStxVerifyCalcAmts, "ErrSStxVerifyCalcAmts"},
{stake.ErrSSGenWrongNumInputs, "ErrSSGenWrongNumInputs"},
{stake.ErrSSGenTooManyOutputs, "ErrSSGenTooManyOutputs"},
{stake.ErrSSGenNoOutputs, "ErrSSGenNoOutputs"},
{stake.ErrSSGenWrongIndex, "ErrSSGenWrongIndex"},
{stake.ErrSSGenWrongTxTree, "ErrSSGenWrongTxTree"},
{stake.ErrSSGenNoStakebase, "ErrSSGenNoStakebase"},
{stake.ErrSSGenNoReference, "ErrSSGenNoReference"},
{stake.ErrSSGenBadReference, "ErrSSGenBadReference"},
{stake.ErrSSGenNoVotePush, "ErrSSGenNoVotePush"},
{stake.ErrSSGenBadVotePush, "ErrSSGenBadVotePush"},
{stake.ErrSSGenBadGenOuts, "ErrSSGenBadGenOuts"},
{stake.ErrSSRtxWrongNumInputs, "ErrSSRtxWrongNumInputs"},
{stake.ErrSSRtxTooManyOutputs, "ErrSSRtxTooManyOutputs"},
{stake.ErrSSRtxNoOutputs, "ErrSSRtxNoOutputs"},
{stake.ErrSSRtxWrongTxTree, "ErrSSRtxWrongTxTree"},
{stake.ErrSSRtxBadOuts, "ErrSSRtxBadOuts"},
{stake.ErrVerSStxAmts, "ErrVerSStxAmts"},
{stake.ErrVerifyInput, "ErrVerifyInput"},
{stake.ErrVerifyOutType, "ErrVerifyOutType"},
{stake.ErrVerifyTooMuchFees, "ErrVerifyTooMuchFees"},
{stake.ErrVerifySpendTooMuch, "ErrVerifySpendTooMuch"},
{stake.ErrVerifyOutputAmt, "ErrVerifyOutputAmt"},
{stake.ErrVerifyOutPkhs, "ErrVerifyOutPkhs"},
{stake.ErrDatabaseCorrupt, "ErrDatabaseCorrupt"},
{stake.ErrMissingDatabaseTx, "ErrMissingDatabaseTx"},
{stake.ErrMemoryCorruption, "ErrMemoryCorruption"},
{stake.ErrFindTicketIdxs, "ErrFindTicketIdxs"},
{stake.ErrMissingTicket, "ErrMissingTicket"},
{stake.ErrDuplicateTicket, "ErrDuplicateTicket"},
{stake.ErrUnknownTicketSpent, "ErrUnknownTicketSpent"},
{0xffff, "Unknown ErrorCode (65535)"},
}
@ -71,15 +73,13 @@ func TestErrorCodeStringer(t *testing.T) {
// TestRuleError tests the error output for the RuleError type.
func TestRuleError(t *testing.T) {
tests := []struct {
in blockchain.RuleError
in stake.RuleError
want string
}{
{
blockchain.RuleError{Description: "duplicate block"},
{stake.RuleError{Description: "duplicate block"},
"duplicate block",
},
{
blockchain.RuleError{Description: "human-readable error"},
{stake.RuleError{Description: "human-readable error"},
"human-readable error",
},
}

View File

@ -0,0 +1,46 @@
// Package dbnamespace contains constants that define the database namespaces
// for the stake database, so that callers may easily access
// this data.
package dbnamespace
import (
"encoding/binary"
)
var (
// ByteOrder is the preferred byte order used for serializing numeric
// fields for storage in the database.
ByteOrder = binary.LittleEndian
// StakeDbInfoBucketName is the name of the database bucket used to
// house a single k->v that stores global versioning and date information for
// the stake database.
StakeDbInfoBucketName = []byte("stakedbinfo")
// StakeChainStateKeyName is the name of the db key used to store the best
// chain state from the perspective of the stake database.
StakeChainStateKeyName = []byte("stakechainstate")
// LiveTicketsBucketName is the name of the db bucket used to house the
// list of live tickets keyed to their entry height.
LiveTicketsBucketName = []byte("livetickets")
// MissedTicketsBucketName is the name of the db bucket used to house the
// list of missed tickets keyed to their entry height.
MissedTicketsBucketName = []byte("missedtickets")
// RevokedTicketsBucketName is the name of the db bucket used to house the
// list of revoked tickets keyed to their entry height.
RevokedTicketsBucketName = []byte("revokedtickets")
// StakeBlockUndoDataBucketName is the name of the db bucket used to house the
// information used to roll back the three main databases when regressing
// backwards through the blockchain and restoring the stake information
// to that of an earlier height. It is keyed to a mainchain height.
StakeBlockUndoDataBucketName = []byte("stakeblockundo")
// TicketsInBlockBucketName is the name of the db bucket used to house the
// list of tickets in a block added to the mainchain, so that it can be
// looked up later to insert new tickets into the live ticket database.
TicketsInBlockBucketName = []byte("ticketsinblock")
)
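A rough usage sketch follows: code elsewhere in the stake module that already holds a database transaction can look these buckets up by name. The helper below is illustrative only (its name, the fmt import, and the error text are not part of this change).

// listLiveTicketKeys is a hypothetical helper that collects the ticket
// hashes stored under LiveTicketsBucketName. It assumes dbTx came from a
// database.DB View or Update call, as in the ticketdb code below.
func listLiveTicketKeys(dbTx database.Tx) ([][]byte, error) {
	bucket := dbTx.Metadata().Bucket(dbnamespace.LiveTicketsBucketName)
	if bucket == nil {
		return nil, fmt.Errorf("live tickets bucket is not initialized")
	}

	var keys [][]byte
	err := bucket.ForEach(func(k, v []byte) error {
		// Copy the key; the slice is only guaranteed to be valid for
		// the duration of the callback.
		keyCopy := make([]byte, len(k))
		copy(keyCopy, k)
		keys = append(keys, keyCopy)
		return nil
	})
	return keys, err
}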

View File

@ -0,0 +1,720 @@
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ticketdb
import (
"fmt"
"time"
"github.com/decred/dcrd/blockchain/stake/internal/dbnamespace"
"github.com/decred/dcrd/blockchain/stake/internal/tickettreap"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/database"
)
const (
// upgradeStartedBit is the bit flag for whether or not a database
// upgrade is in progress. It is used to determine if the database
// is in an inconsistent state from the update.
upgradeStartedBit = 0x80000000
// currentDatabaseVersion indicates what the current database
// version is.
currentDatabaseVersion = 1
)
// Database structure -------------------------------------------------------------
//
// Buckets
//
// The information about the ticket database is defined by the
// StakeDbInfoBucketName bucket. By default, this bucket contains a single
// entry, keyed by StakeDbInfoBucketName itself, whose value holds all of the
// database information, such as the date created and the version of
// the database.
//
// Blockchain state is stored in a root key named StakeChainStateKeyName. This
// contains the current height of the blockchain, which should be equivalent to
// the height of the best chain on start up.
//
// There are 5 buckets from the database reserved for tickets. These are:
// 1. Live
// Live ticket bucket, for tickets currently in the lottery
//
// k: ticket hash
// v: height
//
// 2. Missed
// Missed tickets bucket, for all tickets that are missed.
//
// k: ticket hash
// v: height
//
// 3. Revoked
// Revoked tickets bucket, for all tickets that are revoked.
//
// k: ticket hash
// v: height
//
// 4. BlockUndo
// Block removal data, for reverting the first 3 database buckets to
// a previous state.
//
// k: height
// v: serialized undo ticket data
//
// 5. TicketsToAdd
// Tickets to add bucket, which tells which tickets will be maturing and
// entering the live ticket bucket (1) in the event that a block at that
// height is added.
//
// k: height
// v: serialized list of ticket hashes
//
// For pruned nodes, both 4 and 5 can be curtailed to include only the most
// recent blocks.
//
// Procedures ---------------------------------------------------------------------
//
// Adding a block
//
// The steps for the addition of a block are as follows:
// 1. Remove the n (constant, n=5 for all Decred networks) many tickets that were
// selected this block. The results of this feed into two database updates:
// ------> A database entry containing all the data for the block
// | required to undo the adding of the block (as serialized
// | SpentTicketData and MissedTicketData)
// \--> All missed tickets must be moved to the missed ticket bucket.
//
// 2. Expire any tickets from this block.
// The results of this feed into two database updates:
// ------> A database entry containing all the data for the block
// | required to undo the adding of the block (as serialized
// | MissedTicketData)
// \--> All expired tickets must be moved to the missed ticket bucket.
//
// 3. All revocations in the block are processed, and the revoked ticket moved
// from the missed ticket bucket to the revocations bucket:
// ------> A database entry containing all the data for the block
// | required to undo the adding of the block (as serialized
// | MissedTicketData, revoked flag added)
// \--> All revoked tickets must be moved to the revoked ticket bucket.
//
// 4. All newly maturing tickets must be added to the live ticket bucket. These
// are previously stored in the "tickets to add" bucket so they can more
// easily be pulled down when adding a block without having to load the
// entire block itself and suffer the deserialization overhead. The only
// things that must be written for this step are newly added tickets to the
// ticket database, along with their respective heights.
//
// Removing a block
//
// Steps 1 through 4 above are iterated through in reverse. The newly maturing
// ticket hashes are fetched from the "tickets to add" bucket for the given
// height that was used at this block height, and the tickets are dropped from
// the live ticket bucket. The UndoTicketData is then fetched for the block and
// iterated through in reverse order (it was stored in forward order) to restore
// any changes to the relevant buckets made when inserting the block. Finally,
// the data for the block removed is purged from both the BlockUndo and
// TicketsToAdd buckets.
// -----------------------------------------------------------------------------
// The database information contains information about the version and date
// of the blockchain database.
//
// Field Type Size Description
// version uint32 4 bytes The version of the database
// date uint32 4 bytes The date of the creation of the database
//
// The high bit (0x80000000) is used on version to indicate that an upgrade
// is in progress and used to confirm the database fidelity on start up.
// -----------------------------------------------------------------------------
// databaseInfoSize is the serialized size of the database information in bytes.
const databaseInfoSize = 8
// DatabaseInfo is the structure for a database.
type DatabaseInfo struct {
Version uint32
Date time.Time
UpgradeStarted bool
}
// serializeDatabaseInfo serializes a database information struct.
func serializeDatabaseInfo(dbi *DatabaseInfo) []byte {
version := dbi.Version
if dbi.UpgradeStarted {
version |= upgradeStartedBit
}
val := make([]byte, databaseInfoSize)
versionBytes := make([]byte, 4)
dbnamespace.ByteOrder.PutUint32(versionBytes, version)
copy(val[0:4], versionBytes)
timestampBytes := make([]byte, 4)
dbnamespace.ByteOrder.PutUint32(timestampBytes, uint32(dbi.Date.Unix()))
copy(val[4:8], timestampBytes)
return val
}
// DbPutDatabaseInfo uses an existing database transaction to store the database
// information.
func DbPutDatabaseInfo(dbTx database.Tx, dbi *DatabaseInfo) error {
meta := dbTx.Metadata()
dbInfoBucket := meta.Bucket(dbnamespace.StakeDbInfoBucketName)
val := serializeDatabaseInfo(dbi)
// Store the current database info into the database.
return dbInfoBucket.Put(dbnamespace.StakeDbInfoBucketName, val[:])
}
// deserializeDatabaseInfo deserializes a database information struct.
func deserializeDatabaseInfo(dbInfoBytes []byte) (*DatabaseInfo, error) {
if len(dbInfoBytes) < databaseInfoSize {
return nil, ticketDBError(ErrDatabaseInfoShortRead,
"short read when deserializing best chain state data")
}
rawVersion := dbnamespace.ByteOrder.Uint32(dbInfoBytes[0:4])
upgradeStarted := (upgradeStartedBit & rawVersion) > 0
version := rawVersion &^ upgradeStartedBit
ts := dbnamespace.ByteOrder.Uint32(dbInfoBytes[4:8])
return &DatabaseInfo{
Version: version,
Date: time.Unix(int64(ts), 0),
UpgradeStarted: upgradeStarted,
}, nil
}
// DbFetchDatabaseInfo uses an existing database transaction to
// fetch the database versioning and creation information.
func DbFetchDatabaseInfo(dbTx database.Tx) (*DatabaseInfo, error) {
meta := dbTx.Metadata()
bucket := meta.Bucket(dbnamespace.StakeDbInfoBucketName)
// Uninitialized state.
if bucket == nil {
return nil, nil
}
dbInfoBytes := bucket.Get(dbnamespace.StakeDbInfoBucketName)
if dbInfoBytes == nil {
return nil, ticketDBError(ErrMissingKey, "missing key for database info")
}
return deserializeDatabaseInfo(dbInfoBytes)
}
// -----------------------------------------------------------------------------
// The best chain state consists of the best block hash and height, the total
// number of live tickets, the total number of missed tickets, the number of
// revoked tickets, the tickets per block, and the hashes of the next winners.
//
// The serialized format is:
//
// <block hash><block height><live><missed><revoked>
//
// Field Type Size
// block hash chainhash.Hash chainhash.HashSize
// block height uint32 4 bytes
// live tickets uint32 4 bytes
// missed tickets uint64 8 bytes
// revoked tickets uint64 8 bytes
// tickets per block uint16 2 bytes
// next winners []chainhash.Hash chainhash.HashSize * tickets per block
// -----------------------------------------------------------------------------
// minimumBestChainStateSize is the minimum serialized size of the best chain
// state in bytes.
var minimumBestChainStateSize = chainhash.HashSize + 4 + 4 + 8 + 8 + 2
// BestChainState represents the data to be stored in the database for the current
// best chain state.
type BestChainState struct {
Hash chainhash.Hash
Height uint32
Live uint32
Missed uint64
Revoked uint64
PerBlock uint16
NextWinners []chainhash.Hash
}
// serializeBestChainState returns the serialization of the passed block best
// chain state. This is data to be stored in the chain state bucket. This
// function will panic if the number of tickets per block is less than the
// size of next winners, which should never happen unless there is memory
// corruption.
func serializeBestChainState(state BestChainState) []byte {
// Serialize the chain state.
serializedData := make([]byte, minimumBestChainStateSize)
offset := 0
copy(serializedData[offset:offset+chainhash.HashSize], state.Hash[:])
offset += chainhash.HashSize
dbnamespace.ByteOrder.PutUint32(serializedData[offset:], state.Height)
offset += 4
dbnamespace.ByteOrder.PutUint32(serializedData[offset:], state.Live)
offset += 4
dbnamespace.ByteOrder.PutUint64(serializedData[offset:], state.Missed)
offset += 8
dbnamespace.ByteOrder.PutUint64(serializedData[offset:], state.Revoked)
offset += 8
dbnamespace.ByteOrder.PutUint16(serializedData[offset:], state.PerBlock)
offset += 2
// Serialize the next winners.
ticketBuffer := make([]byte, chainhash.HashSize*int(state.PerBlock))
serializedData = append(serializedData, ticketBuffer...)
for i := range state.NextWinners {
copy(serializedData[offset:offset+chainhash.HashSize],
state.NextWinners[i][:])
offset += chainhash.HashSize
}
return serializedData[:]
}
// deserializeBestChainState deserializes the passed serialized best chain
// state. This is data stored in the chain state bucket and is updated after
// every block is connected or disconnected from the main chain.
func deserializeBestChainState(serializedData []byte) (BestChainState, error) {
// Ensure the serialized data has enough bytes to properly deserialize
// the state.
if len(serializedData) < minimumBestChainStateSize {
return BestChainState{}, ticketDBError(ErrChainStateShortRead,
"short read when deserializing best chain state data")
}
state := BestChainState{}
offset := 0
copy(state.Hash[:], serializedData[offset:offset+chainhash.HashSize])
offset += chainhash.HashSize
state.Height = dbnamespace.ByteOrder.Uint32(serializedData[offset : offset+4])
offset += 4
state.Live = dbnamespace.ByteOrder.Uint32(
serializedData[offset : offset+4])
offset += 4
state.Missed = dbnamespace.ByteOrder.Uint64(
serializedData[offset : offset+8])
offset += 8
state.Revoked = dbnamespace.ByteOrder.Uint64(
serializedData[offset : offset+8])
offset += 8
state.PerBlock = dbnamespace.ByteOrder.Uint16(
serializedData[offset : offset+2])
offset += 2
state.NextWinners = make([]chainhash.Hash, int(state.PerBlock))
for i := 0; i < int(state.PerBlock); i++ {
copy(state.NextWinners[i][:],
serializedData[offset:offset+chainhash.HashSize])
offset += chainhash.HashSize
}
return state, nil
}
// DbFetchBestState uses an existing database transaction to fetch the best chain
// state.
func DbFetchBestState(dbTx database.Tx) (BestChainState, error) {
meta := dbTx.Metadata()
v := meta.Get(dbnamespace.StakeChainStateKeyName)
if v == nil {
return BestChainState{}, ticketDBError(ErrMissingKey,
"missing key for chain state data")
}
return deserializeBestChainState(v)
}
// DbPutBestState uses an existing database transaction to update the best chain
// state with the given parameters.
func DbPutBestState(dbTx database.Tx, bcs BestChainState) error {
// Serialize the current best chain state.
serializedData := serializeBestChainState(bcs)
// Store the current best chain state into the database.
return dbTx.Metadata().Put(dbnamespace.StakeChainStateKeyName, serializedData)
}
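A minimal sketch of how the two helpers above fit together; the wrapper name and its argument list are assumptions for illustration, not part of this change. The state is written inside a single managed transaction and read back inside a read-only one.

// updateAndReadBestState is a hypothetical wrapper showing DbPutBestState
// and DbFetchBestState used together against a database.DB handle.
func updateAndReadBestState(db database.DB, state BestChainState) (BestChainState, error) {
	// Write the state atomically within a single database transaction.
	err := db.Update(func(dbTx database.Tx) error {
		return DbPutBestState(dbTx, state)
	})
	if err != nil {
		return BestChainState{}, err
	}

	// Read it back within a read-only transaction.
	var fetched BestChainState
	err = db.View(func(dbTx database.Tx) error {
		var errLocal error
		fetched, errLocal = DbFetchBestState(dbTx)
		return errLocal
	})
	return fetched, err
}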
// UndoTicketData is the data for any ticket that has been spent, missed, or
// revoked at some new height. It is used to roll back the database in the
// event of reorganizations or when determining if a side chain block is valid.
// The boolean fields are encoded as a single byte of flags.
// The flags describe a particular state for the ticket:
// 1. Missed is set, but revoked and spent are not (0000 0001). The ticket
// was selected in the lottery at this block height but missed, or the
// ticket became too old and was missed. The ticket is being moved to the
// missed ticket bucket from the live ticket bucket.
// 2. Missed and revoked are set (0000 0011). The ticket was missed
// previously at a block before this one and was revoked, and
// as such is being moved to the revoked ticket bucket from the
// missed ticket bucket.
// 3. Spent is set (0000 0100). The ticket has been spent and is removed
// from the live ticket bucket.
// 4. No flags are set. The ticket was newly added to the live ticket
// bucket this block as a maturing ticket.
type UndoTicketData struct {
TicketHash chainhash.Hash
TicketHeight uint32
Missed bool
Revoked bool
Spent bool
Expired bool
}
// undoTicketDataSize is the serialized size of an UndoTicketData struct in bytes.
const undoTicketDataSize = 37
// undoBitFlagsToByte converts the bools of the UndoTicketData struct into a
// series of bitflags in a single byte.
func undoBitFlagsToByte(missed, revoked, spent, expired bool) byte {
var b byte
if missed {
b |= 1 << 0
}
if revoked {
b |= 1 << 1
}
if spent {
b |= 1 << 2
}
if expired {
b |= 1 << 3
}
return b
}
// undoBitFlagsFromByte converts a byte into its relevant flags.
func undoBitFlagsFromByte(b byte) (bool, bool, bool, bool) {
missed := b&(1<<0) > 0
revoked := b&(1<<1) > 0
spent := b&(1<<2) > 0
expired := b&(1<<3) > 0
return missed, revoked, spent, expired
}
// serializeBlockUndoData serializes an entire list of relevant tickets for
// undoing tickets at any given height.
func serializeBlockUndoData(utds []UndoTicketData) []byte {
b := make([]byte, len(utds)*undoTicketDataSize)
offset := 0
for _, utd := range utds {
copy(b[offset:offset+chainhash.HashSize], utd.TicketHash[:])
offset += chainhash.HashSize
dbnamespace.ByteOrder.PutUint32(b[offset:offset+4], utd.TicketHeight)
offset += 4
b[offset] = undoBitFlagsToByte(utd.Missed, utd.Revoked, utd.Spent,
utd.Expired)
offset++
}
return b
}
// deserializeBlockUndoData deserializes a list of UndoTicketData for an entire
// block. Empty but non-nil slices are deserialized empty.
func deserializeBlockUndoData(b []byte) ([]UndoTicketData, error) {
if b != nil && len(b) == 0 {
return make([]UndoTicketData, 0), nil
}
if len(b) < undoTicketDataSize {
return nil, ticketDBError(ErrUndoDataShortRead, "short read when "+
"deserializing block undo data")
}
if len(b)%undoTicketDataSize != 0 {
return nil, ticketDBError(ErrUndoDataCorrupt, "corrupt data found "+
"when deserializing block undo data")
}
entries := len(b) / undoTicketDataSize
utds := make([]UndoTicketData, entries)
offset := 0
for i := 0; i < entries; i++ {
hash, err := chainhash.NewHash(
b[offset : offset+chainhash.HashSize])
if err != nil {
return nil, ticketDBError(ErrUndoDataCorrupt, "corrupt hash found "+
"when deserializing block undo data")
}
offset += chainhash.HashSize
height := dbnamespace.ByteOrder.Uint32(b[offset : offset+4])
offset += 4
missed, revoked, spent, expired := undoBitFlagsFromByte(b[offset])
offset++
utds[i] = UndoTicketData{
TicketHash: *hash,
TicketHeight: height,
Missed: missed,
Revoked: revoked,
Spent: spent,
Expired: expired,
}
}
return utds, nil
}
// DbFetchBlockUndoData fetches block undo data from the database.
func DbFetchBlockUndoData(dbTx database.Tx, height uint32) ([]UndoTicketData, error) {
meta := dbTx.Metadata()
bucket := meta.Bucket(dbnamespace.StakeBlockUndoDataBucketName)
k := make([]byte, 4)
dbnamespace.ByteOrder.PutUint32(k, height)
v := bucket.Get(k)
if v == nil {
return nil, ticketDBError(ErrMissingKey,
fmt.Sprintf("missing key %v for block undo data", height))
}
return deserializeBlockUndoData(v)
}
// DbPutBlockUndoData inserts block undo data into the database for a given height.
func DbPutBlockUndoData(dbTx database.Tx, height uint32, utds []UndoTicketData) error {
meta := dbTx.Metadata()
bucket := meta.Bucket(dbnamespace.StakeBlockUndoDataBucketName)
k := make([]byte, 4)
dbnamespace.ByteOrder.PutUint32(k, height)
v := serializeBlockUndoData(utds)
return bucket.Put(k[:], v[:])
}
// DbDropBlockUndoData drops block undo data from the database at a given height.
func DbDropBlockUndoData(dbTx database.Tx, height uint32) error {
meta := dbTx.Metadata()
bucket := meta.Bucket(dbnamespace.StakeBlockUndoDataBucketName)
k := make([]byte, 4)
dbnamespace.ByteOrder.PutUint32(k, height)
return bucket.Delete(k)
}
// TicketHashes is a list of ticket hashes that will mature in TicketMaturity
// many blocks from the block in which they were included.
type TicketHashes []chainhash.Hash
// serializeTicketHashes serializes a list of ticket hashes.
func serializeTicketHashes(ths TicketHashes) []byte {
b := make([]byte, len(ths)*chainhash.HashSize)
offset := 0
for _, th := range ths {
copy(b[offset:offset+chainhash.HashSize], th[:])
offset += chainhash.HashSize
}
return b
}
// deserializeTicketHashes deserializes a list of ticket hashes. Empty but
// non-nil slices are deserialized empty.
func deserializeTicketHashes(b []byte) (TicketHashes, error) {
if b != nil && len(b) == 0 {
return make(TicketHashes, 0), nil
}
if len(b) < chainhash.HashSize {
return nil, ticketDBError(ErrTicketHashesShortRead, "short read when "+
"deserializing ticket hashes")
}
if len(b)%chainhash.HashSize != 0 {
return nil, ticketDBError(ErrTicketHashesCorrupt, "corrupt data found "+
"when deserializing ticket hashes")
}
entries := len(b) / chainhash.HashSize
ths := make(TicketHashes, entries)
offset := 0
for i := 0; i < entries; i++ {
hash, err := chainhash.NewHash(
b[offset : offset+chainhash.HashSize])
if err != nil {
return nil, ticketDBError(ErrTicketHashesCorrupt, "corrupt hash found "+
"when deserializing ticket hashes")
}
offset += chainhash.HashSize
ths[i] = *hash
}
return ths, nil
}
// DbFetchNewTickets fetches new tickets for a mainchain block from the database.
func DbFetchNewTickets(dbTx database.Tx, height uint32) (TicketHashes, error) {
meta := dbTx.Metadata()
bucket := meta.Bucket(dbnamespace.TicketsInBlockBucketName)
k := make([]byte, 4)
dbnamespace.ByteOrder.PutUint32(k, height)
v := bucket.Get(k)
if v == nil {
return nil, ticketDBError(ErrMissingKey,
fmt.Sprintf("missing key %v for new tickets", height))
}
return deserializeTicketHashes(v)
}
// DbPutNewTickets inserts the new tickets for a mainchain block into the
// database.
func DbPutNewTickets(dbTx database.Tx, height uint32, ths TicketHashes) error {
meta := dbTx.Metadata()
bucket := meta.Bucket(dbnamespace.TicketsInBlockBucketName)
k := make([]byte, 4)
dbnamespace.ByteOrder.PutUint32(k, height)
v := serializeTicketHashes(ths)
return bucket.Put(k[:], v[:])
}
// DbDropNewTickets drops the new tickets for a mainchain block at some height.
func DbDropNewTickets(dbTx database.Tx, height uint32) error {
meta := dbTx.Metadata()
bucket := meta.Bucket(dbnamespace.TicketsInBlockBucketName)
k := make([]byte, 4)
dbnamespace.ByteOrder.PutUint32(k, height)
return bucket.Delete(k)
}
// DbDeleteTicket removes a ticket from one of the ticket database buckets. This
// differs from the bucket deletion method in that it will fail if the value
// itself is missing.
func DbDeleteTicket(dbTx database.Tx, ticketBucket []byte, hash *chainhash.Hash) error {
meta := dbTx.Metadata()
bucket := meta.Bucket(ticketBucket)
// Check to see if the value exists before we delete it.
v := bucket.Get(hash[:])
if v == nil {
return ticketDBError(ErrMissingKey, fmt.Sprintf("missing key %v "+
"to delete", hash))
}
return bucket.Delete(hash[:])
}
// DbPutTicket inserts a ticket into one of the ticket database buckets.
func DbPutTicket(dbTx database.Tx, ticketBucket []byte, hash *chainhash.Hash,
height uint32, missed, revoked, spent, expired bool) error {
meta := dbTx.Metadata()
bucket := meta.Bucket(ticketBucket)
k := hash[:]
v := make([]byte, 5)
dbnamespace.ByteOrder.PutUint32(v, height)
v[4] = undoBitFlagsToByte(missed, revoked, spent, expired)
return bucket.Put(k[:], v[:])
}
// DbLoadAllTickets loads all the tickets from the given database bucket into a treap.
func DbLoadAllTickets(dbTx database.Tx, ticketBucket []byte) (*tickettreap.Immutable, error) {
meta := dbTx.Metadata()
bucket := meta.Bucket(ticketBucket)
treap := tickettreap.NewImmutable()
err := bucket.ForEach(func(k []byte, v []byte) error {
if len(v) < 5 {
return ticketDBError(ErrLoadAllTickets, fmt.Sprintf("short "+
"read for ticket key %x when loading tickets", k))
}
h, err := chainhash.NewHash(k)
if err != nil {
return err
}
treapKey := tickettreap.Key(*h)
missed, revoked, spent, expired := undoBitFlagsFromByte(v[4])
treapValue := &tickettreap.Value{
Height: dbnamespace.ByteOrder.Uint32(v[0:4]),
Missed: missed,
Revoked: revoked,
Spent: spent,
Expired: expired,
}
treap = treap.Put(treapKey, treapValue)
return nil
})
if err != nil {
return nil, ticketDBError(ErrLoadAllTickets, fmt.Sprintf("failed to "+
"load all tickets for the bucket %s", string(ticketBucket)))
}
return treap, nil
}
// DbCreate initializes all the buckets required for the database and stores
// the current database version information.
func DbCreate(dbTx database.Tx) error {
meta := dbTx.Metadata()
// Create the bucket that houses information about the database's
// creation and version.
_, err := meta.CreateBucket(dbnamespace.StakeDbInfoBucketName)
if err != nil {
return err
}
dbInfo := &DatabaseInfo{
Version: currentDatabaseVersion,
Date: time.Now(),
UpgradeStarted: false,
}
err = DbPutDatabaseInfo(dbTx, dbInfo)
if err != nil {
return err
}
// Create the bucket that houses the live tickets of the best node.
_, err = meta.CreateBucket(dbnamespace.LiveTicketsBucketName)
if err != nil {
return err
}
// Create the bucket that houses the missed tickets of the best node.
_, err = meta.CreateBucket(dbnamespace.MissedTicketsBucketName)
if err != nil {
return err
}
// Create the bucket that houses the revoked tickets of the best node.
_, err = meta.CreateBucket(dbnamespace.RevokedTicketsBucketName)
if err != nil {
return err
}
// Create the bucket that houses block undo data for stake states on
// the main chain.
_, err = meta.CreateBucket(dbnamespace.StakeBlockUndoDataBucketName)
if err != nil {
return err
}
// Create the bucket that houses the tickets that were added with
// this block into the main chain.
_, err = meta.CreateBucket(dbnamespace.TicketsInBlockBucketName)
if err != nil {
return err
}
return nil
}
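A rough sketch of the per-height bookkeeping from the "Procedures" comment near the top of this file, reduced to the database calls added above. The helper names are illustrative, and utds and newTickets are assumed to have been computed by the caller while connecting the block; the real connect and disconnect logic also moves tickets between the live, missed, and revoked buckets.

// connectBlockUndoData records, for a newly connected block, both the undo
// entries needed to reverse it and the tickets it adds to the chain.
func connectBlockUndoData(dbTx database.Tx, height uint32,
	utds []UndoTicketData, newTickets TicketHashes) error {

	// Record how to undo this block's spends, misses, and revocations.
	if err := DbPutBlockUndoData(dbTx, height, utds); err != nil {
		return err
	}

	// Record the tickets added by this block so they can be matured later
	// without re-reading the block itself.
	return DbPutNewTickets(dbTx, height, newTickets)
}

// disconnectBlockUndoData fetches the undo entries for a block being
// disconnected and purges the per-height records for that block.
func disconnectBlockUndoData(dbTx database.Tx, height uint32) ([]UndoTicketData, error) {
	utds, err := DbFetchBlockUndoData(dbTx, height)
	if err != nil {
		return nil, err
	}
	if err := DbDropBlockUndoData(dbTx, height); err != nil {
		return nil, err
	}
	if err := DbDropNewTickets(dbTx, height); err != nil {
		return nil, err
	}
	return utds, nil
}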

View File

@ -0,0 +1,516 @@
// chainio_test.go
package ticketdb
import (
"bytes"
"encoding/hex"
"os"
"path/filepath"
"reflect"
"testing"
"time"
"github.com/decred/dcrd/blockchain/stake/internal/dbnamespace"
"github.com/decred/dcrd/blockchain/stake/internal/tickettreap"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/database"
_ "github.com/decred/dcrd/database/ffldb"
)
const (
// testDbType is the database backend type to use for the tests.
testDbType = "ffldb"
// testDbRoot is the root directory used to create all test databases.
testDbRoot = "testdbs"
)
// hexToBytes converts a hex string to bytes, without returning any errors.
func hexToBytes(s string) []byte {
b, _ := hex.DecodeString(s)
return b
}
// newShaHashFromStr converts a 64 character hex string to a chainhash.Hash.
func newShaHashFromStr(s string) *chainhash.Hash {
h, _ := chainhash.NewHashFromStr(s)
return h
}
// TestDatabaseInfoSerialization ensures serializing and deserializing the
// database version information works as expected.
func TestDatabaseInfoSerialization(t *testing.T) {
t.Parallel()
tests := []struct {
name string
info DatabaseInfo
serialized []byte
}{
{
name: "not upgrade",
info: DatabaseInfo{
Version: currentDatabaseVersion,
Date: time.Unix(int64(0x57acca95), 0),
UpgradeStarted: false,
},
serialized: hexToBytes("0100000095caac57"),
},
{
name: "upgrade",
info: DatabaseInfo{
Version: currentDatabaseVersion,
Date: time.Unix(int64(0x57acca95), 0),
UpgradeStarted: true,
},
serialized: hexToBytes("0100008095caac57"),
},
}
for i, test := range tests {
// Ensure the state serializes to the expected value.
gotBytes := serializeDatabaseInfo(&test.info)
if !bytes.Equal(gotBytes, test.serialized) {
t.Errorf("serializeDatabaseInfo #%d (%s): mismatched "+
"bytes - got %x, want %x", i, test.name,
gotBytes, test.serialized)
continue
}
// Ensure the serialized bytes are decoded back to the expected
// state.
info, err := deserializeDatabaseInfo(test.serialized)
if err != nil {
t.Errorf("deserializeDatabaseInfo #%d (%s) "+
"unexpected error: %v", i, test.name, err)
continue
}
if !reflect.DeepEqual(info, &test.info) {
t.Errorf("deserializeDatabaseInfo #%d (%s) "+
"mismatched state - got %v, want %v", i,
test.name, info, test.info)
continue
}
}
}
// TestDbInfoDeserializeErrors performs negative tests against
// deserializing the database information to ensure error paths
// work as expected.
func TestDbInfoDeserializeErrors(t *testing.T) {
t.Parallel()
tests := []struct {
name string
serialized []byte
errCode ErrorCode
}{
{
name: "short read",
serialized: hexToBytes("0000"),
errCode: ErrDatabaseInfoShortRead,
},
}
for _, test := range tests {
// Ensure the expected error type is returned.
_, err := deserializeDatabaseInfo(test.serialized)
ticketDBErr, ok := err.(DBError)
if !ok {
t.Errorf("couldn't convert deserializeDatabaseInfo error "+
"to ticket db error (err: %v)", err)
continue
}
if ticketDBErr.GetCode() != test.errCode {
t.Errorf("deserializeDatabaseInfo (%s): expected error type "+
"does not match - got %v, want %v", test.name,
ticketDBErr.ErrorCode, test.errCode)
continue
}
}
}
// TestBestChainStateSerialization ensures serializing and deserializing the
// best chain state works as expected.
func TestBestChainStateSerialization(t *testing.T) {
t.Parallel()
hash1 := chainhash.HashFuncH([]byte{0x00})
hash2 := chainhash.HashFuncH([]byte{0x01})
hash3 := chainhash.HashFuncH([]byte{0x02})
hash4 := chainhash.HashFuncH([]byte{0x03})
hash5 := chainhash.HashFuncH([]byte{0x04})
tests := []struct {
name string
state BestChainState
serialized []byte
}{
{
name: "generic block",
state: BestChainState{
Hash: *newShaHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
Height: 12323,
Live: 29399,
Missed: 293929392,
Revoked: 349839493,
PerBlock: 5,
NextWinners: []chainhash.Hash{hash1, hash2, hash3, hash4, hash5},
},
serialized: hexToBytes("6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d619000000000023300000d7720000b0018511000000008520da140000000005000ce8d4ef4dd7cd8d62dfded9d4edb0a774ae6a41929a74da23109e8f11139c874a6c419a1e25c85327115c4ace586decddfe2990ed8f3d4d801871158338501d49af37ab5270015fe25276ea5a3bb159d852943df23919522a202205fb7d175cb706d561742ad3671703c247eb927ee8a386369c79644131cdeb2c5c26bf6c5d4c6eb9e38415034f4c93d3304d10bef38bf0ad420eefd0f72f940f11c5857786"),
},
}
for i, test := range tests {
// Ensure the state serializes to the expected value.
gotBytes := serializeBestChainState(test.state)
if !bytes.Equal(gotBytes, test.serialized) {
t.Errorf("serializeBestChainState #%d (%s): mismatched "+
"bytes - got %x, want %x", i, test.name,
gotBytes, test.serialized)
continue
}
// Ensure the serialized bytes are decoded back to the expected
// state.
state, err := deserializeBestChainState(test.serialized)
if err != nil {
t.Errorf("deserializeBestChainState #%d (%s) "+
"unexpected error: %v", i, test.name, err)
continue
}
if !reflect.DeepEqual(state, test.state) {
t.Errorf("deserializeBestChainState #%d (%s) "+
"mismatched state - got %v, want %v", i,
test.name, state, test.state)
continue
}
}
}
// TestBestChainStateDeserializeErrors performs negative tests against
// deserializing the chain state to ensure error paths work as expected.
func TestBestChainStateDeserializeErrors(t *testing.T) {
t.Parallel()
tests := []struct {
name string
serialized []byte
errCode ErrorCode
}{
{
name: "short read",
serialized: hexToBytes("0000"),
errCode: ErrChainStateShortRead,
},
}
for _, test := range tests {
// Ensure the expected error type is returned.
_, err := deserializeBestChainState(test.serialized)
ticketDBErr, ok := err.(DBError)
if !ok {
t.Errorf("couldn't convert deserializeBestChainState error "+
"to ticket db error (err: %v)", err)
continue
}
if ticketDBErr.GetCode() != test.errCode {
t.Errorf("deserializeBestChainState (%s): expected error type "+
"does not match - got %v, want %v", test.name,
ticketDBErr.ErrorCode, test.errCode)
continue
}
}
}
// TestBlockUndoDataSerializing ensures serializing and deserializing the
// block undo data works as expected.
func TestBlockUndoDataSerializing(t *testing.T) {
t.Parallel()
tests := []struct {
name string
utds []UndoTicketData
serialized []byte
}{
{
name: "two ticket datas",
utds: []UndoTicketData{
UndoTicketData{
TicketHash: chainhash.HashFuncH([]byte{0x00}),
TicketHeight: 123456,
Missed: true,
Revoked: false,
Spent: false,
Expired: true,
},
UndoTicketData{
TicketHash: chainhash.HashFuncH([]byte{0x01}),
TicketHeight: 122222,
Missed: false,
Revoked: true,
Spent: true,
Expired: false,
},
},
serialized: hexToBytes("0ce8d4ef4dd7cd8d62dfded9d4edb0a774ae6a41929a74da23109e8f11139c8740e20100094a6c419a1e25c85327115c4ace586decddfe2990ed8f3d4d801871158338501d6edd010006"),
},
}
for i, test := range tests {
// Ensure the state serializes to the expected value.
gotBytes := serializeBlockUndoData(test.utds)
if !bytes.Equal(gotBytes, test.serialized) {
t.Errorf("serializeBlockUndoData #%d (%s): mismatched "+
"bytes - got %x, want %x", i, test.name,
gotBytes, test.serialized)
continue
}
// Ensure the serialized bytes are decoded back to the expected
// state.
utds, err := deserializeBlockUndoData(test.serialized)
if err != nil {
t.Errorf("deserializeBlockUndoData #%d (%s) "+
"unexpected error: %v", i, test.name, err)
continue
}
if !reflect.DeepEqual(utds, test.utds) {
t.Errorf("deserializeBlockUndoData #%d (%s) "+
"mismatched state - got %v, want %v", i,
test.name, utds, test.utds)
continue
}
}
}
// TestBlockUndoDataDeserializingErrors performs negative tests against decoding block
// undo data to ensure error paths work as expected.
func TestBlockUndoDataDeserializingErrors(t *testing.T) {
t.Parallel()
tests := []struct {
name string
serialized []byte
errCode ErrorCode
}{
{
name: "short read",
serialized: hexToBytes("00"),
errCode: ErrUndoDataShortRead,
},
{
name: "bad size",
serialized: hexToBytes("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
errCode: ErrUndoDataCorrupt,
},
}
for _, test := range tests {
// Ensure the expected error type is returned.
_, err := deserializeBlockUndoData(test.serialized)
ticketDBErr, ok := err.(DBError)
if !ok {
t.Errorf("couldn't convert deserializeBlockUndoData error "+
"to ticket db error (err: %v)", err)
continue
}
if ticketDBErr.GetCode() != test.errCode {
t.Errorf("deserializeBlockUndoData (%s): expected error type "+
"does not match - got %v, want %v", test.name,
ticketDBErr.ErrorCode, test.errCode)
continue
}
}
}
// TestTicketHashesSerializing ensures serializing and deserializing the
// ticket hashes works as expected.
func TestTicketHashesSerializing(t *testing.T) {
t.Parallel()
hash1 := chainhash.HashFuncH([]byte{0x00})
hash2 := chainhash.HashFuncH([]byte{0x01})
tests := []struct {
name string
ths TicketHashes
serialized []byte
}{
{
name: "two ticket hashes",
ths: TicketHashes{
hash1,
hash2,
},
serialized: hexToBytes("0ce8d4ef4dd7cd8d62dfded9d4edb0a774ae6a41929a74da23109e8f11139c874a6c419a1e25c85327115c4ace586decddfe2990ed8f3d4d801871158338501d"),
},
}
for i, test := range tests {
// Ensure the state serializes to the expected value.
gotBytes := serializeTicketHashes(test.ths)
if !bytes.Equal(gotBytes, test.serialized) {
t.Errorf("serializeBlockUndoData #%d (%s): mismatched "+
"bytes - got %x, want %x", i, test.name,
gotBytes, test.serialized)
continue
}
// Ensure the serialized bytes are decoded back to the expected
// state.
ths, err := deserializeTicketHashes(test.serialized)
if err != nil {
t.Errorf("deserializeBlockUndoData #%d (%s) "+
"unexpected error: %v", i, test.name, err)
continue
}
if !reflect.DeepEqual(ths, test.ths) {
t.Errorf("deserializeBlockUndoData #%d (%s) "+
"mismatched state - got %v, want %v", i,
test.name, ths, test.ths)
continue
}
}
}
// TestTicketHashesDeserializingErrors performs negative tests against decoding
// ticket hashes to ensure error paths work as expected.
func TestTicketHashesDeserializingErrors(t *testing.T) {
t.Parallel()
tests := []struct {
name string
serialized []byte
errCode ErrorCode
}{
{
name: "short read",
serialized: hexToBytes("00"),
errCode: ErrTicketHashesShortRead,
},
{
name: "bad size",
serialized: hexToBytes("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
errCode: ErrTicketHashesCorrupt,
},
}
for _, test := range tests {
// Ensure the expected error type is returned.
_, err := deserializeTicketHashes(test.serialized)
ticketDBErr, ok := err.(DBError)
if !ok {
t.Errorf("couldn't convert deserializeTicketHashes error "+
"to ticket db error (err: %v)", err)
continue
}
if ticketDBErr.GetCode() != test.errCode {
t.Errorf("deserializeTicketHashes (%s): expected error type "+
"does not match - got %v, want %v", test.name,
ticketDBErr.ErrorCode, test.errCode)
continue
}
}
}
// TestLiveDatabase tests various functions that require a live database.
func TestLiveDatabase(t *testing.T) {
// Create a new database to store the accepted stake node data into.
dbName := "ffldb_ticketdb_test"
dbPath := filepath.Join(testDbRoot, dbName)
_ = os.RemoveAll(dbPath)
testDb, err := database.Create(testDbType, dbPath, chaincfg.SimNetParams.Net)
if err != nil {
t.Fatalf("error creating db: %v", err)
}
// Setup a teardown.
defer os.RemoveAll(dbPath)
defer os.RemoveAll(testDbRoot)
defer testDb.Close()
// Initialize the database, then try to read the version.
err = testDb.Update(func(dbTx database.Tx) error {
return DbCreate(dbTx)
})
if err != nil {
t.Fatalf("%v", err.Error())
}
var dbi *DatabaseInfo
err = testDb.View(func(dbTx database.Tx) error {
dbi, err = DbFetchDatabaseInfo(dbTx)
if err != nil {
return err
}
return nil
})
if err != nil {
t.Fatalf("%v", err.Error())
}
if dbi.Version != currentDatabaseVersion {
t.Fatalf("bad version after reading from DB; want %v, got %v",
currentDatabaseVersion, dbi.Version)
}
// Test storing arbitrary ticket treaps.
ticketMap := make(map[tickettreap.Key]*tickettreap.Value)
tickets := make([]chainhash.Hash, 5)
for i := 0; i < 4; i++ {
h := chainhash.HashFuncH(bytes.Repeat([]byte{0x01}, i))
ticketMap[tickettreap.Key(h)] = &tickettreap.Value{
Height: 12345 + uint32(i),
Missed: i%2 == 0,
Revoked: i%2 != 0,
Spent: i%2 == 0,
Expired: i%2 != 0,
}
tickets[i] = h
}
err = testDb.Update(func(dbTx database.Tx) error {
for k, v := range ticketMap {
h := chainhash.Hash(k)
err = DbPutTicket(dbTx, dbnamespace.LiveTicketsBucketName, &h,
v.Height, v.Missed, v.Revoked, v.Spent, v.Expired)
if err != nil {
return err
}
}
return nil
})
if err != nil {
t.Fatalf("%v", err.Error())
}
var treap *tickettreap.Immutable
ticketMap2 := make(map[tickettreap.Key]*tickettreap.Value)
err = testDb.View(func(dbTx database.Tx) error {
treap, err = DbLoadAllTickets(dbTx, dbnamespace.LiveTicketsBucketName)
if err != nil {
return err
}
return nil
})
if err != nil {
t.Fatalf("%v", err.Error())
}
treap.ForEach(func(k tickettreap.Key, v *tickettreap.Value) bool {
ticketMap2[k] = v
return true
})
if !reflect.DeepEqual(ticketMap, ticketMap2) {
t.Fatalf("not same ticket maps")
}
}

View File

@ -0,0 +1,95 @@
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ticketdb
import (
"fmt"
)
// ErrorCode identifies a kind of error.
type ErrorCode int
// These constants are used to identify a specific RuleError.
const (
// ErrUndoDataShortRead indicates that the given undo serialized data
// was too small.
ErrUndoDataShortRead = iota
// ErrUndoDataCorrupt indicates that the data for undoing ticket data
// in a serialized entry was corrupt.
ErrUndoDataCorrupt
// ErrTicketHashesShortRead indicates that the given ticket hashes
// serialized data was too small.
ErrTicketHashesShortRead
// ErrTicketHashesCorrupt indicates that the data for ticket hashes
// in a serialized entry was corrupt.
ErrTicketHashesCorrupt
// ErrUninitializedBucket indicates that a database bucket was not
// initialized and therefore could not be written to or read from.
ErrUninitializedBucket
// ErrMissingKey indicates that a key was not found in a bucket.
ErrMissingKey
// ErrChainStateShortRead indicates that the given chain state data
// was too small.
ErrChainStateShortRead
// ErrDatabaseInfoShortRead indicates that the given database information
// was too small.
ErrDatabaseInfoShortRead
// ErrLoadAllTickets indicates that there was an error loading the tickets
// from the database, presumably at startup.
ErrLoadAllTickets
)
// Map of ErrorCode values back to their constant names for pretty printing.
var errorCodeStrings = map[ErrorCode]string{
ErrUndoDataShortRead: "ErrUndoDataShortRead",
ErrUndoDataCorrupt: "ErrUndoDataCorrupt",
ErrTicketHashesShortRead: "ErrTicketHashesShortRead",
ErrTicketHashesCorrupt: "ErrTicketHashesCorrupt",
ErrUninitializedBucket: "ErrUninitializedBucket",
ErrMissingKey: "ErrMissingKey",
ErrChainStateShortRead: "ErrChainStateShortRead",
ErrDatabaseInfoShortRead: "ErrDatabaseInfoShortRead",
ErrLoadAllTickets: "ErrLoadAllTickets",
}
// String returns the ErrorCode as a human-readable name.
func (e ErrorCode) String() string {
if s := errorCodeStrings[e]; s != "" {
return s
}
return fmt.Sprintf("Unknown ErrorCode (%d)", int(e))
}
// DBError identifies an error in the stake database for tickets.
// The caller can use type assertions to determine if a failure was
// specifically due to a database error and access the ErrorCode field to
// ascertain the specific reason for the failure.
type DBError struct {
ErrorCode ErrorCode // Describes the kind of error
Description string // Human readable description of the issue
}
// Error satisfies the error interface and prints human-readable errors.
func (e DBError) Error() string {
return e.Description
}
// GetCode returns the ErrorCode that describes the error.
func (e DBError) GetCode() ErrorCode {
return e.ErrorCode
}
// ticketDBError creates a DBError given a set of arguments.
func ticketDBError(c ErrorCode, desc string) DBError {
return DBError{ErrorCode: c, Description: desc}
}
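A short sketch of the type assertion pattern described in the DBError comment above; the helper name is illustrative only.

// isMissingKey reports whether err is a ticket database error with the
// ErrMissingKey code.
func isMissingKey(err error) bool {
	dbErr, ok := err.(DBError)
	return ok && dbErr.GetCode() == ErrMissingKey
}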

View File

@ -0,0 +1,66 @@
// Copyright (c) 2014 Conformal Systems LLC.
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ticketdb_test
import (
"testing"
"github.com/decred/dcrd/blockchain/stake/internal/ticketdb"
)
// TestErrorCodeStringer tests the stringized output for the ErrorCode type.
func TestErrorCodeStringer(t *testing.T) {
tests := []struct {
in ticketdb.ErrorCode
want string
}{
{ticketdb.ErrUndoDataShortRead, "ErrUndoDataShortRead"},
{ticketdb.ErrUndoDataCorrupt, "ErrUndoDataCorrupt"},
{ticketdb.ErrTicketHashesShortRead, "ErrTicketHashesShortRead"},
{ticketdb.ErrTicketHashesCorrupt, "ErrTicketHashesCorrupt"},
{ticketdb.ErrUninitializedBucket, "ErrUninitializedBucket"},
{ticketdb.ErrMissingKey, "ErrMissingKey"},
{ticketdb.ErrChainStateShortRead, "ErrChainStateShortRead"},
{ticketdb.ErrDatabaseInfoShortRead, "ErrDatabaseInfoShortRead"},
{ticketdb.ErrLoadAllTickets, "ErrLoadAllTickets"},
{0xffff, "Unknown ErrorCode (65535)"},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
result := test.in.String()
if result != test.want {
t.Errorf("String #%d\n got: %s want: %s", i, result,
test.want)
continue
}
}
}
// TestDBError tests the error output for the DBError type.
func TestDBError(t *testing.T) {
tests := []struct {
in ticketdb.DBError
want string
}{
{ticketdb.DBError{Description: "duplicate block"},
"duplicate block",
},
{ticketdb.DBError{Description: "human-readable error"},
"human-readable error",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
result := test.in.Error()
if result != test.want {
t.Errorf("Error #%d\n got: %s want: %s", i, result,
test.want)
continue
}
}
}

View File

@ -75,6 +75,12 @@ type Key chainhash.Hash
type Value struct {
// Height is the block height of the associated ticket.
Height uint32
// Flags defining the ticket state.
Missed bool
Revoked bool
Spent bool
Expired bool
}
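// For illustration, the flag fields above can be set by name rather than
// positionally when building treap entries. This helper is hypothetical and
// not part of the package API.
func exampleTicketValues() (*Value, *Value) {
	// A live ticket purchased at height 1000: no state flags set.
	live := &Value{Height: 1000}

	// A ticket that was missed at its target height and later revoked.
	missedAndRevoked := &Value{Height: 1000, Missed: true, Revoked: true}

	return live, missedAndRevoked
}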
// treapNode represents a node in the treap.

View File

@ -5,7 +5,10 @@
package tickettreap
import "bytes"
import (
"bytes"
"sort"
)
// cloneTreapNode returns a shallow copy of the passed node.
func cloneTreapNode(node *treapNode) *treapNode {
@ -348,6 +351,43 @@ func (t *Immutable) ForEach(fn func(k Key, v *Value) bool) {
}
}
// FetchWinnersAndExpired is a ticket database specific function which iterates
// over the entire treap and finds winners at selected indexes and all tickets
// whose height is less than or equal to the passed height. These are returned
// as slices of pointers to keys, which can be recast as []*chainhash.Hash.
// This is only used for benchmarking and is not consensus compatible.
func (t *Immutable) FetchWinnersAndExpired(idxs []int, height uint32) ([]*Key, []*Key) {
if idxs == nil {
return nil, nil
}
sortedIdxs := sort.IntSlice(idxs)
sort.Sort(sortedIdxs)
// TODO buffer winners according to the TicketsPerBlock value from
// chaincfg?
idx := 0
var winners []*Key
var expired []*Key
winnerIdx := 0
t.ForEach(func(k Key, v *Value) bool {
if v.Height <= height {
expired = append(expired, &k)
}
if idx == sortedIdxs[winnerIdx] {
winners = append(winners, &k)
if winnerIdx+1 < len(sortedIdxs) {
winnerIdx++
}
}
idx++
return true
})
return winners, expired
}
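// A small sketch of driving FetchWinnersAndExpired with a toy treap, in the
// benchmarking spirit described above. The helper name, keys, indexes, and
// heights are made up for illustration and are not part of the package API.
func exampleFetchWinnersAndExpired() (winners, expired []*Key) {
	t := NewImmutable()
	for i := uint32(0); i < 8; i++ {
		var k Key
		k[0] = byte(i) // toy keys; real keys are ticket hashes
		t = t.Put(k, &Value{Height: i})
	}

	// Treat the entries at in-order positions 1 and 3 as "winners" and
	// anything at height <= 2 as expired.
	return t.FetchWinnersAndExpired([]int{1, 3}, 2)
}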
// NewImmutable returns a new empty immutable treap ready for use. See the
// documentation for the Immutable structure for more details.
func NewImmutable() *Immutable {

View File

@ -8,8 +8,13 @@ package tickettreap
import (
"bytes"
"crypto/sha256"
"math/rand"
"reflect"
"runtime"
"testing"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/chaincfg/chainhash"
)
// TestImmutableEmpty ensures calling functions on an empty immutable treap
@ -499,3 +504,116 @@ func TestImmutableSnapshot(t *testing.T) {
expectedSize -= (nodeFieldsSize + uint64(len(key)) + 4)
}
}
// randHash generates a "random" hash using a deterministic source.
func randHash(r rand.Source) *chainhash.Hash {
hash := new(chainhash.Hash)
for i := 0; i < chainhash.HashSize/2; i++ {
random := uint64(r.Int63())
randByte1 := random % 255
randByte2 := (random >> 8) % 255
hash[2*i] = uint8(randByte1)
hash[2*i+1] = uint8(randByte2)
}
return hash
}
// pickRandWinners picks TicketsPerBlock random "winners" and returns
// their indexes.
func pickRandWinners(sz int, r rand.Source) []int {
if sz == 0 {
panic("bad sz!")
}
perBlock := int(chaincfg.MainNetParams.TicketsPerBlock)
winners := make([]int, perBlock)
for i := 0; i < perBlock; i++ {
winners[i] = int(r.Int63() % int64(sz))
}
return winners
}
// TestImmutableMemory measures the memory used when many nodes are cloned
// and modified in memory, analogous to what is actually seen on the Decred
// mainnet, and then analyzes the relative memory usage with runtime stats.
func TestImmutableMemory(t *testing.T) {
// Collect information about memory at the start.
runtime.GC()
memStats := new(runtime.MemStats)
runtime.ReadMemStats(memStats)
initAlloc := memStats.Alloc
initTotal := memStats.TotalAlloc
// Insert a bunch of sequential keys while checking several of the treap
// functions work as expected.
randSource := rand.NewSource(12345)
numItems := 40960
numNodes := 128
nodeTreaps := make([]*Immutable, numNodes)
testTreap := NewImmutable()
// Populate.
for i := 0; i < numItems; i++ {
randomHash := randHash(randSource)
testTreap = testTreap.Put(Key(*randomHash),
&Value{uint32(randSource.Int63()), false, true, false, true})
}
nodeTreaps[0] = testTreap
// Start populating the "nodes". Ignore expiring tickets for the
// sake of testing. For each node, remove 5 "random" tickets and
// insert 5 "random" tickets.
maxHeight := uint32(0xFFFFFFFF)
lastTreap := nodeTreaps[0]
lastTotal := initTotal
allocsPerNode := make([]uint64, numNodes)
for i := 1; i < numNodes; i++ {
treapCopy := lastTreap
sz := treapCopy.Len()
winnerIdxs := pickRandWinners(sz, randSource)
winners, _ := treapCopy.FetchWinnersAndExpired(winnerIdxs, maxHeight)
for _, k := range winners {
treapCopy = treapCopy.Delete(*k)
}
perBlock := int(chaincfg.MainNetParams.TicketsPerBlock)
for i := 0; i < perBlock; i++ {
randomHash := randHash(randSource)
treapCopy = treapCopy.Put(Key(*randomHash),
&Value{uint32(randSource.Int63()), false, true, false, true})
}
runtime.ReadMemStats(memStats)
finalTotal := memStats.TotalAlloc
allocsPerNode[i] = finalTotal - lastTotal
lastTotal = finalTotal
nodeTreaps[i] = treapCopy
lastTreap = treapCopy
}
avgUint64 := func(uis []uint64) uint64 {
var sum uint64
for i := range uis {
sum += uis[i]
}
return sum / uint64(len(uis))
}
runtime.GC()
runtime.ReadMemStats(memStats)
finalAlloc := memStats.Alloc
t.Logf("Ticket treaps for %v nodes allocated %v many bytes total after GC",
numNodes, finalAlloc-initAlloc)
t.Logf("Ticket treaps allocated an average of %v many bytes per node",
avgUint64(allocsPerNode))
// Keep all the treaps alive in memory so the GC above doesn't
// collect them before the final measurement.
lenTest := nodeTreaps[0].count == nodeTreaps[0].Len()
if !lenTest {
t.Errorf("bad len test")
}
}

View File

@ -9,7 +9,10 @@ package stake
import (
"encoding/binary"
"fmt"
"math"
"sort"
"github.com/decred/dcrd/blockchain/stake/internal/tickettreap"
"github.com/decred/dcrd/chaincfg/chainhash"
)
@ -27,11 +30,11 @@ type Hash256PRNG struct {
// NewHash256PRNG creates a pointer to a newly created hash256PRNG.
func NewHash256PRNG(seed []byte) *Hash256PRNG {
// idx and lastHash are automatically initialized
// as 0. We initialize the seed by appending a constant
// as 0. We initialize the seed by appending a constant
// to it and hashing to give 32 bytes. This ensures
// that regardless of the input, the PRNG is always
// doing a short number of rounds because it only
// has to hash < 64 byte messages. The constant is
// has to hash < 64 byte messages. The constant is
// derived from the hexadecimal representation of
// pi.
cst := []byte{0x24, 0x3F, 0x6A, 0x88,
@ -127,8 +130,8 @@ func intInSlice(i int, sl []int) bool {
return false
}
// FindTicketIdxs finds n many unique index numbers for a list length size.
func FindTicketIdxs(size int64, n int, prng *Hash256PRNG) ([]int, error) {
// findTicketIdxs finds n unique index numbers for a list of length size.
func findTicketIdxs(size int64, n int, prng *Hash256PRNG) ([]int, error) {
if size < int64(n) {
return nil, fmt.Errorf("list size too small")
}
@ -150,3 +153,109 @@ func FindTicketIdxs(size int64, n int, prng *Hash256PRNG) ([]int, error) {
return list, nil
}
// FindTicketIdxs is the exported version of findTicketIdxs used for testing.
func FindTicketIdxs(size int64, n int, prng *Hash256PRNG) ([]int, error) {
return findTicketIdxs(size, n, prng)
}
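// As a usage sketch, a header-seeded PRNG drives the index selection. The
// helper below is hypothetical and not part of the package API; headerB would
// normally be the serialized parent block header and poolSize the size of the
// live ticket pool.
func exampleIdxSelection(headerB []byte, poolSize int64) ([]int, error) {
	prng := NewHash256PRNG(headerB)

	// TicketsPerBlock is 5 on mainnet.
	return findTicketIdxs(poolSize, 5, prng)
}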
// fetchWinners is a ticket database specific function which iterates over the
// entire treap and finds winners at selected indexes. These are returned
// as a slice of pointers to keys, which can be recast as []*chainhash.Hash.
// Importantly, it maintains the list of winners in the same order as specified
// in the original idxs passed to the function.
func fetchWinners(idxs []int, t *tickettreap.Immutable) ([]*tickettreap.Key, error) {
if idxs == nil {
return nil, fmt.Errorf("empty idxs list")
}
if t == nil || t.Len() == 0 {
return nil, fmt.Errorf("missing or empty treap")
}
// maxInt returns the maximum integer from a list of integers.
maxInt := func(idxs []int) int {
max := math.MinInt32
for _, i := range idxs {
if i > max {
max = i
}
}
return max
}
max := maxInt(idxs)
if max >= t.Len() {
return nil, fmt.Errorf("idx %v out of bounds", max)
}
minInt := func(idxs []int) int {
min := math.MaxInt32
for _, i := range idxs {
if i < min {
min = i
}
}
return min
}
min := minInt(idxs)
if min < 0 {
return nil, fmt.Errorf("idx %v out of bounds", min)
}
originalIdxs := make([]int, len(idxs))
copy(originalIdxs[:], idxs[:])
sortedIdxs := sort.IntSlice(idxs)
sort.Sort(sortedIdxs)
// originalIdx returns the original index of the lucky
// number in the idxs slice, so that the order is correct.
originalIdx := func(idx int) int {
for i := range originalIdxs {
if idx == originalIdxs[i] {
return i
}
}
// This will cause a panic. It should never, ever
// happen because the investigated index will always
// be in the original indexes.
return -1
}
idx := 0
winnerIdx := 0
winners := make([]*tickettreap.Key, len(idxs))
t.ForEach(func(k tickettreap.Key, v *tickettreap.Value) bool {
if idx > max {
return false
}
if idx == sortedIdxs[winnerIdx] {
winners[originalIdx(idx)] = &k
if winnerIdx+1 < len(sortedIdxs) {
winnerIdx++
}
}
idx++
return true
})
return winners, nil
}
// fetchExpired is a ticket database specific function which iterates over the
// entire treap and finds tickets whose height is less than or equal to the given height.
// These are returned as a slice of pointers to keys, which can be recast as
// []*chainhash.Hash.
func fetchExpired(height uint32, t *tickettreap.Immutable) []*tickettreap.Key {
var expired []*tickettreap.Key
t.ForEach(func(k tickettreap.Key, v *tickettreap.Value) bool {
if v.Height <= height {
expired = append(expired, &k)
}
return true
})
return expired
}
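// Putting the helpers above together, a rough sketch of pulling winners and
// expired tickets from a live ticket treap for a given block header and
// expiry height. The helper name is hypothetical and not part of the package
// API; tickettreap and chainhash are already imported by this file.
func exampleWinnersAndExpired(liveTickets *tickettreap.Immutable, headerB []byte,
	ticketsPerBlock int, expiryHeight uint32) ([]chainhash.Hash, []*tickettreap.Key, error) {

	prng := NewHash256PRNG(headerB)
	idxs, err := findTicketIdxs(int64(liveTickets.Len()), ticketsPerBlock, prng)
	if err != nil {
		return nil, nil, err
	}

	winnerKeys, err := fetchWinners(idxs, liveTickets)
	if err != nil {
		return nil, nil, err
	}

	// Keys are ticket hashes, so they convert directly.
	winners := make([]chainhash.Hash, 0, len(winnerKeys))
	for _, k := range winnerKeys {
		winners = append(winners, chainhash.Hash(*k))
	}

	return winners, fetchExpired(expiryHeight, liveTickets), nil
}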

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package stake_test
package stake
import (
"bytes"
@ -12,13 +12,13 @@ import (
"sort"
"testing"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/blockchain/stake/internal/tickettreap"
"github.com/decred/dcrd/chaincfg/chainhash"
)
func TestBasicPRNG(t *testing.T) {
seed := chainhash.HashFuncB([]byte{0x01})
prng := stake.NewHash256PRNG(seed)
prng := NewHash256PRNG(seed)
for i := 0; i < 100000; i++ {
prng.Hash256Rand()
}
@ -55,7 +55,7 @@ func swap(s []byte) []byte {
type TicketDataSlice []*TicketData
func NewTicketDataSliceEmpty() TicketDataSlice {
slice := make([]*TicketData, 0)
var slice []*TicketData
return TicketDataSlice(slice)
}
@ -80,19 +80,19 @@ func (tds TicketDataSlice) Len() int { return len(tds) }
func TestLotteryNumSelection(t *testing.T) {
// Test finding ticket indexes.
seed := chainhash.HashFuncB([]byte{0x01})
prng := stake.NewHash256PRNG(seed)
prng := NewHash256PRNG(seed)
ticketsInPool := int64(56789)
tooFewTickets := int64(4)
justEnoughTickets := int64(5)
ticketsPerBlock := 5
_, err := stake.FindTicketIdxs(tooFewTickets, ticketsPerBlock, prng)
_, err := FindTicketIdxs(tooFewTickets, ticketsPerBlock, prng)
if err == nil {
t.Errorf("got unexpected no error for FindTicketIdxs too few tickets " +
"test")
}
tickets, err := stake.FindTicketIdxs(ticketsInPool, ticketsPerBlock, prng)
tickets, err := FindTicketIdxs(ticketsInPool, ticketsPerBlock, prng)
if err != nil {
t.Errorf("got unexpected error for FindTicketIdxs 1 test")
}
@ -104,7 +104,7 @@ func TestLotteryNumSelection(t *testing.T) {
// Ensure that it can find all suitable ticket numbers in a small
// bucket of tickets.
tickets, err = stake.FindTicketIdxs(justEnoughTickets, ticketsPerBlock, prng)
tickets, err = FindTicketIdxs(justEnoughTickets, ticketsPerBlock, prng)
if err != nil {
t.Errorf("got unexpected error for FindTicketIdxs 2 test")
}
@ -122,6 +122,56 @@ func TestLotteryNumSelection(t *testing.T) {
}
}
func TestLotteryNumErrors(t *testing.T) {
seed := chainhash.HashFuncB([]byte{0x01})
prng := NewHash256PRNG(seed)
// Too big pool.
_, err := FindTicketIdxs(1000000000000, 5, prng)
if err == nil {
t.Errorf("Expected pool size too big error")
}
}
func TestFetchWinnersErrors(t *testing.T) {
treap := new(tickettreap.Immutable)
for i := 0; i < 0xff; i++ {
h := chainhash.HashFuncH([]byte{byte(i)})
v := &tickettreap.Value{
Height: uint32(i),
Missed: i%2 == 0,
Revoked: i%2 != 0,
Spent: i%2 == 0,
Expired: i%2 != 0,
}
treap = treap.Put(tickettreap.Key(h), v)
}
// No indexes.
_, err := fetchWinners(nil, treap)
if err == nil {
t.Errorf("Expected nil slice error")
}
// No treap.
_, err = fetchWinners([]int{1, 2, 3, 4, -1}, nil)
if err == nil {
t.Errorf("Expected nil treap error")
}
// Bad index too small.
_, err = fetchWinners([]int{1, 2, 3, 4, -1}, treap)
if err == nil {
t.Errorf("Expected index too small error")
}
// Bad index too big.
_, err = fetchWinners([]int{1, 2, 3, 4, 256}, treap)
if err == nil {
t.Errorf("Expected index too big error")
}
}
func TestTicketSorting(t *testing.T) {
ticketsPerBlock := 5
ticketPoolSize := uint16(8192)
@ -188,7 +238,7 @@ func TestTicketSorting(t *testing.T) {
func BenchmarkHashPRNG(b *testing.B) {
seed := chainhash.HashFuncB([]byte{0x01})
prng := stake.NewHash256PRNG(seed)
prng := NewHash256PRNG(seed)
for n := 0; n < b.N; n++ {
prng.Hash256Rand()

View File

@ -26,7 +26,7 @@ import (
// TxType indicates the type of tx (regular or stake type).
type TxType int
// Possible TxTypes. Statically declare these so that they might be used in
// Possible TxTypes. Statically declare these so that they might be used in
// consensus code.
const (
TxTypeRegular = 0
@ -44,7 +44,7 @@ const (
MaxOutputsPerSStx = MaxInputsPerSStx*2 + 1
// NumInputsPerSSGen is the exact number of inputs for an SSGen
// (stakebase) tx. Inputs are a tagged SStx output and a stakebase (null)
// (stakebase) tx. Inputs are a tagged SStx output and a stakebase (null)
// input.
NumInputsPerSSGen = 2 // SStx and stakebase
@ -488,7 +488,7 @@ func TxSSRtxStakeOutputInfo(tx *dcrutil.Tx, params *chaincfg.Params) ([]bool,
// SStxNullOutputAmounts takes an array of input amounts, change amounts, and a
// ticket purchase amount, calculates the adjusted proportion from the purchase
// amount, stores it in an array, then returns the array. That is, for any given
// amount, stores it in an array, then returns the array. That is, for any given
// SStx, this function calculates the proportional outputs that any single user
// should receive.
// Returns: (1) Fees (2) Output Amounts (3) Error
@ -511,7 +511,7 @@ func SStxNullOutputAmounts(amounts []int64,
contribAmounts := make([]int64, lengthAmounts)
sum := int64(0)
// Now we want to get the adjusted amounts. The algorithm is like this:
// Now we want to get the adjusted amounts. The algorithm is like this:
// 1 foreach amount
// 2 subtract change from input, store
// 3 add this amount to sum
@ -534,7 +534,7 @@ func SStxNullOutputAmounts(amounts []int64,
// CalculateRewards takes a list of SStx adjusted output amounts, the amount used
// to purchase that ticket, and the reward for an SSGen tx and subsequently
// generates what the outputs should be in the SSGen tx. If used for calculating
// generates what the outputs should be in the SSGen tx. If used for calculating
// the outputs for an SSRtx, pass 0 for subsidy.
func CalculateRewards(amounts []int64, amountTicket int64,
subsidy int64) []int64 {
@ -614,7 +614,7 @@ func VerifySStxAmounts(sstxAmts []int64, sstxCalcAmts []int64) error {
// 5. ssSpendPkhs: A list of payee PKHs from OP_SSGEN tagged outputs of the SSGen
// or SSRtx.
// 6. ssSpendCalcAmts: A list of payee amounts that was calculated based on
// the input SStx. These are the maximum possible amounts that can be
// the input SStx. These are the maximum possible amounts that can be
// transacted from this output.
// 7. isVote: Whether this is a vote (true) or revocation (false).
// 8. spendRules: Spending rules for each output in terms of fees allowable
@ -737,7 +737,7 @@ func VerifyStakingPkhsAndAmounts(
// Stake Transaction Identification Functions
// --------------------------------------------------------------------------------
// IsSStx returns whether or not a transaction is an SStx. It does some
// IsSStx returns whether or not a transaction is an SStx. It does some
// simple validation steps to make sure the number of inputs, number of
// outputs, and the input/output scripts are valid.
//
@ -867,7 +867,7 @@ func IsSStx(tx *dcrutil.Tx) (bool, error) {
return true, nil
}
// IsSSGen returns whether or not a transaction is an SSGen tx. It does some
// IsSSGen returns whether or not a transaction is an SSGen tx. It does some
// simple validation steps to make sure the number of inputs, number of
// outputs, and the input/output scripts are valid.
//
@ -957,7 +957,7 @@ func IsSSGen(tx *dcrutil.Tx) (bool, error) {
// Ensure that the second input is an SStx tagged output.
// TODO: Do this in validate, as we don't want to actually lookup
// old tx here. This function is for more general sorting.
// old tx here. This function is for more general sorting.
// Ensure that the first output is an OP_RETURN push.
zeroethOutputVersion := msgTx.TxOut[0].Version
@ -1046,7 +1046,7 @@ func IsSSGen(tx *dcrutil.Tx) (bool, error) {
return true, nil
}
// IsSSRtx returns whether or not a transaction is an SSRtx. It does some
// IsSSRtx returns whether or not a transaction is an SSRtx. It does some
// simple validation steps to make sure the number of inputs, number of
// outputs, and the input/output scripts are valid.
//

File diff suppressed because it is too large

View File

@ -1,547 +0,0 @@
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package stake_test
import (
"bytes"
"compress/bzip2"
"encoding/gob"
"fmt"
"math/big"
"os"
"path/filepath"
"reflect"
"sort"
"testing"
"time"
"github.com/decred/dcrd/blockchain"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/wire"
"github.com/decred/dcrutil"
)
// cloneTicketDB makes a deep copy of a ticket DB by
// serializing it to a gob and then deserializing it
// into an empty container.
func cloneTicketDB(tmdb *stake.TicketDB) (stake.TicketMaps, error) {
mapsPointer := tmdb.DumpMapsPointer()
mapsBytes, err := mapsPointer.GobEncode()
if err != nil {
return stake.TicketMaps{},
fmt.Errorf("clone db error: could not serialize ticketMaps")
}
var mapsCopy stake.TicketMaps
if err := mapsCopy.GobDecode(mapsBytes); err != nil {
return stake.TicketMaps{},
fmt.Errorf("clone db error: could not deserialize " +
"ticketMaps")
}
return mapsCopy, nil
}
// hashInSlice returns whether a hash exists in a slice or not.
func hashInSlice(h *chainhash.Hash, list []*chainhash.Hash) bool {
for _, hash := range list {
if h.IsEqual(hash) {
return true
}
}
return false
}
func TestTicketDB(t *testing.T) {
// Declare some useful variables
testBCHeight := int64(168)
// Set up a blockchain
chain, teardownFunc, err := chainSetup("ticketdbunittests",
simNetParams)
if err != nil {
t.Errorf("Failed to setup chain instance: %v", err)
return
}
defer teardownFunc()
filename := filepath.Join("..", "/../blockchain/testdata", "blocks0to168.bz2")
fi, err := os.Open(filename)
bcStream := bzip2.NewReader(fi)
defer fi.Close()
// Create a buffer of the read file
bcBuf := new(bytes.Buffer)
bcBuf.ReadFrom(bcStream)
// Create decoder from the buffer and a map to store the data
bcDecoder := gob.NewDecoder(bcBuf)
testBlockchain := make(map[int64][]byte)
// Decode the blockchain into the map
if err := bcDecoder.Decode(&testBlockchain); err != nil {
t.Errorf("error decoding test blockchain")
}
timeSource := blockchain.NewMedianTime()
var CopyOfMapsAtBlock50, CopyOfMapsAtBlock168 stake.TicketMaps
var ticketsToSpendIn167 []chainhash.Hash
var sortedTickets167 []*stake.TicketData
for i := int64(0); i <= testBCHeight; i++ {
if i == 0 {
continue
}
block, err := dcrutil.NewBlockFromBytes(testBlockchain[i])
if err != nil {
t.Fatalf("block deserialization error on block %v", i)
}
block.SetHeight(i)
_, _, err = chain.ProcessBlock(block, timeSource, blockchain.BFNone)
if err != nil {
t.Fatalf("failed to process block %v: %v", i, err)
}
if i == 50 {
// Create snapshot of tmdb at block 50
CopyOfMapsAtBlock50, err = cloneTicketDB(chain.TMDB())
if err != nil {
t.Errorf("db cloning at block 50 failure! %v", err)
}
}
// Test to make sure that ticket selection is working correctly.
if i == 167 {
// Sort the entire list of tickets lexicographically by sorting
// each bucket and then appending it to the list. Then store it
// to use in the next block.
totalTickets := 0
sortedSlice := make([]*stake.TicketData, 0)
for i := 0; i < stake.BucketsSize; i++ {
tix, err := chain.TMDB().DumpLiveTickets(uint8(i))
if err != nil {
t.Errorf("error dumping live tickets")
}
mapLen := len(tix)
totalTickets += mapLen
tempTdSlice := stake.NewTicketDataSlice(mapLen)
itr := 0 // Iterator
for _, td := range tix {
tempTdSlice[itr] = td
itr++
}
sort.Sort(tempTdSlice)
sortedSlice = append(sortedSlice, tempTdSlice...)
}
sortedTickets167 = sortedSlice
}
if i == 168 {
parentBlock, err := dcrutil.NewBlockFromBytes(testBlockchain[i-1])
if err != nil {
t.Errorf("block deserialization error on block %v", i-1)
}
pbhB, err := parentBlock.MsgBlock().Header.Bytes()
if err != nil {
t.Errorf("block header serialization error")
}
prng := stake.NewHash256PRNG(pbhB)
ts, err := stake.FindTicketIdxs(int64(len(sortedTickets167)),
int(simNetParams.TicketsPerBlock), prng)
if err != nil {
t.Errorf("failure on FindTicketIdxs")
}
for _, idx := range ts {
ticketsToSpendIn167 =
append(ticketsToSpendIn167, sortedTickets167[idx].SStxHash)
}
// Make sure that the tickets that were supposed to be spent or
// missed were.
spentTix, err := chain.TMDB().DumpSpentTickets(i)
if err != nil {
t.Errorf("DumpSpentTickets failure")
}
for _, h := range ticketsToSpendIn167 {
if _, ok := spentTix[h]; !ok {
t.Errorf("missing ticket %v that should have been missed "+
"or spent in block %v", h, i)
}
}
// Create snapshot of tmdb at block 168
CopyOfMapsAtBlock168, err = cloneTicketDB(chain.TMDB())
if err != nil {
t.Errorf("db cloning at block 168 failure! %v", err)
}
}
}
// Remove five blocks from HEAD~1
_, _, _, err = chain.TMDB().RemoveBlockToHeight(50)
if err != nil {
t.Errorf("error: %v", err)
}
// Test if the roll back was symmetric to the earlier snapshot
if !reflect.DeepEqual(chain.TMDB().DumpMapsPointer(), CopyOfMapsAtBlock50) {
t.Errorf("The td did not restore to a previous block height correctly!")
}
// Test rescanning a ticket db
err = chain.TMDB().RescanTicketDB()
if err != nil {
t.Errorf("rescanticketdb err: %v", err.Error())
}
// Remove all blocks and rescan too
_, _, _, err =
chain.TMDB().RemoveBlockToHeight(simNetParams.StakeEnabledHeight)
if err != nil {
t.Errorf("error: %v", err)
}
err = chain.TMDB().RescanTicketDB()
if err != nil {
t.Errorf("rescanticketdb err: %v", err.Error())
}
// Test if the db file storage was symmetric to the earlier snapshot
if !reflect.DeepEqual(chain.TMDB().DumpMapsPointer(), CopyOfMapsAtBlock168) {
t.Errorf("The td did not rescan to HEAD correctly!")
}
err = os.Mkdir("testdata/", os.FileMode(0700))
if err != nil {
t.Error(err)
}
// Store the ticket db to disk
err = chain.TMDB().Store("testdata/", "testtmdb")
if err != nil {
t.Errorf("error: %v", err)
}
var tmdb2 stake.TicketDB
err = tmdb2.LoadTicketDBs("testdata/", "testtmdb", simNetParams, chain.DB())
if err != nil {
t.Errorf("error: %v", err)
}
// Test if the db file storage was symmetric to previously rescanned one
if !reflect.DeepEqual(chain.TMDB().DumpMapsPointer(), tmdb2.DumpMapsPointer()) {
t.Errorf("The td did not rescan to a previous block height correctly!")
}
tmdb2.Close()
// Test dumping missing tickets from block 152
missedIn152, _ := chainhash.NewHashFromStr(
"84f7f866b0af1cc278cb8e0b2b76024a07542512c76487c83628c14c650de4fa")
chain.TMDB().RemoveBlockToHeight(152)
missedTix, err := chain.TMDB().DumpMissedTickets()
if err != nil {
t.Errorf("err dumping missed tix: %v", err.Error())
}
if _, exists := missedTix[*missedIn152]; !exists {
t.Errorf("couldn't finding missed tx 1 %v in tmdb @ block 152!",
missedIn152)
}
chain.TMDB().RescanTicketDB()
// Make sure that the revoked map contains the revoked tx
revokedSlice := []*chainhash.Hash{missedIn152}
revokedTix, err := chain.TMDB().DumpRevokedTickets()
if err != nil {
t.Errorf("err dumping missed tix: %v", err.Error())
}
if len(revokedTix) != 1 {
t.Errorf("revoked ticket map is wrong len, got %v, want %v",
len(revokedTix), 1)
}
_, wasMissedIn152 := revokedTix[*revokedSlice[0]]
ticketsRevoked := wasMissedIn152
if !ticketsRevoked {
t.Errorf("revoked ticket map did not include tickets missed in " +
"block 152 and later revoked")
}
os.RemoveAll("ticketdb_test")
os.Remove("./ticketdb_test.ver")
os.Remove("testdata/testtmdb")
os.Remove("testdata")
}
// --------------------------------------------------------------------------------
// TESTING VARIABLES BEGIN HERE
// simNetPowLimit is the highest proof of work value a Decred block
// can have for the simulation test network. It is the value 2^255 - 1.
var simNetPowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne)
// SimNetParams defines the network parameters for the simulation test Decred
// network. This network is similar to the normal test network except it is
// intended for private use within a group of individuals doing simulation
// testing. The functionality is intended to differ in that the only nodes
// which are specifically specified are used to create the network rather than
// following normal discovery rules. This is important as otherwise it would
// just turn into another public testnet.
var simNetParams = &chaincfg.Params{
Name: "simnet",
Net: wire.SimNet,
DefaultPort: "18555",
// Chain parameters
GenesisBlock: &simNetGenesisBlock,
GenesisHash: &simNetGenesisHash,
CurrentBlockVersion: 0,
PowLimit: simNetPowLimit,
PowLimitBits: 0x207fffff,
ResetMinDifficulty: false,
GenerateSupported: true,
MaximumBlockSize: 1000000,
TimePerBlock: time.Second * 1,
WorkDiffAlpha: 1,
WorkDiffWindowSize: 8,
WorkDiffWindows: 4,
TargetTimespan: time.Second * 1 * 8, // TimePerBlock * WindowSize
RetargetAdjustmentFactor: 4,
// Subsidy parameters.
BaseSubsidy: 50000000000,
MulSubsidy: 100,
DivSubsidy: 101,
ReductionInterval: 128,
WorkRewardProportion: 6,
StakeRewardProportion: 3,
BlockTaxProportion: 1,
// Checkpoints ordered from oldest to newest.
Checkpoints: nil,
// Mempool parameters
RelayNonStdTxs: true,
// Address encoding magics
PubKeyAddrID: [2]byte{0x27, 0x6f}, // starts with Sk
PubKeyHashAddrID: [2]byte{0x0e, 0x91}, // starts with Ss
PKHEdwardsAddrID: [2]byte{0x0e, 0x71}, // starts with Se
PKHSchnorrAddrID: [2]byte{0x0e, 0x53}, // starts with SS
ScriptHashAddrID: [2]byte{0x0e, 0x6c}, // starts with Sc
PrivateKeyID: [2]byte{0x23, 0x07}, // starts with Ps
// BIP32 hierarchical deterministic extended key magics
HDPrivateKeyID: [4]byte{0x04, 0x20, 0xb9, 0x03}, // starts with sprv
HDPublicKeyID: [4]byte{0x04, 0x20, 0xbd, 0x3d}, // starts with spub
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType: 115, // ASCII for s
// Decred PoS parameters
MinimumStakeDiff: 20000,
TicketPoolSize: 64,
TicketsPerBlock: 5,
TicketMaturity: 16,
TicketExpiry: 256, // 4*TicketPoolSize
CoinbaseMaturity: 16,
SStxChangeMaturity: 1,
TicketPoolSizeWeight: 4,
StakeDiffAlpha: 1,
StakeDiffWindowSize: 8,
StakeDiffWindows: 8,
MaxFreshStakePerBlock: 40, // 8*TicketsPerBlock
StakeEnabledHeight: 16 + 16, // CoinbaseMaturity + TicketMaturity
StakeValidationHeight: 16 + (64 * 2), // CoinbaseMaturity + TicketPoolSize*2
StakeBaseSigScript: []byte{0xDE, 0xAD, 0xBE, 0xEF},
// Decred organization related parameters
//
// "Dev org" address is a 3-of-3 P2SH going to wallet:
// aardvark adroitness aardvark adroitness
// aardvark adroitness aardvark adroitness
// aardvark adroitness aardvark adroitness
// aardvark adroitness aardvark adroitness
// aardvark adroitness aardvark adroitness
// aardvark adroitness aardvark adroitness
// aardvark adroitness aardvark adroitness
// aardvark adroitness aardvark adroitness
// briefcase
// (seed 0x00000000000000000000000000000000000000000000000000000000000000)
//
// This same wallet owns the three ledger outputs for simnet.
//
// P2SH details for simnet dev org is below.
//
// address: Scc4ZC844nzuZCXsCFXUBXTLks2mD6psWom
// redeemScript: 532103e8c60c7336744c8dcc7b85c27789950fc52aa4e48f895ebbfb
// ac383ab893fc4c2103ff9afc246e0921e37d12e17d8296ca06a8f92a07fbe7857ed1d4
// f0f5d94e988f21033ed09c7fa8b83ed53e6f2c57c5fa99ed2230c0d38edf53c0340d0f
// c2e79c725a53ae
// (3-of-3 multisig)
// Pubkeys used:
// SkQmxbeuEFDByPoTj41TtXat8tWySVuYUQpd4fuNNyUx51tF1csSs
// SkQn8ervNvAUEX5Ua3Lwjc6BAuTXRznDoDzsyxgjYqX58znY7w9e4
// SkQkfkHZeBbMW8129tZ3KspEh1XBFC1btbkgzs6cjSyPbrgxzsKqk
//
OrganizationAddress: "ScuQxvveKGfpG1ypt6u27F99Anf7EW3cqhq",
BlockOneLedger: BlockOneLedgerSimNet,
}
// BlockOneLedgerSimNet is the block one output ledger for the simulation
// network. See below under "Decred organization related parameters" for
// information on how to spend these outputs.
var BlockOneLedgerSimNet = []*chaincfg.TokenPayout{
{Address: "Sshw6S86G2bV6W32cbc7EhtFy8f93rU6pae", Amount: 100000 * 1e8},
{Address: "SsjXRK6Xz6CFuBt6PugBvrkdAa4xGbcZ18w", Amount: 100000 * 1e8},
{Address: "SsfXiYkYkCoo31CuVQw428N6wWKus2ZEw5X", Amount: 100000 * 1e8},
}
var bigOne = new(big.Int).SetInt64(1)
// simNetGenesisHash is the hash of the first block in the block chain for the
// simulation test network.
var simNetGenesisHash = simNetGenesisBlock.BlockSha()
// simNetGenesisMerkleRoot is the hash of the first transaction in the genesis
// block for the simulation test network. It is the same as the merkle root for
// the main network.
var simNetGenesisMerkleRoot = genesisMerkleRoot
// genesisCoinbaseTx legacy is the coinbase transaction for the genesis blocks for
// the regression test network and test network.
var genesisCoinbaseTxLegacy = wire.MsgTx{
Version: 1,
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
Hash: chainhash.Hash{},
Index: 0xffffffff,
},
SignatureScript: []byte{
0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, 0x45, /* |.......E| */
0x54, 0x68, 0x65, 0x20, 0x54, 0x69, 0x6d, 0x65, /* |The Time| */
0x73, 0x20, 0x30, 0x33, 0x2f, 0x4a, 0x61, 0x6e, /* |s 03/Jan| */
0x2f, 0x32, 0x30, 0x30, 0x39, 0x20, 0x43, 0x68, /* |/2009 Ch| */
0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x6f, 0x72, /* |ancellor| */
0x20, 0x6f, 0x6e, 0x20, 0x62, 0x72, 0x69, 0x6e, /* | on brin| */
0x6b, 0x20, 0x6f, 0x66, 0x20, 0x73, 0x65, 0x63, /* |k of sec|*/
0x6f, 0x6e, 0x64, 0x20, 0x62, 0x61, 0x69, 0x6c, /* |ond bail| */
0x6f, 0x75, 0x74, 0x20, 0x66, 0x6f, 0x72, 0x20, /* |out for |*/
0x62, 0x61, 0x6e, 0x6b, 0x73, /* |banks| */
},
Sequence: 0xffffffff,
},
},
TxOut: []*wire.TxOut{
{
Value: 0x00000000,
PkScript: []byte{
0x41, 0x04, 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, /* |A.g....U| */
0x48, 0x27, 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, /* |H'.g..q0| */
0xb7, 0x10, 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, /* |..\..(.9| */
0x09, 0xa6, 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, /* |..yb...a| */
0xde, 0xb6, 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, /* |..I..?L.| */
0x38, 0xc4, 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, /* |8..U....| */
0x12, 0xde, 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, /* |..\8M...| */
0x8d, 0x57, 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, /* |.W.Lp+k.| */
0x1d, 0x5f, 0xac, /* |._.| */
},
},
},
LockTime: 0,
Expiry: 0,
}
// genesisMerkleRoot is the hash of the first transaction in the genesis block
// for the main network.
var genesisMerkleRoot = genesisCoinbaseTxLegacy.TxSha()
var regTestGenesisCoinbaseTx = wire.MsgTx{
Version: 1,
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
Hash: chainhash.Hash{},
Index: 0xffffffff,
},
SignatureScript: []byte{
0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, 0x45, /* |.......E| */
0x54, 0x68, 0x65, 0x20, 0x54, 0x69, 0x6d, 0x65, /* |The Time| */
0x73, 0x20, 0x30, 0x33, 0x2f, 0x4a, 0x61, 0x6e, /* |s 03/Jan| */
0x2f, 0x32, 0x30, 0x30, 0x39, 0x20, 0x43, 0x68, /* |/2009 Ch| */
0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x6f, 0x72, /* |ancellor| */
0x20, 0x6f, 0x6e, 0x20, 0x62, 0x72, 0x69, 0x6e, /* | on brin| */
0x6b, 0x20, 0x6f, 0x66, 0x20, 0x73, 0x65, 0x63, /* |k of sec|*/
0x6f, 0x6e, 0x64, 0x20, 0x62, 0x61, 0x69, 0x6c, /* |ond bail| */
0x6f, 0x75, 0x74, 0x20, 0x66, 0x6f, 0x72, 0x20, /* |out for |*/
0x62, 0x61, 0x6e, 0x6b, 0x73, /* |banks| */
},
Sequence: 0xffffffff,
},
},
TxOut: []*wire.TxOut{
{
Value: 0x00000000,
Version: 0x0000,
PkScript: []byte{
0x41, 0x04, 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, /* |A.g....U| */
0x48, 0x27, 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, /* |H'.g..q0| */
0xb7, 0x10, 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, /* |..\..(.9| */
0x09, 0xa6, 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, /* |..yb...a| */
0xde, 0xb6, 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, /* |..I..?L.| */
0x38, 0xc4, 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, /* |8..U....| */
0x12, 0xde, 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, /* |..\8M...| */
0x8d, 0x57, 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, /* |.W.Lp+k.| */
0x1d, 0x5f, 0xac, /* |._.| */
},
},
},
LockTime: 0,
Expiry: 0,
}
// simNetGenesisBlock defines the genesis block of the block chain which serves
// as the public transaction ledger for the simulation test network.
var simNetGenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
PrevBlock: chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy.
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}),
MerkleRoot: simNetGenesisMerkleRoot,
StakeRoot: chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy.
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}),
VoteBits: uint16(0x0000),
FinalState: [6]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
Voters: uint16(0x0000),
FreshStake: uint8(0x00),
Revocations: uint8(0x00),
Timestamp: time.Unix(1401292357, 0), // 2009-01-08 20:54:25 -0600 CST
PoolSize: uint32(0),
Bits: 0x207fffff, // 545259519
SBits: int64(0x0000000000000000),
Nonce: 0x00000000,
Height: uint32(0),
},
Transactions: []*wire.MsgTx{&regTestGenesisCoinbaseTx},
STransactions: []*wire.MsgTx{},
}

1047
blockchain/stake/tickets.go Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -7,255 +7,63 @@ package blockchain
import (
"fmt"
"sort"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/database"
"github.com/decred/dcrd/txscript"
"github.com/decred/dcrutil"
)
// GetNextWinningTickets returns the next tickets eligible for spending as SSGen
// on the top block. It also returns the ticket pool size.
// This function is NOT safe for concurrent access.
func (b *BlockChain) GetNextWinningTickets() ([]chainhash.Hash, int, [6]byte,
error) {
winningTickets, poolSize, finalState, _, err :=
b.getWinningTicketsWithStore(b.bestNode)
if err != nil {
return nil, 0, [6]byte{}, err
}
// NextLotteryData returns the next tickets eligible for spending as SSGen
// on the top block. It also returns the ticket pool size and the PRNG
// state checksum.
//
// This function is safe for concurrent access.
func (b *BlockChain) NextLotteryData() ([]chainhash.Hash, int, [6]byte, error) {
b.chainLock.RLock()
defer b.chainLock.RUnlock()
return winningTickets, poolSize, finalState, nil
return b.bestNode.stakeNode.Winners(), b.bestNode.stakeNode.PoolSize(),
b.bestNode.stakeNode.FinalState(), nil
}
// getWinningTicketsWithStore is a helper function that returns winning tickets
// along with the ticket pool size and transaction store for the given node.
// Note that this function evaluates the lottery data predominantly for mining
// purposes; that is, it retrieves the lottery data which needs to go into
// the next block when mining on top of this block.
// This function is NOT safe for concurrent access.
func (b *BlockChain) getWinningTicketsWithStore(node *blockNode) ([]chainhash.Hash,
int, [6]byte, TicketStore, error) {
if node.height < b.chainParams.StakeEnabledHeight {
return []chainhash.Hash{}, 0, [6]byte{}, nil, nil
}
evalLotteryWinners := false
if node.height >= b.chainParams.StakeValidationHeight-1 {
evalLotteryWinners = true
}
block, err := b.getBlockFromHash(node.hash)
if err != nil {
return nil, 0, [6]byte{}, nil, err
}
headerB, err := node.header.Bytes()
if err != nil {
return nil, 0, [6]byte{}, nil, err
}
ticketStore, err := b.fetchTicketStore(node)
if err != nil {
return nil, 0, [6]byte{}, nil,
fmt.Errorf("Failed to generate ticket store for node %v; "+
"error given: %v", node.hash, err)
}
if ticketStore != nil {
view := NewUtxoViewpoint()
view.SetBestHash(node.hash)
view.SetStakeViewpoint(ViewpointPrevValidInitial)
parent, err := b.getBlockFromHash(&node.header.PrevBlock)
if err != nil {
return nil, 0, [6]byte{}, nil, err
}
err = view.fetchInputUtxos(b.db, block, parent)
if err != nil {
return nil, 0, [6]byte{}, nil, err
}
}
// Sort the entire list of tickets lexicographically by sorting
// each bucket and then appending it to the list.
tpdBucketMap := make(map[uint8][]*TicketPatchData)
for _, tpd := range ticketStore {
// Bucket does not exist.
if _, ok := tpdBucketMap[tpd.td.Prefix]; !ok {
tpdBucketMap[tpd.td.Prefix] = make([]*TicketPatchData, 1)
tpdBucketMap[tpd.td.Prefix][0] = tpd
} else {
// Bucket exists.
data := tpdBucketMap[tpd.td.Prefix]
tpdBucketMap[tpd.td.Prefix] = append(data, tpd)
}
}
totalTickets := 0
var sortedSlice []*stake.TicketData
for i := 0; i < stake.BucketsSize; i++ {
ltb, err := b.GenerateLiveTicketBucket(ticketStore, tpdBucketMap,
uint8(i))
if err != nil {
h := node.hash
str := fmt.Sprintf("Failed to generate a live ticket bucket "+
"to evaluate the lottery data for node %v, height %v! Error "+
"given: %v",
h,
node.height,
err.Error())
return nil, 0, [6]byte{}, nil, fmt.Errorf(str)
}
mapLen := len(ltb)
tempTdSlice := stake.NewTicketDataSlice(mapLen)
itr := 0 // Iterator
for _, td := range ltb {
tempTdSlice[itr] = td
itr++
totalTickets++
}
sort.Sort(tempTdSlice)
sortedSlice = append(sortedSlice, tempTdSlice...)
}
// Use the parent block's header to seed a PRNG that picks the
// lottery winners.
var winningTickets []chainhash.Hash
var finalState [6]byte
stateBuffer := make([]byte, 0,
(b.chainParams.TicketsPerBlock+1)*chainhash.HashSize)
if evalLotteryWinners {
ticketsPerBlock := int(b.chainParams.TicketsPerBlock)
prng := stake.NewHash256PRNG(headerB)
ts, err := stake.FindTicketIdxs(int64(totalTickets), ticketsPerBlock, prng)
if err != nil {
return nil, 0, [6]byte{}, nil, err
}
for _, idx := range ts {
winningTickets = append(winningTickets, sortedSlice[idx].SStxHash)
stateBuffer = append(stateBuffer, sortedSlice[idx].SStxHash[:]...)
}
lastHash := prng.StateHash()
stateBuffer = append(stateBuffer, lastHash[:]...)
copy(finalState[:], chainhash.HashFuncB(stateBuffer)[0:6])
}
return winningTickets, totalTickets, finalState, ticketStore, nil
}
// getWinningTicketsInclStore is a helper function for block validation that
// returns winning tickets along with the ticket pool size and transaction
// store for the given node.
// Note that this function is used for finding the lottery data when
// evaluating a block that builds on a tip, not for mining.
// This function is NOT safe for concurrent access.
func (b *BlockChain) getWinningTicketsInclStore(node *blockNode,
ticketStore TicketStore) ([]chainhash.Hash, int, [6]byte, error) {
// lotteryDataForNode is a helper function that returns winning tickets
// along with the ticket pool size and PRNG checksum for a given node.
//
// This function is NOT safe for concurrent access and MUST be called
// with the chainLock held for writes.
func (b *BlockChain) lotteryDataForNode(node *blockNode) ([]chainhash.Hash, int, [6]byte, error) {
if node.height < b.chainParams.StakeEnabledHeight {
return []chainhash.Hash{}, 0, [6]byte{}, nil
}
evalLotteryWinners := false
if node.height >= b.chainParams.StakeValidationHeight-1 {
evalLotteryWinners = true
}
parentHeaderB, err := node.parent.header.Bytes()
stakeNode, err := b.fetchStakeNode(node)
if err != nil {
return nil, 0, [6]byte{}, err
return []chainhash.Hash{}, 0, [6]byte{}, err
}
// Sort the entire list of tickets lexicographically by sorting
// each bucket and then appending it to the list.
tpdBucketMap := make(map[uint8][]*TicketPatchData)
for _, tpd := range ticketStore {
// Bucket does not exist.
if _, ok := tpdBucketMap[tpd.td.Prefix]; !ok {
tpdBucketMap[tpd.td.Prefix] = make([]*TicketPatchData, 1)
tpdBucketMap[tpd.td.Prefix][0] = tpd
} else {
// Bucket exists.
data := tpdBucketMap[tpd.td.Prefix]
tpdBucketMap[tpd.td.Prefix] = append(data, tpd)
}
}
totalTickets := 0
var sortedSlice []*stake.TicketData
for i := 0; i < stake.BucketsSize; i++ {
ltb, err := b.GenerateLiveTicketBucket(ticketStore, tpdBucketMap,
uint8(i))
if err != nil {
h := node.hash
str := fmt.Sprintf("Failed to generate a live ticket bucket "+
"to evaluate the lottery data for node %v, height %v! Error "+
"given: %v",
h,
node.height,
err.Error())
return nil, 0, [6]byte{}, fmt.Errorf(str)
}
mapLen := len(ltb)
tempTdSlice := stake.NewTicketDataSlice(mapLen)
itr := 0 // Iterator
for _, td := range ltb {
tempTdSlice[itr] = td
itr++
totalTickets++
}
sort.Sort(tempTdSlice)
sortedSlice = append(sortedSlice, tempTdSlice...)
}
// Use the parent block's header to seed a PRNG that picks the
// lottery winners.
var winningTickets []chainhash.Hash
var finalState [6]byte
stateBuffer := make([]byte, 0,
(b.chainParams.TicketsPerBlock+1)*chainhash.HashSize)
if evalLotteryWinners {
ticketsPerBlock := int(b.chainParams.TicketsPerBlock)
prng := stake.NewHash256PRNG(parentHeaderB)
ts, err := stake.FindTicketIdxs(int64(totalTickets), ticketsPerBlock, prng)
if err != nil {
return nil, 0, [6]byte{}, err
}
for _, idx := range ts {
winningTickets = append(winningTickets, sortedSlice[idx].SStxHash)
stateBuffer = append(stateBuffer, sortedSlice[idx].SStxHash[:]...)
}
lastHash := prng.StateHash()
stateBuffer = append(stateBuffer, lastHash[:]...)
copy(finalState[:], chainhash.HashFuncB(stateBuffer)[0:6])
}
return winningTickets, totalTickets, finalState, nil
return stakeNode.Winners(), stakeNode.PoolSize(),
stakeNode.FinalState(), nil
}
// GetWinningTickets takes a node block hash and returns the next tickets
// eligible for spending as SSGen.
// lotteryDataForBlock takes a node block hash and returns the next tickets
// eligible for voting, the number of tickets in the ticket pool, and the
// final state of the PRNG.
//
// This function is safe for concurrent access.
func (b *BlockChain) GetWinningTickets(nodeHash chainhash.Hash) ([]chainhash.Hash,
int, [6]byte, error) {
b.chainLock.Lock()
defer b.chainLock.Unlock()
// This function is NOT safe for concurrent access and must have the chainLock
// held for write access.
func (b *BlockChain) lotteryDataForBlock(hash *chainhash.Hash) ([]chainhash.Hash, int, [6]byte, error) {
var node *blockNode
if n, exists := b.index[nodeHash]; exists {
if n, exists := b.index[*hash]; exists {
node = n
} else {
node, _ = b.findNode(&nodeHash)
node, _ = b.findNode(hash)
}
if node == nil {
return nil, 0, [6]byte{}, fmt.Errorf("node doesn't exist")
}
winningTickets, poolSize, finalState, _, err :=
b.getWinningTicketsWithStore(node)
winningTickets, poolSize, finalState, err := b.lotteryDataForNode(node)
if err != nil {
return nil, 0, [6]byte{}, err
}
@ -263,22 +71,162 @@ func (b *BlockChain) GetWinningTickets(nodeHash chainhash.Hash) ([]chainhash.Has
return winningTickets, poolSize, finalState, nil
}
// GetMissedTickets returns a list of currently missed tickets.
// LotteryDataForBlock returns lottery data for a given block in the block
// chain, including side chain blocks.
//
// It is safe for concurrent access.
// TODO An optimization can be added that only calls the read lock if the
// block is not minMemoryStakeNodes blocks before the current best node.
// This is because all the data for these nodes can be assumed to be
// in memory.
func (b *BlockChain) LotteryDataForBlock(hash *chainhash.Hash) ([]chainhash.Hash, int, [6]byte, error) {
b.chainLock.Lock()
defer b.chainLock.Unlock()
return b.lotteryDataForBlock(hash)
}
// LiveTickets returns all currently live tickets from the stake database.
//
// This function is safe for concurrent access.
func (b *BlockChain) GetMissedTickets() []chainhash.Hash {
missedTickets := b.tmdb.GetTicketHashesForMissed()
func (b *BlockChain) LiveTickets() ([]chainhash.Hash, error) {
b.chainLock.RLock()
sn := b.bestNode.stakeNode
b.chainLock.RUnlock()
return missedTickets
return sn.LiveTickets(), nil
}
// DB passes the pointer to the database. It is only to be used by testing.
func (b *BlockChain) DB() database.DB {
return b.db
// MissedTickets returns all currently missed tickets from the stake database.
//
// This function is safe for concurrent access.
func (b *BlockChain) MissedTickets() ([]chainhash.Hash, error) {
b.chainLock.RLock()
sn := b.bestNode.stakeNode
b.chainLock.RUnlock()
return sn.MissedTickets(), nil
}
// TMDB passes the pointer to the ticket database. It is only to be used by
// testing.
func (b *BlockChain) TMDB() *stake.TicketDB {
return b.tmdb
// TicketsWithAddress returns a slice of ticket hashes that are currently live
// corresponding to the given address.
//
// This function is safe for concurrent access.
func (b *BlockChain) TicketsWithAddress(address dcrutil.Address) ([]chainhash.Hash, error) {
b.chainLock.RLock()
sn := b.bestNode.stakeNode
b.chainLock.RUnlock()
tickets := sn.LiveTickets()
var ticketsWithAddr []chainhash.Hash
err := b.db.View(func(dbTx database.Tx) error {
var err error
for _, hash := range tickets {
utxo, err := dbFetchUtxoEntry(dbTx, &hash)
if err != nil {
return err
}
_, addrs, _, err :=
txscript.ExtractPkScriptAddrs(txscript.DefaultScriptVersion,
utxo.PkScriptByIndex(0), b.chainParams)
if addrs[0].EncodeAddress() == address.EncodeAddress() {
ticketsWithAddr = append(ticketsWithAddr, hash)
}
}
return err
})
if err != nil {
return nil, err
}
return ticketsWithAddr, nil
}
// CheckLiveTicket returns whether or not a ticket exists in the live ticket
// treap of the best node.
//
// This function is safe for concurrent access.
func (b *BlockChain) CheckLiveTicket(hash chainhash.Hash) bool {
b.chainLock.RLock()
sn := b.bestNode.stakeNode
b.chainLock.RUnlock()
return sn.ExistsLiveTicket(hash)
}
// CheckLiveTickets returns whether or not a slice of tickets exist in the live
// ticket treap of the best node.
//
// This function is safe for concurrent access.
func (b *BlockChain) CheckLiveTickets(hashes []chainhash.Hash) []bool {
b.chainLock.RLock()
sn := b.bestNode.stakeNode
b.chainLock.RUnlock()
existsSlice := make([]bool, len(hashes))
for i := range hashes {
existsSlice[i] = sn.ExistsLiveTicket(hashes[i])
}
return existsSlice
}
// CheckExpiredTicket returns whether or not a ticket was ever expired.
//
// This function is safe for concurrent access.
func (b *BlockChain) CheckExpiredTicket(hash chainhash.Hash) bool {
b.chainLock.RLock()
sn := b.bestNode.stakeNode
b.chainLock.RUnlock()
return sn.ExistsExpiredTicket(hash)
}
// CheckExpiredTickets returns whether or not a ticket in a slice of
// tickets was ever expired.
//
// This function is safe for concurrent access.
func (b *BlockChain) CheckExpiredTickets(hashes []chainhash.Hash) []bool {
b.chainLock.RLock()
sn := b.bestNode.stakeNode
b.chainLock.RUnlock()
existsSlice := make([]bool, len(hashes))
for i := range hashes {
existsSlice[i] = sn.ExistsExpiredTicket(hashes[i])
}
return existsSlice
}
// TicketPoolValue returns the current value of all the locked funds in the
// ticket pool.
//
// This function is safe for concurrent access. All live tickets are at least
// 256 blocks deep on mainnet, so the UTXO set should generally always have
// the asked for transactions.
func (b *BlockChain) TicketPoolValue() (dcrutil.Amount, error) {
b.chainLock.RLock()
sn := b.bestNode.stakeNode
b.chainLock.RUnlock()
var amt int64
err := b.db.View(func(dbTx database.Tx) error {
var err error
for _, hash := range sn.LiveTickets() {
utxo, err := dbFetchUtxoEntry(dbTx, &hash)
if err != nil {
return err
}
amt += utxo.sparseOutputs[0].amount
}
return err
})
if err != nil {
return 0, err
}
return dcrutil.Amount(amt), nil
}
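// A rough consumer-side sketch stringing together the exported stake API
// above for a single ticket hash. The helper name is hypothetical and not
// part of the package; fmt and chainhash are already imported by this file.
func reportStakeState(b *BlockChain, ticket chainhash.Hash) error {
	winners, poolSize, finalState, err := b.NextLotteryData()
	if err != nil {
		return err
	}
	fmt.Printf("next winners: %v (pool size %d, final state %x)\n",
		winners, poolSize, finalState)

	fmt.Println("ticket is live:", b.CheckLiveTicket(ticket))

	poolValue, err := b.TicketPoolValue()
	if err != nil {
		return err
	}
	fmt.Println("value locked in tickets:", poolValue)
	return nil
}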

222
blockchain/stakenode.go Normal file
View File

@ -0,0 +1,222 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockchain
import (
"fmt"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/database"
)
// nodeAtHeightFromTopNode goes backwards through the chain from the given
// node until it reaches the node at the desired block height; it returns this
// node. The benefit is that this works for both the main chain and side chains.
func (b *BlockChain) nodeAtHeightFromTopNode(node *blockNode,
toTraverse int64) (*blockNode, error) {
oldNode := node
var err error
for i := 0; i < int(toTraverse); i++ {
// Get the previous block node.
oldNode, err = b.getPrevNodeFromNode(oldNode)
if err != nil {
return nil, err
}
if oldNode == nil {
return nil, fmt.Errorf("unable to obtain previous node; " +
"ancestor is genesis block")
}
}
return oldNode, nil
}
// fetchNewTicketsForNode fetches the list of newly maturing tickets for a
// given node by traversing backwards through its parents until it finds the
// block that contains the original tickets to mature.
//
// This function is NOT safe for concurrent access and must be called with
// the chainLock held for writes.
func (b *BlockChain) fetchNewTicketsForNode(node *blockNode) ([]chainhash.Hash, error) {
// If we're before the stake enabled height, there can be no
// tickets in the live ticket pool.
if node.height < b.chainParams.StakeEnabledHeight {
return []chainhash.Hash{}, nil
}
// If we already cached the tickets, simply return the cached list.
// It's important to make the distinction here that nil means the
// value was never looked up, while an empty slice means
// that there were no new tickets at this height.
if node.newTickets != nil {
return node.newTickets, nil
}
// Calculate block number for where new tickets matured from and retrieve
// this block from DB or in memory if it's a sidechain.
matureNode, err := b.nodeAtHeightFromTopNode(node,
int64(b.chainParams.TicketMaturity))
if err != nil {
return nil, err
}
matureBlock, errBlock := b.fetchBlockFromHash(&matureNode.hash)
if errBlock != nil {
return nil, errBlock
}
tickets := []chainhash.Hash{}
for _, stx := range matureBlock.STransactions() {
if is, _ := stake.IsSStx(stx); is {
h := stx.Sha()
tickets = append(tickets, *h)
}
}
// Set the new tickets in memory so that they exist for future
// reference in the node.
node.newTickets = tickets
return tickets, nil
}
// fetchStakeNode will scour the blockchain from the best block, for which we
// know that there is a valid stake node. The first step is finding a path to the
// ancestor, or, if on a side chain, the path to the common ancestor, followed
// by the path to the sidechain node. After this path is established, the
// algorithm walks along the path, regenerating and storing intermediate nodes
// as it does so, until the final stake node of interest is populated with the
// correct data.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) fetchStakeNode(node *blockNode) (*stake.Node, error) {
// If we already have the stake node fetched, return the cached result.
// Stake nodes are immutable.
if node.stakeNode != nil {
return node.stakeNode, nil
}
// If the parent stake node is cached, connect the stake node
// from there.
if node.parent != nil {
if node.stakeNode == nil && node.parent.stakeNode != nil {
var err error
if node.newTickets == nil {
node.newTickets, err = b.fetchNewTicketsForNode(node)
if err != nil {
return nil, err
}
}
node.stakeNode, err = node.parent.stakeNode.ConnectNode(node.header,
node.ticketsSpent,
node.ticketsRevoked,
node.newTickets)
if err != nil {
return nil, err
}
return node.stakeNode, nil
}
}
// We need to generate a path to the stake node and restore it
// through the entire path. The bestNode stake node must
// always be filled in, so assume it is safe to begin working
// backwards from there.
detachNodes, attachNodes, err := b.getReorganizeNodes(node)
if err != nil {
return nil, err
}
current := b.bestNode
// Move backwards through the main chain, undoing the ticket
// treaps for each block. The database is passed because the
// undo data and new tickets data for each block may not yet
// be filled in and may require the database to look up.
err = b.db.View(func(dbTx database.Tx) error {
for e := detachNodes.Front(); e != nil; e = e.Next() {
n := e.Value.(*blockNode)
var errLocal error
if n.stakeNode == nil {
n.stakeNode, errLocal =
current.stakeNode.DisconnectNode(n.header,
n.stakeUndoData, n.newTickets, dbTx)
}
if errLocal != nil {
return errLocal
}
current = n
}
return nil
})
if err != nil {
return nil, err
}
// Detach the final block and get the filled in node for the fork
// point.
err = b.db.View(func(dbTx database.Tx) error {
var errLocal error
if current.parent.stakeNode == nil {
current.parent.stakeNode, errLocal =
current.stakeNode.DisconnectNode(current.header,
current.stakeUndoData, current.newTickets, dbTx)
}
if errLocal != nil {
return errLocal
}
current = current.parent
return nil
})
if err != nil {
return nil, err
}
// The node is at a fork point in the block chain, so just return
// this stake node.
if attachNodes.Len() == 0 {
if current.hash != node.hash ||
current.height != node.height {
return nil, AssertError("failed to restore stake node to " +
"fork point when fetching")
}
return current.stakeNode, nil
}
// The requested node is on a side chain, so we need to apply the
// transactions and spend information from each of the nodes to attach.
// Note that side chain ticket data and undo data are always stored
// in memory, so there is no need to use the database here.
for e := attachNodes.Front(); e != nil; e = e.Next() {
n := e.Value.(*blockNode)
if n.stakeNode == nil {
if n.newTickets == nil {
n.newTickets, err = b.fetchNewTicketsForNode(n)
if err != nil {
return nil, err
}
}
n.stakeNode, err = current.stakeNode.ConnectNode(n.header,
n.ticketsSpent, n.ticketsRevoked, n.newTickets)
if err != nil {
return nil, err
}
}
current = n
}
return current.stakeNode, nil
}
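// For intuition, a hypothetical sketch (not part of the package) of why the
// immutability of stake nodes matters: two competing children can be
// connected from the same parent without disturbing it. Only the ConnectNode
// call shape and blockNode fields used by fetchStakeNode above are assumed.
func exampleImmutableBranching(parent *stake.Node, a, b *blockNode) (*stake.Node, *stake.Node, error) {
	childA, err := parent.ConnectNode(a.header, a.ticketsSpent,
		a.ticketsRevoked, a.newTickets)
	if err != nil {
		return nil, nil, err
	}

	// The parent node is unchanged, so a sibling on a competing fork can be
	// connected from the very same node.
	childB, err := parent.ConnectNode(b.header, b.ticketsSpent,
		b.ticketsRevoked, b.newTickets)
	if err != nil {
		return nil, nil, err
	}

	return childA, childB, nil
}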

BIN
blockchain/testdata/testexpiry.bz2 vendored Normal file

Binary file not shown.

View File

@ -1,691 +0,0 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockchain
import (
"errors"
"fmt"
"sort"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/database"
"github.com/decred/dcrutil"
)
// TicketStatus is used to indicate the state of a ticket in the ticket store and
// the ticket database. Non-existing is included because it's possible to have
// a ticket not exist in a sidechain that exists in the mainchain (and thus exists
// in the ticket database), and so you need to indicate this in the ticket store.
// It could also point to a ticket that's been missed and eliminated from the
// ticket db by SSRtx.
type TicketStatus int
// Possible values for TicketStatus
const (
TiNonexisting = iota
TiSpent
TiAvailable
TiMissed
TiRevoked
TiError
)
// TicketPatchData contains contextual information about tickets, namely their
// ticket data and whether or not they are spent.
type TicketPatchData struct {
td *stake.TicketData
ts TicketStatus
err error
}
// NewTicketPatchData creates a new TicketPatchData struct.
func NewTicketPatchData(td *stake.TicketData,
ts TicketStatus,
err error) *TicketPatchData {
return &TicketPatchData{td, ts, err}
}
// TicketStore is used to store a patch of the ticket db for use in validating the
// block header and subsequently the block reward. It allows you to observe the
// ticket db from the point-of-view of different points in the chain.
// TicketStore is essentially an extremely inefficient version of the ticket
// database that isn't designed to be easily rolled back, which is fine
// because it is only used in ephemeral cases.
type TicketStore map[chainhash.Hash]*TicketPatchData
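// Illustrative sketch, not part of the original file: the overlay semantics
// the TicketStore patch implies. Lookups consult the in-memory patch first
// and fall back to the ticket database view only for hashes the patch does
// not cover, which is the same precedence GenerateLiveTicketBucket and
// GenerateMissedTickets apply below; dbLive stands in for a dumped live
// ticket bucket.
func ticketStatus(store TicketStore, dbLive stake.SStxMemMap, hash chainhash.Hash) TicketStatus {
	if tpd, ok := store[hash]; ok {
		return tpd.ts // the patch overrides the database view
	}
	if _, ok := dbLive[hash]; ok {
		return TiAvailable // live in the database and not patched out
	}
	return TiNonexisting
}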
// GenerateLiveTicketBucket takes ticket patch data and a bucket number as input,
// then recreates a ticket bucket from the patch and the current database state.
func (b *BlockChain) GenerateLiveTicketBucket(ticketStore TicketStore,
tpdBucketMap map[uint8][]*TicketPatchData, bucket uint8) (stake.SStxMemMap,
error) {
bucketTickets := make(stake.SStxMemMap)
// Check the ticket store for live tickets and add them to the bucket if
// their bucket prefix matches.
for _, tpd := range tpdBucketMap[bucket] {
if tpd.ts == TiAvailable {
bucketTickets[tpd.td.SStxHash] = tpd.td
}
}
// Check the ticket database for live tickets; prune live tickets that
// have been spent/missed/were otherwise accounted for.
liveTicketsFromDb, err := b.tmdb.DumpLiveTickets(bucket)
if err != nil {
return nil, err
}
for hash, td := range liveTicketsFromDb {
if _, exists := ticketStore[hash]; exists {
continue
}
bucketTickets[hash] = td
}
return bucketTickets, nil
}
// GenerateMissedTickets takes ticket patch data as input, then recreates the
// missed tickets bucket from the patch and the current database state.
func (b *BlockChain) GenerateMissedTickets(tixStore TicketStore) (stake.SStxMemMap,
error) {
missedTickets := make(stake.SStxMemMap)
// Check the ticket store for missed tickets and add them to the map.
for hash, tpd := range tixStore {
if tpd.ts == TiMissed {
missedTickets[hash] = tpd.td
}
}
// Check the ticket database for missed tickets; skip any that are
// already accounted for in the ticket store.
missedTicketsFromDb, err := b.tmdb.DumpMissedTickets()
if err != nil {
return nil, err
}
for hash, td := range missedTicketsFromDb {
if _, exists := tixStore[hash]; exists {
continue
}
missedTickets[hash] = td
}
return missedTickets, nil
}
// connectTickets updates the passed map by removing any tickets
// from the ticket pool that have been considered spent or missed in this block
// according to the block header. Then, it connects all the newly mature tickets
// to the passed map.
func (b *BlockChain) connectTickets(tixStore TicketStore, node *blockNode,
block *dcrutil.Block, view *UtxoViewpoint) error {
if tixStore == nil {
return fmt.Errorf("nil ticket store")
}
// Nothing to do if tickets haven't yet possibly matured.
height := node.height
if height < b.chainParams.StakeEnabledHeight {
return nil
}
parentBlock, err := b.getBlockFromHash(&node.header.PrevBlock)
if err != nil {
return err
}
revocations := node.header.Revocations
tM := int64(b.chainParams.TicketMaturity)
// Skip a number of validation steps while the chain does not yet
// require voting.
if node.height >= b.chainParams.StakeValidationHeight {
// We need the missed tickets bucket from the original perspective of
// the node.
missedTickets, err := b.GenerateMissedTickets(tixStore)
if err != nil {
return err
}
// TxStore at blockchain HEAD + TxTreeRegular of prevBlock (if
// validated) for this node.
parent, err := b.getBlockFromHash(&node.header.PrevBlock)
if err != nil {
return err
}
regularTxTreeValid := dcrutil.IsFlagSet16(node.header.VoteBits,
dcrutil.BlockValid)
thisNodeStakeViewpoint := ViewpointPrevInvalidStake
if regularTxTreeValid {
thisNodeStakeViewpoint = ViewpointPrevValidStake
}
view.SetStakeViewpoint(thisNodeStakeViewpoint)
err = view.fetchInputUtxos(b.db, block, parent)
if err != nil {
errStr := fmt.Sprintf("fetchInputUtxos failed for incoming "+
"node %v; error given: %v", node.hash, err)
return errors.New(errStr)
}
// PART 1: Spend/miss winner tickets
// Iterate through all the SSGen (vote) tx in the block and add them to
// a map of tickets that were actually used.
spentTicketsFromBlock := make(map[chainhash.Hash]bool)
numberOfSSgen := 0
for _, staketx := range block.STransactions() {
if is, _ := stake.IsSSGen(staketx); is {
msgTx := staketx.MsgTx()
sstxIn := msgTx.TxIn[1] // sstx input
sstxHash := sstxIn.PreviousOutPoint.Hash
originUTXO := view.LookupEntry(&sstxHash)
if originUTXO == nil {
str := fmt.Sprintf("unable to find input transaction "+
"%v for transaction %v", sstxHash, staketx.Sha())
return ruleError(ErrMissingTx, str)
}
// Check maturity of ticket; we can only spend the ticket after it
// hits maturity at height + tM + 1.
sstxHeight := originUTXO.BlockHeight()
if (height - sstxHeight) < (tM + 1) {
blockSha := block.Sha()
errStr := fmt.Sprintf("Error: A ticket spend as an SSGen in "+
"block height %v was immature! Block sha %v",
height,
blockSha)
return errors.New(errStr)
}
// Fill out the ticket data.
spentTicketsFromBlock[sstxHash] = true
numberOfSSgen++
}
}
// Obtain the TicketsPerBlock tickets that were selected this round, then
// check them against the tickets that were actually used to make sure that
// every SSGen matches a selected ticket. Commit the spent or missed tickets
// to the ticket store afterwards.
spentAndMissedTickets := make(TicketStore)
tixSpent := 0
tixMissed := 0
// Sort the entire list of tickets lexicographically by sorting
// each bucket and then appending it to the list. Start by generating
// a prefix matched map of tickets to speed up the lookup.
tpdBucketMap := make(map[uint8][]*TicketPatchData)
for _, tpd := range tixStore {
// Bucket does not exist.
if _, ok := tpdBucketMap[tpd.td.Prefix]; !ok {
tpdBucketMap[tpd.td.Prefix] = make([]*TicketPatchData, 1)
tpdBucketMap[tpd.td.Prefix][0] = tpd
} else {
// Bucket exists.
data := tpdBucketMap[tpd.td.Prefix]
tpdBucketMap[tpd.td.Prefix] = append(data, tpd)
}
}
totalTickets := 0
var sortedSlice []*stake.TicketData
for i := 0; i < stake.BucketsSize; i++ {
ltb, err := b.GenerateLiveTicketBucket(tixStore, tpdBucketMap,
uint8(i))
if err != nil {
h := node.hash
str := fmt.Sprintf("Failed to generate live ticket bucket "+
"%v for node %v, height %v! Error: %v",
i,
h,
node.height,
err.Error())
return fmt.Errorf(str)
}
mapLen := len(ltb)
tempTdSlice := stake.NewTicketDataSlice(mapLen)
itr := 0 // Iterator
for _, td := range ltb {
tempTdSlice[itr] = td
itr++
totalTickets++
}
sort.Sort(tempTdSlice)
sortedSlice = append(sortedSlice, tempTdSlice...)
}
// Use the parent block's header to seed a PRNG that picks the
// lottery winners.
ticketsPerBlock := int(b.chainParams.TicketsPerBlock)
pbhB, err := parentBlock.MsgBlock().Header.Bytes()
if err != nil {
return err
}
prng := stake.NewHash256PRNG(pbhB)
ts, err := stake.FindTicketIdxs(int64(totalTickets), ticketsPerBlock, prng)
if err != nil {
return err
}
ticketsToSpendOrMiss := make([]*stake.TicketData, ticketsPerBlock,
ticketsPerBlock)
for i, idx := range ts {
ticketsToSpendOrMiss[i] = sortedSlice[idx]
}
// Spend or miss these tickets by checking for their existence in the
// passed spentTicketsFromBlock map.
for _, ticket := range ticketsToSpendOrMiss {
// Move the ticket from active tickets map into the used tickets
// map if the ticket was spent.
wasSpent, _ := spentTicketsFromBlock[ticket.SStxHash]
if wasSpent {
tpd := NewTicketPatchData(ticket, TiSpent, nil)
spentAndMissedTickets[ticket.SStxHash] = tpd
tixSpent++
} else { // Ticket was not spent in this block, so mark it missed.
tpd := NewTicketPatchData(ticket, TiMissed, nil)
spentAndMissedTickets[ticket.SStxHash] = tpd
tixMissed++
}
}
// This error is thrown if for some reason there exists an SSGen in
// the block that doesn't spend a ticket from the eligible list of
// tickets, thus making it invalid.
if tixSpent != numberOfSSgen {
errStr := fmt.Sprintf("an invalid number %v "+
"tickets was spent, but %v many tickets should "+
"have been spent!", tixSpent, numberOfSSgen)
return errors.New(errStr)
}
if tixMissed != (ticketsPerBlock - numberOfSSgen) {
errStr := fmt.Sprintf("an invalid number %v "+
"tickets was missed, but %v many tickets should "+
"have been missed!", tixMissed,
ticketsPerBlock-numberOfSSgen)
return errors.New(errStr)
}
if (tixSpent + tixMissed) != int(b.chainParams.TicketsPerBlock) {
errStr := fmt.Sprintf("an invalid number %v "+
"tickets was spent and missed, but TicketsPerBlock %v many "+
"tickets should have been spent!", tixSpent,
ticketsPerBlock)
return errors.New(errStr)
}
// Calculate all the tickets expiring this block and mark them as missed.
tpdBucketMap = make(map[uint8][]*TicketPatchData)
for _, tpd := range tixStore {
// Bucket does not exist.
if _, ok := tpdBucketMap[tpd.td.Prefix]; !ok {
tpdBucketMap[tpd.td.Prefix] = make([]*TicketPatchData, 1)
tpdBucketMap[tpd.td.Prefix][0] = tpd
} else {
// Bucket exists.
data := tpdBucketMap[tpd.td.Prefix]
tpdBucketMap[tpd.td.Prefix] = append(data, tpd)
}
}
toExpireHeight := node.height - int64(b.chainParams.TicketExpiry)
if !(toExpireHeight < int64(b.chainParams.StakeEnabledHeight)) {
for i := 0; i < stake.BucketsSize; i++ {
// Generate the live ticket bucket.
ltb, err := b.GenerateLiveTicketBucket(tixStore,
tpdBucketMap, uint8(i))
if err != nil {
return err
}
for _, ticket := range ltb {
if ticket.BlockHeight == toExpireHeight {
tpd := NewTicketPatchData(ticket, TiMissed, nil)
spentAndMissedTickets[ticket.SStxHash] = tpd
}
}
}
}
// Merge the ticket store patch containing the spent and missed tickets
// with the ticket store.
for hash, tpd := range spentAndMissedTickets {
tixStore[hash] = tpd
}
// At this point our tixStore now contains all the spent and missed tx
// as per this block.
// PART 2: Remove tickets that were missed and are now revoked.
// Iterate through all the SSRtx (revocation) tx in the block and add them
// to a map of tickets that were revoked.
revocationsFromBlock := make(map[chainhash.Hash]struct{})
numberOfSSRtx := 0
for _, staketx := range block.STransactions() {
if is, _ := stake.IsSSRtx(staketx); is {
msgTx := staketx.MsgTx()
sstxIn := msgTx.TxIn[0] // sstx input
sstxHash := sstxIn.PreviousOutPoint.Hash
// Fill out the ticket data.
revocationsFromBlock[sstxHash] = struct{}{}
numberOfSSRtx++
}
}
if numberOfSSRtx != int(revocations) {
errStr := fmt.Sprintf("an invalid revocations %v was calculated "+
"the block header indicates %v instead", numberOfSSRtx,
revocations)
return errors.New(errStr)
}
// Lookup the missed ticket. If we find it in the patch data,
// modify the patch data so that it doesn't exist.
// Otherwise, just load the missed ticket data from
// the ticket db and create patch data based on that.
for hash := range revocationsFromBlock {
ticketWasMissed := false
if td, is := missedTickets[hash]; is {
maturedHeight := td.BlockHeight
// Check maturity of ticket; we can only spend the ticket after it
// hits maturity at height + tM + 2.
if height < maturedHeight+2 {
blockSha := block.Sha()
errStr := fmt.Sprintf("Error: A ticket spend as an "+
"SSRtx in block height %v was immature! Block sha %v",
height,
blockSha)
return errors.New(errStr)
}
ticketWasMissed = true
}
if !ticketWasMissed {
errStr := fmt.Sprintf("SSRtx spent missed sstx %v, "+
"but that missed sstx could not be found!",
hash)
return errors.New(errStr)
}
}
}
// PART 3: Add newly maturing tickets
// This is the only chunk we need to do for blocks appearing before
// stake validation height.
// Calculate the height that newly maturing tickets come from and
// retrieve that block from the db.
matureNode, err := b.getNodeAtHeightFromTopNode(node, tM)
if err != nil {
return err
}
matureBlock, errBlock := b.getBlockFromHash(matureNode.hash)
if errBlock != nil {
return errBlock
}
// Maturing tickets are from the maturingBlock; fill out the ticket patch data
// and then push them to the tixStore.
for _, stx := range matureBlock.STransactions() {
if is, _ := stake.IsSStx(stx); is {
// Calculate the prefix for pre-sort.
sstxHash := *stx.Sha()
prefix := uint8(sstxHash[0])
// Fill out the ticket data.
td := stake.NewTicketData(sstxHash,
prefix,
chainhash.Hash{},
height,
false, // not missed
false) // not expired
tpd := NewTicketPatchData(td,
TiAvailable,
nil)
tixStore[*stx.Sha()] = tpd
}
}
return nil
}
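// Illustrative sketch, not part of the original file: the winner selection
// step used above, isolated. The parent block header bytes seed a
// deterministic PRNG and FindTicketIdxs draws ticketsPerBlock indices into
// the lexicographically sorted slice of live tickets, so every node derives
// the same winners for a given parent block.
func selectWinners(parentBlock *dcrutil.Block, sorted []*stake.TicketData,
	ticketsPerBlock int) ([]*stake.TicketData, error) {
	seed, err := parentBlock.MsgBlock().Header.Bytes()
	if err != nil {
		return nil, err
	}
	prng := stake.NewHash256PRNG(seed)
	idxs, err := stake.FindTicketIdxs(int64(len(sorted)), ticketsPerBlock, prng)
	if err != nil {
		return nil, err
	}
	winners := make([]*stake.TicketData, 0, ticketsPerBlock)
	for _, idx := range idxs {
		winners = append(winners, sorted[idx])
	}
	return winners, nil
}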
// disconnectTickets updates the passed map by undoing ticket and spend
// information for all stake transactions in the passed block. Only
// tickets in the passed map are updated.
// This function should only ever have to disconnect tickets from the main
// chain, so most of the calls go directly to the tmdb, which contains all
// of this data in an organized bucket.
func (b *BlockChain) disconnectTickets(tixStore TicketStore, node *blockNode,
block *dcrutil.Block) error {
tM := int64(b.chainParams.TicketMaturity)
height := node.height
// Nothing to do if tickets haven't yet possibly matured.
if height < b.chainParams.StakeEnabledHeight {
return nil
}
// PART 1: Remove newly maturing tickets
// Calculate the height that the newly matured tickets came from and
// retrieve that block from the db.
matureNode, err := b.getNodeAtHeightFromTopNode(node, tM)
if err != nil {
return err
}
matureBlock, errBlock := b.getBlockFromHash(matureNode.hash)
if errBlock != nil {
return errBlock
}
// Store pointers to empty ticket data in the ticket store and mark them as
// non-existing.
for _, stx := range matureBlock.STransactions() {
if is, _ := stake.IsSStx(stx); is {
// Leave this pointing to nothing, as the ticket technically does not
// exist. It may exist when we add blocks later, but we can fill it
// out then.
td := &stake.TicketData{}
tpd := NewTicketPatchData(td,
TiNonexisting,
nil)
tixStore[*stx.Sha()] = tpd
}
}
// PART 2: Unrevoke any SSRtx in this block and restore them as
// missed tickets.
for _, stx := range block.STransactions() {
if is, _ := stake.IsSSRtx(stx); is {
// Move the revoked ticket to missed tickets. Obtain the
// revoked ticket data from the ticket database.
msgTx := stx.MsgTx()
sstxIn := msgTx.TxIn[0] // sstx input
sstxHash := sstxIn.PreviousOutPoint.Hash
td := b.tmdb.GetRevokedTicket(sstxHash)
if td == nil {
return fmt.Errorf("Failed to find revoked ticket %v in tmdb",
sstxHash)
}
tpd := NewTicketPatchData(td,
TiMissed,
nil)
tixStore[sstxHash] = tpd
}
}
// PART 3: Unspend or unmiss all tickets spent/missed/expired at this block.
// Query the stake db for used tickets (spentTicketDb), which includes all of
// the spent and missed tickets.
spentTickets, errDump := b.tmdb.DumpSpentTickets(height)
if errDump != nil {
return errDump
}
// Move all of these tickets into the ticket store as available tickets.
for hash, td := range spentTickets {
tpd := NewTicketPatchData(td,
TiAvailable,
nil)
tixStore[hash] = tpd
}
return nil
}
// fetchTicketStore fetches ticket data from the point of view of the given node.
// For example, a given node might be down a side chain where a ticket hasn't been
// spent from its point of view even though it might have been spent in the main
// chain (or another side chain). Another scenario is where a ticket exists from
// the point of view of the main chain, but doesn't exist in a side chain that
// branches before the block that contains the ticket on the main chain.
func (b *BlockChain) fetchTicketStore(node *blockNode) (TicketStore, error) {
tixStore := make(TicketStore)
// Get the previous block node. This function is used over simply
// accessing node.parent directly as it will dynamically create previous
// block nodes as needed. This allows only the pieces of the chain that
// are needed to remain in memory.
prevNode, err := b.getPrevNodeFromNode(node)
if err != nil {
return nil, err
}
// If we haven't selected a best chain yet or we are extending the main
// (best) chain with a new block, just use the ticket database we already
// have.
if b.bestNode == nil || (prevNode != nil &&
prevNode.hash.IsEqual(b.bestNode.hash)) {
return nil, nil
}
// We don't care about nodes before stake enabled height.
if node.height < b.chainParams.StakeEnabledHeight {
return nil, nil
}
// The requested node is either on a side chain or is a node on the main
// chain before the end of it. In either case, we need to undo the
// transactions and spend information for the blocks which would be
// disconnected during a reorganize to the point of view of the
// node just before the requested node.
detachNodes, attachNodes := b.getReorganizeNodes(node)
if err != nil {
return nil, err
}
view := NewUtxoViewpoint()
view.SetBestHash(b.bestNode.hash)
view.SetStakeViewpoint(ViewpointPrevValidInitial)
for e := detachNodes.Front(); e != nil; e = e.Next() {
n := e.Value.(*blockNode)
block, err := b.getBlockFromHash(n.hash)
if err != nil {
return nil, err
}
parent, err := b.getBlockFromHash(&n.header.PrevBlock)
if err != nil {
return nil, err
}
// Load all of the spent txos for the block from the spend
// journal.
var stxos []spentTxOut
err = b.db.View(func(dbTx database.Tx) error {
stxos, err = dbFetchSpendJournalEntry(dbTx, block, parent)
return err
})
if err != nil {
return nil, err
}
// Quick sanity test.
if len(stxos) != countSpentOutputs(block, parent) {
return nil, AssertError(fmt.Sprintf("retrieved %v stxos when "+
"trying to disconnect block %v (height %v), yet counted %v "+
"many spent utxos when fetching ticket store", len(stxos),
block.Sha(), block.Height(), countSpentOutputs(block, parent)))
}
err = b.disconnectTransactions(view, block, parent, stxos)
if err != nil {
return nil, err
}
err = b.disconnectTickets(tixStore, n, block)
if err != nil {
return nil, err
}
}
// The ticket store is now accurate to either the node where the
// requested node forks off the main chain (in the case where the
// requested node is on a side chain), or the requested node itself if
// the requested node is an old node on the main chain. Entries in the
// attachNodes list indicate the requested node is on a side chain, so
// if there are no nodes to attach, we're done.
if attachNodes.Len() == 0 {
return tixStore, nil
}
// The requested node is on a side chain, so we need to apply the
// transactions and spend information from each of the nodes to attach.
for e := attachNodes.Front(); e != nil; e = e.Next() {
n := e.Value.(*blockNode)
block, exists := b.blockCache[*n.hash]
if !exists {
return nil, fmt.Errorf("unable to find block %v in "+
"side chain cache for ticket db patch construction",
n.hash)
}
// The number of blocks below this block but above the root of the fork
err = b.connectTickets(tixStore, n, block, view)
if err != nil {
return nil, err
}
parent, err := b.getBlockFromHash(&n.header.PrevBlock)
if err != nil {
return nil, err
}
var stxos []spentTxOut
err = b.connectTransactions(view, block, parent, &stxos)
if err != nil {
return nil, err
}
view.SetBestHash(node.hash)
}
return tixStore, nil
}

117
blockchain/upgrade.go Normal file
View File

@ -0,0 +1,117 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockchain
import (
"github.com/decred/dcrd/blockchain/internal/progresslog"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/database"
)
// upgradeToVersion2 upgrades a version 1 blockchain to version 2, allowing
// use of the new on-disk ticket database.
func (b *BlockChain) upgradeToVersion2() error {
log.Infof("Initializing upgrade to database version 2")
best := b.BestSnapshot()
progressLogger := progresslog.NewBlockProgressLogger("Upgraded", log)
// The upgrade is atomic, so there is no need to set the flag that
// the database is undergoing an upgrade here. Get the stake node
// for the genesis block, and then begin connecting stake nodes
// incrementally.
err := b.db.Update(func(dbTx database.Tx) error {
bestStakeNode, errLocal := stake.InitDatabaseState(dbTx, b.chainParams)
if errLocal != nil {
return errLocal
}
parent, errLocal := dbFetchBlockByHeight(dbTx, 0)
if errLocal != nil {
return errLocal
}
for i := int64(1); i <= best.Height; i++ {
block, errLocal := dbFetchBlockByHeight(dbTx, i)
if errLocal != nil {
return errLocal
}
// If we need the tickets, fetch them too.
var newTickets []chainhash.Hash
if i >= b.chainParams.StakeEnabledHeight {
matureHeight := i - int64(b.chainParams.TicketMaturity)
matureBlock, errLocal := dbFetchBlockByHeight(dbTx, matureHeight)
if errLocal != nil {
return errLocal
}
for _, stx := range matureBlock.STransactions() {
if is, _ := stake.IsSStx(stx); is {
h := stx.Sha()
newTickets = append(newTickets, *h)
}
}
}
// Iteratively connect the stake nodes in memory.
header := block.MsgBlock().Header
bestStakeNode, errLocal = bestStakeNode.ConnectNode(header,
ticketsSpentInBlock(block), ticketsRevokedInBlock(block),
newTickets)
if errLocal != nil {
return errLocal
}
// Write the top block stake node to the database.
errLocal = stake.WriteConnectedBestNode(dbTx, bestStakeNode,
*best.Hash)
if errLocal != nil {
return errLocal
}
// Write the best block node when we reach it.
if i == best.Height {
b.bestNode.stakeNode = bestStakeNode
b.bestNode.stakeUndoData = bestStakeNode.UndoData()
b.bestNode.newTickets = newTickets
b.bestNode.ticketsSpent = ticketsSpentInBlock(block)
b.bestNode.ticketsRevoked = ticketsRevokedInBlock(block)
}
progressLogger.LogBlockHeight(block, parent)
parent = block
}
// Write the new database version.
b.dbInfo.version = 2
errLocal = dbPutDatabaseInfo(dbTx, b.dbInfo)
if errLocal != nil {
return errLocal
}
return nil
})
if err != nil {
return err
}
log.Infof("Upgrade to new stake database was successful!")
return nil
}
// upgrade applies all possible upgrades to the blockchain database iteratively,
// updating old clients to the newest version.
func (b *BlockChain) upgrade() error {
if b.dbInfo.version == 1 {
err := b.upgradeToVersion2()
if err != nil {
return err
}
}
return nil
}
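// Hypothetical sketch, not part of this commit: how a later migration would
// slot into the dispatcher above. upgradeToVersion3 does not exist here; the
// pattern is simply to gate each step on the current on-disk version so an
// old database walks forward one version at a time in a single call to
// upgrade.
//
//	func (b *BlockChain) upgrade() error {
//		if b.dbInfo.version == 1 {
//			if err := b.upgradeToVersion2(); err != nil {
//				return err
//			}
//		}
//		if b.dbInfo.version == 2 {
//			if err := b.upgradeToVersion3(); err != nil { // hypothetical
//				return err
//			}
//		}
//		return nil
//	}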

View File

@ -1081,11 +1081,11 @@ func (b *BlockChain) FetchUtxoView(tx *dcrutil.Tx, treeValid bool) (*UtxoViewpoi
view := NewUtxoViewpoint()
if treeValid {
view.SetStakeViewpoint(ViewpointPrevValidRegular)
block, err := b.getBlockFromHash(b.bestNode.hash)
block, err := b.fetchBlockFromHash(&b.bestNode.hash)
if err != nil {
return nil, err
}
parent, err := b.getBlockFromHash(&b.bestNode.header.PrevBlock)
parent, err := b.fetchBlockFromHash(&b.bestNode.header.PrevBlock)
if err != nil {
return nil, err
}
@ -1101,7 +1101,7 @@ func (b *BlockChain) FetchUtxoView(tx *dcrutil.Tx, treeValid bool) (*UtxoViewpoi
}
}
}
view.SetBestHash(b.bestNode.hash)
view.SetBestHash(&b.bestNode.hash)
// Create a set of needed transactions based on those referenced by the
// inputs of the passed transaction. Also, add the passed transaction

View File

@ -7,7 +7,6 @@ package blockchain
import (
"bytes"
"errors"
"fmt"
"math"
"math/big"
@ -529,7 +528,7 @@ func checkBlockSanity(block *dcrutil.Block, timeSource MedianTimeSource,
}
// The number of votes must be the same as the number declared in the
// header. The same is true for tickets and revocations.
// Build merkle tree and ensure the calculated merkle root matches the
// entry in the block header. This also has the effect of caching all
@ -756,15 +755,13 @@ func (b *BlockChain) checkDupTxs(txSet []*dcrutil.Tx,
}
// CheckBlockStakeSanity performs a series of checks on a block to ensure that the
// information from the block's header about stake is sane. For instance, the
// number of SSGen tx must be equal to voters.
// TODO: We can consider breaking this into two functions and making some of these
// checks go through in processBlock, however if a block has demonstrable PoW it
// seems unlikely that it will have stake errors (because the miner is then just
// wasting hash power).
func (b *BlockChain) CheckBlockStakeSanity(tixStore TicketStore,
stakeValidationHeight int64, node *blockNode, block *dcrutil.Block,
parent *dcrutil.Block, chainParams *chaincfg.Params) error {
func (b *BlockChain) CheckBlockStakeSanity(stakeValidationHeight int64, node *blockNode, block *dcrutil.Block, parent *dcrutil.Block, chainParams *chaincfg.Params) error {
// Setup variables.
stakeTransactions := block.STransactions()
@ -782,6 +779,11 @@ func (b *BlockChain) CheckBlockStakeSanity(tixStore TicketStore,
stakeEnabledHeight := chainParams.StakeEnabledHeight
parentStakeNode, err := b.fetchStakeNode(node.parent)
if err != nil {
return err
}
// Do some preliminary checks on each stake transaction to ensure they
// are sane before continuing.
ssGens := 0 // Votes
@ -821,6 +823,21 @@ func (b *BlockChain) CheckBlockStakeSanity(tixStore TicketStore,
return ruleError(ErrInvalidEarlyStakeTx, errStr)
}
// Check the stake difficulty.
calcSBits, err := b.calcNextRequiredStakeDifficulty(node.parent)
if err != nil {
errStr := fmt.Sprintf("couldn't calculate stake difficulty for "+
"block node %v: %v",
node.hash, err)
return ruleError(ErrUnexpectedDifficulty, errStr)
}
if block.MsgBlock().Header.SBits != calcSBits {
errStr := fmt.Sprintf("block had unexpected stake difficulty "+
"(%v given, %v expected)",
block.MsgBlock().Header.SBits, calcSBits)
return ruleError(ErrUnexpectedDifficulty, errStr)
}
// ----------------------------------------------------------------------------
// SStx Tx Handling
// ----------------------------------------------------------------------------
@ -851,9 +868,9 @@ func (b *BlockChain) CheckBlockStakeSanity(tixStore TicketStore,
}
// 2. Ensure the number of SStx tx in the block is the same as FreshStake
// in the header. This is also tested for in checkBlockSanity.
// 3. Check to make sure we haven't exceeded max number of new SStx. May not
// need this check, as the above one should fail if you overflow uint8.
if numSStxTx > int(chainParams.MaxFreshStakePerBlock) {
errStr := fmt.Sprintf("Error in stake consensus: the number of SStx tx "+
@ -875,19 +892,12 @@ func (b *BlockChain) CheckBlockStakeSanity(tixStore TicketStore,
}
// Check the ticket pool size.
_, calcPoolSize, _, err := b.getWinningTicketsInclStore(node, tixStore)
if err != nil {
log.Tracef("failed to retrieve poolsize for stake "+
"consensus: %v", err.Error())
return err
}
if calcPoolSize != poolSize {
if parentStakeNode.PoolSize() != poolSize {
errStr := fmt.Sprintf("Error in stake consensus: the poolsize "+
"in block %v was %v, however we expected %v",
node.hash,
poolSize,
calcPoolSize)
parentStakeNode.PoolSize())
return ruleError(ErrPoolSize, errStr)
}
@ -941,13 +951,9 @@ func (b *BlockChain) CheckBlockStakeSanity(tixStore TicketStore,
// 1. Retrieve an emulated ticket database of SStxMemMaps from both the
// ticket database and the ticket store.
ticketsWhichCouldBeUsed := make(map[chainhash.Hash]struct{}, ticketsPerBlock)
ticketSlice, calcPoolSize, finalStateCalc, err :=
b.getWinningTicketsInclStore(node, tixStore)
if err != nil {
errStr := fmt.Sprintf("unexpected getWinningTicketsInclStore error: %v",
err.Error())
return errors.New(errStr)
}
ticketSlice := parentStakeNode.Winners()
calcPoolSize := parentStakeNode.PoolSize()
finalStateCalc := parentStakeNode.FinalState()
// 2. Obtain the tickets which could have been used on the block for votes
// and then check below to make sure that these were indeed the tickets
@ -1007,9 +1013,9 @@ func (b *BlockChain) CheckBlockStakeSanity(tixStore TicketStore,
}
// 4. Check and make sure that we have the same number of SSGen tx as we do
// votes. Already checked in checkBlockSanity.
// 5. Check for too many voters. Already checked in checkBlockSanity.
// 6. Determine if TxTreeRegular should be valid or not, and then check it
// against what is provided in the block header.
@ -1047,18 +1053,6 @@ func (b *BlockChain) CheckBlockStakeSanity(tixStore TicketStore,
// revocations.
// 4. Check for revocation overflows.
numSSRtxTx := 0
missedTickets, err := b.GenerateMissedTickets(tixStore)
if err != nil {
h := block.Sha()
str := fmt.Sprintf("Failed to generate missed tickets data "+
"for block %v, height %v! Error given: %v",
h,
block.Height(),
err.Error())
return errors.New(str)
}
for _, staketx := range stakeTransactions {
if is, _ := stake.IsSSRtx(staketx); is {
numSSRtxTx++
@ -1070,7 +1064,7 @@ func (b *BlockChain) CheckBlockStakeSanity(tixStore TicketStore,
ticketMissed := false
if _, exists := missedTickets[sstxHash]; exists {
if parentStakeNode.ExistsMissedTicket(sstxHash) {
ticketMissed = true
}
@ -1084,9 +1078,9 @@ func (b *BlockChain) CheckBlockStakeSanity(tixStore TicketStore,
}
// 3. Check and make sure that we have the same number of SSRtx tx as we do
// revocations. Already checked in checkBlockSanity.
// 4. Check for revocation overflows. Should be impossible given the above
// check, but check anyway.
if numSSRtxTx > math.MaxUint8 {
errStr := fmt.Sprintf("Error in stake consensus: the number of SSRtx tx "+
@ -1103,9 +1097,9 @@ func (b *BlockChain) CheckBlockStakeSanity(tixStore TicketStore,
// 2. Check and make sure that the ticketpool size is calculated correctly
// after accounting for spent, missed, and expired tickets.
// 1. Ensure that all stake transactions are accounted for. If not, this
// indicates that there was some sort of non-standard stake tx present
// in the block. This is already checked before, but check again here.
stakeTxSum := numSStxTx + numSSGenTx + numSSRtxTx
if stakeTxSum != len(stakeTransactions) {
@ -1260,7 +1254,7 @@ func CheckTransactionInputs(subsidyCache *SubsidyCache, tx *dcrutil.Tx,
sstxHash := sstxIn.PreviousOutPoint.Hash
// Calculate the theoretical stake vote subsidy by extracting the vote
// height. Should be impossible because IsSSGen requires this byte string
// to be a certain number of bytes.
_, heightVotingOn, err := stake.SSGenBlockVotedOn(tx)
if err != nil {
@ -1284,7 +1278,7 @@ func CheckTransactionInputs(subsidyCache *SubsidyCache, tx *dcrutil.Tx,
// to make sure that the reward has been calculated correctly from the
// subsidy and the inputs.
// We also need to make sure that the SSGen outputs that are P2PKH go
// to the addresses specified in the original SSTx. Check that too.
utxoEntrySstx, exists := utxoView.entries[sstxHash]
if !exists || utxoEntrySstx == nil {
errStr := fmt.Sprintf("Unable to find input sstx transaction "+
@ -1293,7 +1287,7 @@ func CheckTransactionInputs(subsidyCache *SubsidyCache, tx *dcrutil.Tx,
}
// While we're here, double check to make sure that the input is from an
// SStx. By doing so, you also ensure the first output is OP_SSTX tagged.
if utxoEntrySstx.TransactionType() != stake.TxTypeSStx {
errStr := fmt.Sprintf("Input transaction %v for SSGen was not "+
"an SStx tx (given input: %v)", txHash, sstxHash)
@ -1424,7 +1418,7 @@ func CheckTransactionInputs(subsidyCache *SubsidyCache, tx *dcrutil.Tx,
}
// While we're here, double check to make sure that the input is from an
// SStx. By doing so, you also ensure the first output is OP_SSTX tagged.
if utxoEntrySstx.TransactionType() != stake.TxTypeSStx {
errStr := fmt.Sprintf("Input transaction %v for SSRtx %v was not"+
"an SStx tx", txHash, sstxHash)
@ -1808,7 +1802,7 @@ func CountP2SHSigOps(tx *dcrutil.Tx, isCoinBaseTx bool, isStakeBaseTx bool,
return 0, nil
}
// Stakebase (SSGen) transactions have no P2SH inputs. Same with SSRtx,
// but they will still pass the checks below.
if isStakeBaseTx {
return 0, nil
@ -1868,7 +1862,7 @@ func CountP2SHSigOps(tx *dcrutil.Tx, isCoinBaseTx bool, isStakeBaseTx bool,
}
// checkNumSigOps checks the number of P2SH signature operations to make
// sure they don't overflow the limits. It takes a cumulative number of sig
// ops as an argument and increments with each call.
// TxTree true == Regular, false == Stake
func checkNumSigOps(tx *dcrutil.Tx, utxoView *UtxoViewpoint, index int,
@ -1907,7 +1901,7 @@ func checkNumSigOps(tx *dcrutil.Tx, utxoView *UtxoViewpoint, index int,
}
// checkStakeBaseAmounts calculates the total amount given as subsidy from
// single stakebase transactions (votes) within a block. This function skips a
// ton of checks already performed by CheckTransactionInputs.
func checkStakeBaseAmounts(subsidyCache *SubsidyCache, height int64,
params *chaincfg.Params, txs []*dcrutil.Tx, utxoView *UtxoViewpoint) error {
@ -1949,7 +1943,7 @@ func checkStakeBaseAmounts(subsidyCache *SubsidyCache, height int64,
}
// getStakeBaseAmounts calculates the total amount given as subsidy from
// the collective stakebase transactions (votes) within a block. This
// function skips a ton of checks already performed by
// CheckTransactionInputs.
func getStakeBaseAmounts(txs []*dcrutil.Tx, utxoView *UtxoViewpoint) (int64, error) {
@ -2200,7 +2194,7 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *dcrutil.Block,
// allowed a block that is no longer valid. However, since the
// implementation only currently uses memory for the side chain blocks,
// it isn't currently necessary.
parentBlock, err := b.getBlockFromHash(&node.header.PrevBlock)
parentBlock, err := b.fetchBlockFromHash(&node.header.PrevBlock)
if err != nil {
return ruleError(ErrMissingParent, err.Error())
}
@ -2226,21 +2220,8 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *dcrutil.Block,
return err
}
// Check to ensure consensus via the PoS ticketing system versus the
// information stored in the header.
ticketStore, err := b.fetchTicketStore(node.parent)
if err != nil {
log.Tracef("Failed to generate ticket store for incoming "+
"node %v; error given: %v", node.hash, err)
return err
}
err = b.CheckBlockStakeSanity(ticketStore,
b.chainParams.StakeValidationHeight,
node,
block,
parentBlock,
b.chainParams)
err = b.CheckBlockStakeSanity(b.chainParams.StakeValidationHeight, node,
block, parentBlock, b.chainParams)
if err != nil {
log.Tracef("CheckBlockStakeSanity failed for incoming "+
"node %v; error given: %v", node.hash, err)
@ -2393,7 +2374,7 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *dcrutil.Block,
// Update the best hash for view to include this block since all of its
// transactions have been connected.
utxoView.SetBestHash(node.hash)
utxoView.SetBestHash(&node.hash)
return nil
}
@ -2408,15 +2389,17 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *dcrutil.Block,
//
// This function is safe for concurrent access.
func (b *BlockChain) CheckConnectBlock(block *dcrutil.Block) error {
b.chainLock.Lock()
defer b.chainLock.Unlock()
parentHash := block.MsgBlock().Header.PrevBlock
prevNode, err := b.findNode(&parentHash)
if err != nil {
return ruleError(ErrMissingParent, err.Error())
}
var voteBitsStake []uint16
newNode := newBlockNode(&block.MsgBlock().Header, block.Sha(),
block.Height(), voteBitsStake)
block.Height(), ticketsSpentInBlock(block), ticketsRevokedInBlock(block))
newNode.parent = prevNode
newNode.workSum.Add(prevNode.workSum, newNode.workSum)
if prevNode != nil {
@ -2427,9 +2410,9 @@ func (b *BlockChain) CheckConnectBlock(block *dcrutil.Block) error {
// If we are extending the main (best) chain with a new block,
// just use the ticket database we already have.
if b.bestNode == nil || (prevNode != nil &&
prevNode.hash.IsEqual(b.bestNode.hash)) {
prevNode.hash == b.bestNode.hash) {
view := NewUtxoViewpoint()
view.SetBestHash(prevNode.hash)
view.SetBestHash(&prevNode.hash)
return b.checkConnectBlock(newNode, block, view, nil)
}
@ -2438,23 +2421,23 @@ func (b *BlockChain) CheckConnectBlock(block *dcrutil.Block) error {
// transactions and spend information for the blocks which would be
// disconnected during a reorganize to the point of view of the
// node just before the requested node.
detachNodes, attachNodes := b.getReorganizeNodes(prevNode)
detachNodes, attachNodes, err := b.getReorganizeNodes(prevNode)
if err != nil {
return err
}
view := NewUtxoViewpoint()
view.SetBestHash(b.bestNode.hash)
view.SetBestHash(&b.bestNode.hash)
view.SetStakeViewpoint(ViewpointPrevValidInitial)
var stxos []spentTxOut
for e := detachNodes.Front(); e != nil; e = e.Next() {
n := e.Value.(*blockNode)
block, err := b.getBlockFromHash(n.hash)
block, err := b.fetchBlockFromHash(&n.hash)
if err != nil {
return err
}
parent, err := b.getBlockFromHash(&n.header.PrevBlock)
parent, err := b.fetchBlockFromHash(&n.header.PrevBlock)
if err != nil {
return err
}
@ -2490,14 +2473,14 @@ func (b *BlockChain) CheckConnectBlock(block *dcrutil.Block) error {
// transactions and spend information from each of the nodes to attach.
for e := attachNodes.Front(); e != nil; e = e.Next() {
n := e.Value.(*blockNode)
block, exists := b.blockCache[*n.hash]
block, exists := b.blockCache[n.hash]
if !exists {
return fmt.Errorf("unable to find block %v in "+
"side chain cache for utxo view construction",
n.hash)
}
parent, err := b.getBlockFromHash(&n.header.PrevBlock)
parent, err := b.fetchBlockFromHash(&n.header.PrevBlock)
if err != nil {
return err
}

View File

@ -14,6 +14,7 @@ import (
type blockProgressLogger struct {
receivedLogBlocks int64
receivedLogTx int64
receivedLogSTx int64
lastBlockLogTime time.Time
subsystemLogger btclog.Logger
@ -33,15 +34,15 @@ func newBlockProgressLogger(progressMessage string, logger btclog.Logger) *block
}
}
// LogBlockHeight logs a new block height as an information message to show
// logBlockHeight logs a new block height as an information message to show
// progress to the user. In order to prevent spam, it limits logging to one
// message every 10 seconds with duration and totals included.
func (b *blockProgressLogger) LogBlockHeight(block *dcrutil.Block) {
func (b *blockProgressLogger) logBlockHeight(block *dcrutil.Block) {
b.Lock()
defer b.Unlock()
b.receivedLogBlocks++
b.receivedLogTx += int64(len(block.MsgBlock().Transactions))
b.receivedLogSTx += int64(len(block.MsgBlock().STransactions))
now := time.Now()
duration := now.Sub(b.lastBlockLogTime)
@ -62,12 +63,19 @@ func (b *blockProgressLogger) LogBlockHeight(block *dcrutil.Block) {
if b.receivedLogTx == 1 {
txStr = "transaction"
}
b.subsystemLogger.Infof("%s %d %s in the last %s (%d %s, height %d, %s)",
b.progressAction, b.receivedLogBlocks, blockStr, tDuration, b.receivedLogTx,
txStr, block.Height(), block.MsgBlock().Header.Timestamp)
stxStr := "stake transactions"
if b.receivedLogSTx == 1 {
stxStr = "stake transaction"
}
b.subsystemLogger.Infof("%s %d %s in the last %s (%d %s, %d %s, height "+
"%d, %s)",
b.progressAction, b.receivedLogBlocks, blockStr, tDuration,
b.receivedLogTx, txStr, b.receivedLogSTx, stxStr, block.Height(),
block.MsgBlock().Header.Timestamp)
b.receivedLogBlocks = 0
b.receivedLogTx = 0
b.receivedLogSTx = 0
b.lastBlockLogTime = now
}
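// Illustrative only, not part of this commit: with the new stake transaction
// counter, a progress line emitted by the Infof call above reads roughly like
//
//	Processed 523 blocks in the last 10.01s (10432 transactions, 2615 stake
//	transactions, height 121400, 2016-01-12 09:21:07 -0500 EST)
//
// where the action word, counts, height and timestamp are hypothetical values
// that depend on the run.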

View File

@ -9,7 +9,6 @@ import (
"bytes"
"container/list"
"encoding/gob"
"errors"
"fmt"
"io/ioutil"
"math/rand"
@ -20,7 +19,6 @@ import (
"time"
"github.com/decred/dcrd/blockchain"
"github.com/decred/dcrd/blockchain/dbnamespace"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/chaincfg/chainhash"
@ -57,6 +55,15 @@ const (
// maxRequestedTxns is the maximum number of requested transactions
// shas to store in memory.
maxRequestedTxns = wire.MaxInvPerMsg
// maxLotteryDataBlockDelta is the maximum number of blocks below the
// current best block for which block lottery data will be calculated.
// Below bestBlockHeight-maxLotteryDataBlockDelta, block lottery data
// will not be calculated. This helps to reduce exhaustion attacks that
// might arise from sending old orphan blocks and forcing nodes to
// do expensive lottery data look ups for these blocks. It is
// equivalent to 24 hours of work on mainnet.
maxLotteryDataBlockDelta = 288
)
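// Minimal sketch, not part of this commit, of the cutoff the constant above
// enables, mirroring the checks in handleBlockMsg and the processBlockMsg
// handler below: blocks more than maxLotteryDataBlockDelta below the current
// best height skip the expensive lottery data lookup entirely.
//
//	_, bestHeight := b.chainState.Best()
//	blockHeight := int64(block.MsgBlock().Header.Height)
//	tooOldForLotteryData := blockHeight <=
//		(bestHeight - maxLotteryDataBlockDelta)
//	if !tooOldForLotteryData {
//		// Only now pay for b.chain.LotteryDataForBlock(blockSha).
//	}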
// zeroHash is the zero value hash (all zeros). It is defined as a convenience.
@ -233,40 +240,11 @@ type forceReorganizationMsg struct {
reply chan forceReorganizationResponse
}
// getLotterDataResponse is a response sent to the reply channel of a
// getLotteryDataMsg query.
type getLotterDataResponse struct {
finalState [6]byte
poolSize uint32
winningTickets []chainhash.Hash
err error
}
// getLotteryDataMsg is a message type to be sent across the message
// channel for requesting lottery data about some block.
type getLotteryDataMsg struct {
hash chainhash.Hash
reply chan getLotterDataResponse
}
// checkMissedTicketsResponse is a response sent to the reply channel of a
// checkMissedTicketsMsg query.
type checkMissedTicketsResponse struct {
missedTickets map[chainhash.Hash]bool
}
// checkMissedTicketsMsg is a message type to be sent across the message
// channel used for checking whether or not a list of tickets has been missed.
type checkMissedTicketsMsg struct {
tickets []chainhash.Hash
reply chan checkMissedTicketsResponse
}
// getTopBlockResponse is a response to the request for the block at HEAD of the
// blockchain. We need to be able to obtain this from blockChain for mining
// purposes.
type getTopBlockResponse struct {
block dcrutil.Block
block *dcrutil.Block
err error
}
@ -320,19 +298,6 @@ type isCurrentMsg struct {
reply chan bool
}
// missedTicketsMsg handles a request for the list of currently missed tickets
// from the ticket database.
type missedTicketsMsg struct {
reply chan missedTicketsResponse
}
// missedTicketsResponse is a response sent to the reply channel of a
// ticketBucketsMsg.
type missedTicketsResponse struct {
Tickets stake.SStxMemMap
err error
}
// pauseMsg is a message type to be sent across the message channel for
// pausing the block manager. This effectively provides the caller with
// exclusive access over the manager until a receive is performed on the
@ -341,62 +306,6 @@ type pauseMsg struct {
unpause <-chan struct{}
}
// ticketsForAddressMsg handles a request for obtaining all the current
// tickets corresponding to some address.
type ticketsForAddressMsg struct {
Address dcrutil.Address
reply chan ticketsForAddressResponse
}
// ticketsForAddressResponse is a response to the reply channel of a
// ticketsForAddressMsg.
type ticketsForAddressResponse struct {
Tickets []chainhash.Hash
err error
}
// existsLiveTicketMsg handles a request for obtaining whether or not a
// ticket exists in the live tickets map of the blockchain stake database.
type existsLiveTicketMsg struct {
hash *chainhash.Hash
reply chan existsLiveTicketResponse
}
// existsLiveTicketResponse is a response to the reply channel of a
// existsLiveTicketMsg.
type existsLiveTicketResponse struct {
Exists bool
err error
}
// existsLiveTicketsMsg handles a request for obtaining whether or not a ticket
// from a slice of tickets exists in the live tickets map of the blockchain stake
// database.
type existsLiveTicketsMsg struct {
hashes []*chainhash.Hash
reply chan existsLiveTicketsResponse
}
// existsLiveTicketsResponse is a response to the reply channel of a
// existsLiveTicketsMsg.
type existsLiveTicketsResponse struct {
Exists []bool
err error
}
// liveTicketsMsg handles a request for obtaining the current ticket hashes
// in the live ticket pool.
type liveTicketsMsg struct {
reply chan liveTicketsResponse
}
// liveTicketsResponse is a response to the reply channel of a
// liveTicketsMsg.
type liveTicketsResponse struct {
Live []*chainhash.Hash
err error
}
// getCurrentTemplateMsg handles a request for the current mining block template.
type getCurrentTemplateMsg struct {
reply chan getCurrentTemplateResponse
@ -466,7 +375,7 @@ type chainState struct {
nextStakeDifficulty int64
winningTickets []chainhash.Hash
missedTickets []chainhash.Hash
curBlockHeader *wire.BlockHeader
curBlockHeader wire.BlockHeader
pastMedianTime time.Time
pastMedianTimeErr error
}
@ -525,21 +434,13 @@ func (c *chainState) CurrentlyMissed() []chainhash.Hash {
// next block as inputs for SSGen.
//
// This function is safe for concurrent access.
func (c *chainState) GetTopBlockHeader() *wire.BlockHeader {
func (c *chainState) GetTopBlockHeader() wire.BlockHeader {
c.Lock()
defer c.Unlock()
return c.curBlockHeader
}
// BlockLotteryData refers to cached data that is generated when a block
// is inserted, so that it doesn't later need to be recalculated.
type BlockLotteryData struct {
ntfnData *WinningTicketsNtfnData
poolSize uint32
finalState [6]byte
}
// blockManager provides a concurrency safe block manager for handling all
// incoming blocks.
type blockManager struct {
@ -563,15 +464,19 @@ type blockManager struct {
wg sync.WaitGroup
quit chan struct{}
blockLotteryDataCache map[chainhash.Hash]*BlockLotteryData
blockLotteryDataCacheMutex *sync.Mutex
// The following fields are used for headers-first mode.
headersFirstMode bool
headerList *list.List
startHeader *list.Element
nextCheckpoint *chaincfg.Checkpoint
// lotteryDataBroadcastMutex is a mutex protecting the map
// that checks if block lottery data has been broadcasted
// yet for any given block, so notifications are never
// duplicated.
lotteryDataBroadcast map[chainhash.Hash]struct{}
lotteryDataBroadcastMutex sync.Mutex
cachedCurrentTemplate *BlockTemplate
cachedParentTemplate *BlockTemplate
AggressiveMining bool
@ -604,7 +509,7 @@ func (b *blockManager) updateChainState(newestHash *chainhash.Hash,
nextStakeDiff int64,
winningTickets []chainhash.Hash,
missedTickets []chainhash.Hash,
curBlockHeader *wire.BlockHeader) {
curBlockHeader wire.BlockHeader) {
b.chainState.Lock()
defer b.chainState.Unlock()
@ -1284,20 +1189,22 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
} else {
// When the block is not an orphan, log information about it and
// update the chain state.
b.progressLogger.LogBlockHeight(bmsg.block)
b.progressLogger.logBlockHeight(bmsg.block)
r := b.server.rpcServer
// Query the DB for the winning SStx for the next top block if we've
// reached stake validation height. Broadcast them if this is the first
// time determining them.
b.blockLotteryDataCacheMutex.Lock()
broadcastWinners := false
lotteryData := new(BlockLotteryData)
_, exists := b.blockLotteryDataCache[*blockSha]
if !exists {
winningTickets, poolSize, finalState, err :=
b.chain.GetWinningTickets(*blockSha)
// Determine if this block is recent enough that we need to calculate
// block lottery data for it.
_, bestHeight := b.chainState.Best()
blockHeight := int64(bmsg.block.MsgBlock().Header.Height)
tooOldForLotteryData := blockHeight <=
(bestHeight - maxLotteryDataBlockDelta)
if !tooOldForLotteryData {
// Query the DB for the winning SStx for the next top block if we've
// reached stake validation height. Broadcast them if this is the
// first time determining them and we're synced to the latest
// checkpoint.
winningTickets, _, _, err :=
b.chain.LotteryDataForBlock(blockSha)
if err != nil && int64(bmsg.block.MsgBlock().Header.Height) >=
b.server.chainParams.StakeValidationHeight-1 {
bmgrLog.Errorf("Failed to get next winning tickets: %v", err)
@ -1305,29 +1212,26 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
code, reason := errToRejectErr(err)
bmsg.peer.PushRejectMsg(wire.CmdBlock, code, reason,
blockSha, false)
b.blockLotteryDataCacheMutex.Unlock()
return
}
// Push winning tickets notifications if we need to.
winningTicketsNtfn := &WinningTicketsNtfnData{
*blockSha,
int64(bmsg.block.MsgBlock().Header.Height),
winningTickets}
lotteryData = &BlockLotteryData{
winningTicketsNtfn,
uint32(poolSize),
finalState,
BlockHash: *blockSha,
BlockHeight: int64(bmsg.block.MsgBlock().Header.Height),
Tickets: winningTickets}
b.lotteryDataBroadcastMutex.Lock()
_, beenNotified := b.lotteryDataBroadcast[*blockSha]
b.lotteryDataBroadcastMutex.Unlock()
if !beenNotified && r != nil &&
int64(bmsg.block.MsgBlock().Header.Height) >
b.server.chainParams.LatestCheckpointHeight() {
r.ntfnMgr.NotifyWinningTickets(winningTicketsNtfn)
b.lotteryDataBroadcastMutex.Lock()
b.lotteryDataBroadcast[*blockSha] = struct{}{}
b.lotteryDataBroadcastMutex.Unlock()
}
b.blockLotteryDataCache[*blockSha] = lotteryData
broadcastWinners = true
b.blockLotteryDataCacheMutex.Unlock()
} else {
lotteryData, _ = b.blockLotteryDataCache[*blockSha]
b.blockLotteryDataCacheMutex.Unlock()
}
if r != nil && broadcastWinners {
// Rebroadcast the existing data to WS clients.
r.ntfnMgr.NotifyWinningTickets(lotteryData.ntfnData)
}
if onMainChain {
@ -1346,7 +1250,11 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
best := b.chain.BestSnapshot()
// Query the DB for the missed tickets for the next top block.
missedTickets := b.chain.GetMissedTickets()
missedTickets, err := b.chain.MissedTickets()
if err != nil {
bmgrLog.Warnf("Failed to get missed tickets "+
"for best block %v: %v", best.Hash, err)
}
// Retrieve the current block header.
curBlockHeader := b.chain.GetCurrentBlockHeader()
@ -1371,10 +1279,16 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
b.server.txMemPool.PruneExpiredTx(best.Height)
}
b.updateChainState(best.Hash, best.Height,
lotteryData.finalState, lotteryData.poolSize,
nextStakeDiff, lotteryData.ntfnData.Tickets,
missedTickets, curBlockHeader)
winningTickets, poolSize, finalState, err :=
b.chain.LotteryDataForBlock(blockSha)
if err != nil {
bmgrLog.Warnf("Failed to get determine lottery "+
"data for new best block: %v", err)
}
b.updateChainState(best.Hash, best.Height, finalState,
uint32(poolSize), nextStakeDiff, winningTickets,
missedTickets, *curBlockHeader)
// Update this peer's latest block height, for future
// potential sync node candidacy.
@ -1919,20 +1833,9 @@ out:
// side chain or have caused a reorg.
best := b.chain.BestSnapshot()
// Fetch the required lottery data from the cache;
// it must already be there.
b.blockLotteryDataCacheMutex.Lock()
lotteryData, exists := b.blockLotteryDataCache[*best.Hash]
if !exists {
b.blockLotteryDataCacheMutex.Unlock()
msg.reply <- forceReorganizationResponse{
err: fmt.Errorf("Failed to find lottery data in "+
"cache while attempting reorganize to block %v",
best.Hash),
}
continue
}
b.blockLotteryDataCacheMutex.Unlock()
// Fetch the required lottery data.
winningTickets, poolSize, finalState, err :=
b.chain.LotteryDataForBlock(best.Hash)
// Update registered websocket clients on the
// current stake difficulty.
@ -1955,18 +1858,22 @@ out:
b.server.txMemPool.PruneExpiredTx(best.Height)
}
missedTickets := b.chain.GetMissedTickets()
missedTickets, err := b.chain.MissedTickets()
if err != nil {
bmgrLog.Warnf("Failed to get missed tickets"+
": %v", err)
}
curBlockHeader := b.chain.GetCurrentBlockHeader()
b.updateChainState(best.Hash,
best.Height,
lotteryData.finalState,
lotteryData.poolSize,
finalState,
uint32(poolSize),
nextStakeDiff,
lotteryData.ntfnData.Tickets,
winningTickets,
missedTickets,
curBlockHeader)
*curBlockHeader)
}
msg.reply <- forceReorganizationResponse{
@ -1974,7 +1881,7 @@ out:
}
case getBlockFromHashMsg:
b, err := b.chain.GetBlockFromHash(&msg.hash)
b, err := b.chain.FetchBlockFromHash(&msg.hash)
msg.reply <- getBlockFromHashResponse{
block: b,
err: err,
@ -1987,16 +1894,6 @@ out:
err: err,
}
case getLotteryDataMsg:
winningTickets, poolSize, finalState, err :=
b.chain.GetWinningTickets(msg.hash)
msg.reply <- getLotterDataResponse{
finalState: finalState,
poolSize: uint32(poolSize),
winningTickets: winningTickets,
err: err,
}
case getTopBlockMsg:
b, err := b.chain.GetTopBlock()
msg.reply <- getTopBlockResponse{
@ -2016,16 +1913,19 @@ out:
continue
}
// Get the winning tickets. If they've yet to be broadcasted,
// broadcast them.
b.blockLotteryDataCacheMutex.Lock()
broadcastWinners := false
lotteryData := new(BlockLotteryData)
_, exists := b.blockLotteryDataCache[*msg.block.Sha()]
if !exists {
winningTickets, poolSize, finalState, err :=
b.chain.GetWinningTickets(*msg.block.Sha())
// Get the winning tickets if the block is not an
// orphan and if it's recent. If they've yet to be
// broadcasted, broadcast them.
_, bestHeight := b.chainState.Best()
blockHeight := int64(msg.block.MsgBlock().Header.Height)
tooOldForLotteryData := blockHeight <=
(bestHeight - maxLotteryDataBlockDelta)
if !isOrphan && !tooOldForLotteryData {
b.lotteryDataBroadcastMutex.Lock()
_, beenNotified := b.lotteryDataBroadcast[*msg.block.Sha()]
b.lotteryDataBroadcastMutex.Unlock()
winningTickets, _, _, err :=
b.chain.LotteryDataForBlock(msg.block.Sha())
if err != nil && int64(msg.block.MsgBlock().Header.Height) >=
b.server.chainParams.StakeValidationHeight-1 {
bmgrLog.Warnf("Stake failure in lottery tickets "+
@ -2034,33 +1934,30 @@ out:
isOrphan: false,
err: err,
}
b.blockLotteryDataCacheMutex.Unlock()
continue
}
lotteryData.poolSize = uint32(poolSize)
lotteryData.finalState = finalState
lotteryData.ntfnData = &WinningTicketsNtfnData{
*msg.block.Sha(),
int64(msg.block.MsgBlock().Header.Height),
winningTickets}
b.blockLotteryDataCache[*msg.block.Sha()] = lotteryData
broadcastWinners = true
} else {
lotteryData, _ = b.blockLotteryDataCache[*msg.block.Sha()]
}
r := b.server.rpcServer
if r != nil && !isOrphan && broadcastWinners &&
(msg.block.Height() >=
b.server.chainParams.StakeValidationHeight-1) {
// Notify registered websocket clients of newly
// eligible tickets to vote on.
if _, is := b.blockLotteryDataCache[*msg.block.Sha()]; !is {
r.ntfnMgr.NotifyWinningTickets(lotteryData.ntfnData)
// eligible tickets to vote on if needed. Only
// do this if we're above the latest checkpoint
// height.
r := b.server.rpcServer
if r != nil && !isOrphan && !beenNotified &&
(msg.block.Height() >=
b.server.chainParams.StakeValidationHeight-1) &&
(msg.block.Height() >
b.server.chainParams.LatestCheckpointHeight()) {
ntfnData := &WinningTicketsNtfnData{
*msg.block.Sha(),
int64(msg.block.MsgBlock().Header.Height),
winningTickets}
r.ntfnMgr.NotifyWinningTickets(ntfnData)
b.lotteryDataBroadcastMutex.Lock()
b.lotteryDataBroadcast[*msg.block.Sha()] = struct{}{}
b.lotteryDataBroadcastMutex.Unlock()
}
}
b.blockLotteryDataCacheMutex.Unlock()
// If the block added to the main chain, then we need to
// update the tip locally on block manager.
@ -2078,29 +1975,45 @@ out:
bmgrLog.Warnf("Failed to get next stake difficulty "+
"calculation: %v", err)
} else {
r.ntfnMgr.NotifyStakeDifficulty(
&StakeDifficultyNtfnData{
*best.Hash,
best.Height,
nextStakeDiff,
})
b.server.txMemPool.PruneStakeTx(nextStakeDiff,
best.Height)
b.server.txMemPool.PruneExpiredTx(
best.Height)
r := b.server.rpcServer
if r != nil {
r.ntfnMgr.NotifyStakeDifficulty(
&StakeDifficultyNtfnData{
*best.Hash,
best.Height,
nextStakeDiff,
})
}
}
missedTickets := b.chain.GetMissedTickets()
b.server.txMemPool.PruneStakeTx(nextStakeDiff,
best.Height)
b.server.txMemPool.PruneExpiredTx(
best.Height)
missedTickets, err := b.chain.MissedTickets()
if err != nil {
bmgrLog.Warnf("Failed to get missing tickets for "+
"incoming block %v: %v", best.Hash, err)
}
curBlockHeader := b.chain.GetCurrentBlockHeader()
winningTickets, poolSize, finalState, err :=
b.chain.LotteryDataForBlock(msg.block.Sha())
if err != nil {
bmgrLog.Warnf("Failed to determine block "+
"lottery data for incoming best block %v: %v",
best.Hash, err)
}
b.updateChainState(best.Hash,
best.Height,
lotteryData.finalState,
lotteryData.poolSize,
finalState,
uint32(poolSize),
nextStakeDiff,
lotteryData.ntfnData.Tickets,
winningTickets,
missedTickets,
curBlockHeader)
*curBlockHeader)
}
// Allow any clients performing long polling via the
@ -2127,45 +2040,10 @@ out:
case isCurrentMsg:
msg.reply <- b.current()
case missedTicketsMsg:
tickets, err := b.chain.MissedTickets()
msg.reply <- missedTicketsResponse{
Tickets: tickets,
err: err,
}
case pauseMsg:
// Wait until the sender unpauses the manager.
<-msg.unpause
case ticketsForAddressMsg:
tickets, err := b.chain.TicketsWithAddress(msg.Address)
msg.reply <- ticketsForAddressResponse{
Tickets: tickets,
err: err,
}
case existsLiveTicketMsg:
exists, err := b.chain.CheckLiveTicket(msg.hash)
msg.reply <- existsLiveTicketResponse{
Exists: exists,
err: err,
}
case existsLiveTicketsMsg:
exists, err := b.chain.CheckLiveTickets(msg.hashes)
msg.reply <- existsLiveTicketsResponse{
Exists: exists,
err: err,
}
case liveTicketsMsg:
live, err := b.chain.LiveTickets()
msg.reply <- liveTicketsResponse{
Live: live,
err: err,
}
case getCurrentTemplateMsg:
cur := deepCopyBlockTemplate(b.cachedCurrentTemplate)
msg.reply <- getCurrentTemplateResponse{
@ -2223,41 +2101,46 @@ func (b *blockManager) handleNotifyMsg(notification *blockchain.Notification) {
block := band.Block
r := b.server.rpcServer
// Determine the winning tickets for this block, if it hasn't
// already been sent out.
// Determine the winning tickets for this block if it hasn't
// already been sent out. Skip notifications if we're not
// yet synced to the latest checkpoint or if we're before
// the height where we begin voting.
_, bestHeight := b.chainState.Best()
blockHeight := int64(block.MsgBlock().Header.Height)
tooOldForLotteryData := blockHeight <=
(bestHeight - maxLotteryDataBlockDelta)
if block.Height() >=
b.server.chainParams.StakeValidationHeight-1 &&
!tooOldForLotteryData &&
block.Height() > b.server.chainParams.LatestCheckpointHeight() &&
r != nil {
hash := block.Sha()
b.blockLotteryDataCacheMutex.Lock()
lotteryData := new(BlockLotteryData)
b.lotteryDataBroadcastMutex.Lock()
_, beenNotified := b.lotteryDataBroadcast[*hash]
b.lotteryDataBroadcastMutex.Unlock()
_, exists := b.blockLotteryDataCache[*hash]
if !exists {
// Obtain the winning tickets for this block. handleNotifyMsg
// should be safe for concurrent access of things contained
// within blockchain.
wt, ps, fs, err := b.chain.GetWinningTickets(*hash)
if err != nil {
b.blockLotteryDataCacheMutex.Unlock()
bmgrLog.Errorf("Couldn't calculate winning tickets for "+
"accepted block %v: %v", block.Sha(), err.Error())
} else {
lotteryData.finalState = fs
lotteryData.poolSize = uint32(ps)
lotteryData.ntfnData = &WinningTicketsNtfnData{
*hash,
int64(block.MsgBlock().Header.Height),
wt}
b.blockLotteryDataCache[*hash] = lotteryData
// Obtain the winning tickets for this block. handleNotifyMsg
// should be safe for concurrent access of things contained
// within blockchain.
wt, _, _, err := b.chain.LotteryDataForBlock(hash)
if err != nil {
bmgrLog.Errorf("Couldn't calculate winning tickets for "+
"accepted block %v: %v", block.Sha(), err.Error())
} else {
if !beenNotified {
ntfnData := &WinningTicketsNtfnData{
BlockHash: *hash,
BlockHeight: block.Height(),
Tickets: wt,
}
// Notify registered websocket clients of newly
// eligible tickets to vote on.
r.ntfnMgr.NotifyWinningTickets(lotteryData.ntfnData)
b.blockLotteryDataCache[*hash] = lotteryData
b.blockLotteryDataCacheMutex.Unlock()
r.ntfnMgr.NotifyWinningTickets(ntfnData)
b.lotteryDataBroadcastMutex.Lock()
b.lotteryDataBroadcast[*hash] = struct{}{}
b.lotteryDataBroadcastMutex.Unlock()
}
}
}
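The winningtickets broadcast above is deduplicated per block hash: membership in lotteryDataBroadcast is checked under lotteryDataBroadcastMutex before notifying, and the hash is recorded afterwards. A condensed sketch of that notify-once pattern follows; the helper name is illustrative only, and it assumes the chainhash and sync imports already present in the surrounding file.

	// notifyOnce sends the winning tickets notification for a block hash
	// at most once, tracking already-notified hashes in a mutex-guarded set.
	func notifyOnce(hash chainhash.Hash, data *WinningTicketsNtfnData,
		sent map[chainhash.Hash]struct{}, mtx *sync.Mutex,
		notify func(*WinningTicketsNtfnData)) {
		mtx.Lock()
		_, already := sent[hash]
		mtx.Unlock()
		if already {
			return
		}
		notify(data)
		mtx.Lock()
		sent[hash] = struct{}{}
		mtx.Unlock()
	}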
@ -2744,27 +2627,13 @@ func (b *blockManager) GetBlockFromHash(h chainhash.Hash) (*dcrutil.Block, error
return response.block, response.err
}
// GetLotteryData returns the hashes of all the winning tickets for a given
// orphan block along with the pool size and the final state. It is funneled
// through the block manager since blockchain is not safe for concurrent access.
func (b *blockManager) GetLotteryData(hash chainhash.Hash) ([]chainhash.Hash,
uint32, [6]byte, error) {
reply := make(chan getLotterDataResponse)
b.msgChan <- getLotteryDataMsg{
hash: hash,
reply: reply}
response := <-reply
return response.winningTickets, response.poolSize, response.finalState,
response.err
}
// GetTopBlockFromChain obtains the current top block from HEAD of the blockchain.
// Returns a pointer to the cached copy of the block in memory.
func (b *blockManager) GetTopBlockFromChain() (*dcrutil.Block, error) {
reply := make(chan getTopBlockResponse)
b.msgChan <- getTopBlockMsg{reply: reply}
response := <-reply
return &response.block, response.err
return response.block, response.err
}
// ProcessBlock makes use of ProcessBlock on an internal instance of a block
@ -2798,14 +2667,6 @@ func (b *blockManager) IsCurrent() bool {
return <-reply
}
// MissedTickets returns a slice of missed ticket hashes.
func (b *blockManager) MissedTickets() (stake.SStxMemMap, error) {
reply := make(chan missedTicketsResponse)
b.msgChan <- missedTicketsMsg{reply: reply}
response := <-reply
return response.Tickets, response.err
}
// Pause pauses the block manager until the returned channel is closed.
//
// Note that while paused, all peer and block processing is halted. The
@ -2816,47 +2677,12 @@ func (b *blockManager) Pause() chan<- struct{} {
return c
}
// TicketsForAddress returns a list of ticket hashes owned by the address.
func (b *blockManager) TicketsForAddress(address dcrutil.Address) (
[]chainhash.Hash, error) {
reply := make(chan ticketsForAddressResponse)
b.msgChan <- ticketsForAddressMsg{Address: address, reply: reply}
response := <-reply
return response.Tickets, response.err
}
// ExistsLiveTicket returns whether or not a ticket exists in the live tickets
// database.
func (b *blockManager) ExistsLiveTicket(hash *chainhash.Hash) (bool, error) {
reply := make(chan existsLiveTicketResponse)
b.msgChan <- existsLiveTicketMsg{hash: hash, reply: reply}
response := <-reply
return response.Exists, response.err
}
// ExistsLiveTickets returns whether or not tickets in a slice of tickets exist
// in the live tickets database.
func (b *blockManager) ExistsLiveTickets(hashes []*chainhash.Hash) ([]bool, error) {
reply := make(chan existsLiveTicketsResponse)
b.msgChan <- existsLiveTicketsMsg{hashes: hashes, reply: reply}
response := <-reply
return response.Exists, response.err
}
// TicketPoolValue returns the current value of the total stake in the ticket
// pool.
func (b *blockManager) TicketPoolValue() (dcrutil.Amount, error) {
return b.chain.TicketPoolValue()
}
// LiveTickets returns the live tickets currently in the staking pool.
func (b *blockManager) LiveTickets() ([]*chainhash.Hash, error) {
reply := make(chan liveTicketsResponse)
b.msgChan <- liveTicketsMsg{reply: reply}
response := <-reply
return response.Live, response.err
}
// GetCurrentTemplate gets the current block template for mining.
func (b *blockManager) GetCurrentTemplate() *BlockTemplate {
reply := make(chan getCurrentTemplateResponse)
@ -2911,7 +2737,6 @@ func newBlockManager(s *server, indexManager blockchain.IndexManager) (*blockMan
var err error
bm.chain, err = blockchain.New(&blockchain.Config{
DB: s.db,
TMDB: s.tmdb,
ChainParams: s.chainParams,
Notifications: bm.handleNotifyMsg,
SigCache: s.sigCache,
@ -2932,15 +2757,24 @@ func newBlockManager(s *server, indexManager blockchain.IndexManager) (*blockMan
bmgrLog.Info("Checkpoints are disabled")
}
// Dump the blockchain here if asked for it, and quit.
if cfg.DumpBlockchain != "" {
err = dumpBlockChain(best.Height, s.db)
if err != nil {
return nil, err
}
return nil, fmt.Errorf("Block database dump to map completed, closing.")
}
// Query the DB for the current winning ticket data.
wt, ps, fs, err := bm.chain.GetWinningTickets(*best.Hash)
wt, ps, fs, err := bm.chain.LotteryDataForBlock(best.Hash)
if err != nil {
return nil, err
}
// Query the DB for the currently missed tickets.
missedTickets := bm.chain.GetMissedTickets()
if err != nil && best.Height >= bm.server.chainParams.StakeValidationHeight {
missedTickets, err := bm.chain.MissedTickets()
if err != nil {
return nil, err
}
@ -2958,10 +2792,9 @@ func newBlockManager(s *server, indexManager blockchain.IndexManager) (*blockMan
nextStakeDiff,
wt,
missedTickets,
curBlockHeader)
*curBlockHeader)
bm.blockLotteryDataCacheMutex = new(sync.Mutex)
bm.blockLotteryDataCache = make(map[chainhash.Hash]*BlockLotteryData)
bm.lotteryDataBroadcast = make(map[chainhash.Hash]struct{})
return &bm, nil
}
@ -3061,33 +2894,7 @@ func loadBlockDB() (database.DB, error) {
// dumpBlockChain dumps a map of the blockchain blocks as serialized bytes.
func dumpBlockChain(height int64, db database.DB) error {
blockchain := make(map[int64][]byte)
var hash chainhash.Hash
err := db.View(func(dbTx database.Tx) error {
for i := int64(0); i <= height; i++ {
// Fetch blocks and put them in the map
var serializedHeight [4]byte
dbnamespace.ByteOrder.PutUint32(serializedHeight[:], uint32(height))
meta := dbTx.Metadata()
heightIndex := meta.Bucket(dbnamespace.HeightIndexBucketName)
hashBytes := heightIndex.Get(serializedHeight[:])
if hashBytes == nil {
return fmt.Errorf("no block at height %d exists", height)
}
copy(hash[:], hashBytes)
blockBLocal, err := dbTx.FetchBlock(&hash)
if err != nil {
return err
}
blockB := make([]byte, len(blockBLocal))
copy(blockB, blockBLocal)
blockchain[i] = blockB
}
return nil
})
blockchain, err := blockchain.DumpBlockChain(db, height)
if err != nil {
return err
}
@ -3105,47 +2912,5 @@ func dumpBlockChain(height int64, db database.DB) error {
return err
}
if cfg.DumpBlockchain != "" {
err = dumpBlockChain(height, db)
if err != nil {
return err
}
return errors.New("Block database dump to map completed, closing.")
}
return nil
}
// loadTicketDB opens the ticket database and returns a handle to it.
func loadTicketDB(db database.DB,
chainParams *chaincfg.Params) (*stake.TicketDB, error) {
path := cfg.DataDir
filename := filepath.Join(path, "ticketdb.gob")
// Check to see if the tmdb exists on disk.
tmdbExists := true
if _, err := os.Stat(filename); os.IsNotExist(err) {
tmdbExists = false
}
var tmdb stake.TicketDB
if !tmdbExists {
// Load a blank copy of the ticket database and sync it.
err := tmdb.Initialize(chainParams, db)
return &tmdb, err
}
dcrdLog.Infof("Loading ticket database from disk")
err := tmdb.LoadTicketDBs(path,
"ticketdb.gob",
chainParams,
db)
if err != nil {
return nil, err
}
dcrdLog.Infof("Ticket DB loaded with top block height %v",
tmdb.GetTopBlock())
return &tmdb, nil
}


@ -719,6 +719,15 @@ func (p *Params) TotalSubsidyProportions() uint16 {
return p.WorkRewardProportion + p.StakeRewardProportion + p.BlockTaxProportion
}
// LatestCheckpointHeight is the height of the latest checkpoint block in the
// parameters.
func (p *Params) LatestCheckpointHeight() int64 {
if len(p.Checkpoints) == 0 {
return 0
}
return p.Checkpoints[len(p.Checkpoints)-1].Height
}
func init() {
// Register all default networks when the package is initialized.
mustRegister(&MainNetParams)
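The new LatestCheckpointHeight accessor is what the block manager consults above before sending winningtickets notifications. A minimal usage sketch of that gate, mirroring the checks added in blockmanager (the notify helper is hypothetical, not part of this commit):

	// Only notify voters about blocks past the last hard-coded checkpoint
	// and at or past the height where stake voting begins.
	if block.Height() >= chainParams.StakeValidationHeight-1 &&
		block.Height() > chainParams.LatestCheckpointHeight() {
		notifyWinningTickets(block) // hypothetical helper
	}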

dcrd.go

@ -17,7 +17,6 @@ import (
"time"
"github.com/decred/dcrd/blockchain/indexers"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/limits"
)
@ -111,12 +110,6 @@ func dcrdMain(serverChan chan<- *server) error {
return nil
}
// Perform upgrades to dcrd as new versions require it.
if err := doUpgrades(); err != nil {
dcrdLog.Errorf("%v", err)
return err
}
if interruptRequested(interrupted) {
return nil
}
@ -167,42 +160,9 @@ func dcrdMain(serverChan chan<- *server) error {
return nil
}
// The ticket "DB" takes ages to load and serialize back out to a file.
// Load it asynchronously and if the process is interrupted during the
// load, discard the result since no cleanup is necessary.
lifetimeNotifier.notifyStartupEvent(lifetimeEventTicketDB)
type ticketDBResult struct {
ticketDB *stake.TicketDB
err error
}
ticketDBResultChan := make(chan ticketDBResult)
go func() {
tmdb, err := loadTicketDB(db, activeNetParams.Params)
ticketDBResultChan <- ticketDBResult{tmdb, err}
}()
var tmdb *stake.TicketDB
select {
case <-interrupted:
return nil
case r := <-ticketDBResultChan:
if r.err != nil {
dcrdLog.Errorf("%v", r.err)
return r.err
}
tmdb = r.ticketDB
}
defer func() {
lifetimeNotifier.notifyShutdownEvent(lifetimeEventTicketDB)
tmdb.Close()
err := tmdb.Store(cfg.DataDir, "ticketdb.gob")
if err != nil {
dcrdLog.Errorf("Failed to store ticket database: %v", err.Error())
}
}()
// Create server and start it.
lifetimeNotifier.notifyStartupEvent(lifetimeEventP2PServer)
server, err := newServer(cfg.Listeners, db, tmdb, activeNetParams.Params)
server, err := newServer(cfg.Listeners, db, activeNetParams.Params)
if err != nil {
// TODO(oga) this logging could do with some beautifying.
dcrdLog.Errorf("Unable to start server on %v: %v",
@ -242,7 +202,7 @@ func main() {
// limits the garbage collector from excessively overallocating during
// bursts. This value was arrived at with the help of profiling live
// usage.
debug.SetGCPercent(10)
debug.SetGCPercent(20)
// Up some limits.
if err := limits.SetLimits(); err != nil {


@ -10,7 +10,6 @@ set -ex
# Automatic checks
test -z "$(go fmt $(glide novendor) | tee /dev/stderr)"
test -z "$(for package in $(glide novendor); do golint $package; done | grep -v 'ALL_CAPS\|OP_\|NewFieldVal' | tee /dev/stderr)"
test -z "$(go vet $(glide novendor) 2>&1 | tee /dev/stderr)"
env GORACE="halt_on_error=1" go test -v -race $(glide novendor)


@ -323,10 +323,10 @@ func mergeUtxoView(viewA *blockchain.UtxoViewpoint, viewB *blockchain.UtxoViewpo
}
}
// hashExistsInList checks if a hash exists in a list of hashes.
func hashExistsInList(hash *chainhash.Hash, list []chainhash.Hash) bool {
for _, h := range list {
if hash.IsEqual(&h) {
// hashInSlice checks if a hash exists in a slice of hashes.
func hashInSlice(h chainhash.Hash, list []chainhash.Hash) bool {
for i := range list {
if h == list[i] {
return true
}
}
@ -1177,8 +1177,8 @@ func NewBlockTemplate(policy *mining.Policy, server *server,
}
missedTickets := make([]chainhash.Hash, len(chainState.missedTickets),
len(chainState.missedTickets))
for i, h := range chainState.missedTickets {
missedTickets[i] = h
for i := range chainState.missedTickets {
missedTickets[i] = chainState.missedTickets[i]
}
chainState.Unlock()
@ -1468,7 +1468,7 @@ mempoolLoop:
if isSSRtx {
ticketHash := &tx.MsgTx().TxIn[0].PreviousOutPoint.Hash
if !hashExistsInList(ticketHash, missedTickets) {
if !hashInSlice(*ticketHash, missedTickets) {
continue
}
}


@ -1758,7 +1758,7 @@ func handleExistsLiveTicket(s *rpcServer, cmd interface{},
}
}
return s.server.blockManager.ExistsLiveTicket(hash)
return s.server.blockManager.chain.CheckLiveTicket(*hash), nil
}
// handleExistsLiveTickets implements the existslivetickets command.
@ -1785,9 +1785,9 @@ func handleExistsLiveTickets(s *rpcServer, cmd interface{},
}
hashesLen := len(txHashBlob) / 32
hashes := make([]*chainhash.Hash, hashesLen)
hashes := make([]chainhash.Hash, hashesLen)
for i := 0; i < hashesLen; i++ {
hashes[i], err = chainhash.NewHash(
newHash, err := chainhash.NewHash(
txHashBlob[i*chainhash.HashSize : (i+1)*chainhash.HashSize])
if err != nil {
return nil, &dcrjson.RPCError{
@ -1796,12 +1796,10 @@ func handleExistsLiveTickets(s *rpcServer, cmd interface{},
err.Error()),
}
}
hashes[i] = *newHash
}
exists, err := s.server.blockManager.ExistsLiveTickets(hashes)
if err != nil {
return nil, err
}
exists := s.server.blockManager.chain.CheckLiveTickets(hashes)
if len(exists) != hashesLen {
return nil, &dcrjson.RPCError{
Code: dcrjson.ErrRPCDatabase,
@ -4364,7 +4362,7 @@ func handleHelp(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (inter
// handleLiveTickets implements the livetickets command.
func handleLiveTickets(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
lt, err := s.server.blockManager.LiveTickets()
lt, err := s.server.blockManager.chain.LiveTickets()
if err != nil {
return nil, err
}
@ -4379,16 +4377,14 @@ func handleLiveTickets(s *rpcServer, cmd interface{}, closeChan <-chan struct{})
// handleMissedTickets implements the missedtickets command.
func handleMissedTickets(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
mt, err := s.server.blockManager.MissedTickets()
mt, err := s.server.blockManager.chain.MissedTickets()
if err != nil {
return nil, err
}
mtString := make([]string, len(mt), len(mt))
itr := 0
for hash := range mt {
mtString[itr] = hash.String()
itr++
for i, hash := range mt {
mtString[i] = hash.String()
}
return dcrjson.MissedTicketsResult{Tickets: mtString}, nil
@ -4410,7 +4406,7 @@ func handlePing(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (inter
// handleRebroadcastMissed implements the rebroadcastmissed command.
func handleRebroadcastMissed(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
hash, height := s.server.blockManager.chainState.Best()
mt, err := s.server.blockManager.MissedTickets()
mt, err := s.server.blockManager.chain.MissedTickets()
if err != nil {
return nil, err
}
@ -4424,7 +4420,9 @@ func handleRebroadcastMissed(s *rpcServer, cmd interface{}, closeChan <-chan str
Hash: *hash,
Height: height,
StakeDifficulty: stakeDiff,
TicketMap: mt,
TicketsSpent: []chainhash.Hash{},
TicketsMissed: mt,
TicketsNew: []chainhash.Hash{},
}
s.ntfnMgr.NotifySpentAndMissedTickets(missedTicketsNtfn)
@ -4440,30 +4438,19 @@ func handleRebroadcastWinners(s *rpcServer, cmd interface{}, closeChan <-chan st
return nil, err
}
s.server.blockManager.blockLotteryDataCacheMutex.Lock()
defer s.server.blockManager.blockLotteryDataCacheMutex.Unlock()
for _, b := range blocks {
lotteryData := new(BlockLotteryData)
exists := false
_, exists = s.server.blockManager.blockLotteryDataCache[b]
if !exists {
winningTickets, poolSize, finalState, err :=
s.server.blockManager.GetLotteryData(b)
if err != nil {
return nil, err
}
lotteryData.finalState = finalState
lotteryData.poolSize = poolSize
lotteryData.ntfnData = &WinningTicketsNtfnData{
b,
height,
winningTickets}
s.server.blockManager.blockLotteryDataCache[b] = lotteryData
} else {
lotteryData, _ = s.server.blockManager.blockLotteryDataCache[b]
for i := range blocks {
winningTickets, _, _, err :=
s.server.blockManager.chain.LotteryDataForBlock(&blocks[i])
if err != nil {
return nil, err
}
ntfnData := &WinningTicketsNtfnData{
BlockHash: *hash,
BlockHeight: height,
Tickets: winningTickets,
}
s.ntfnMgr.NotifyWinningTickets(lotteryData.ntfnData)
s.ntfnMgr.NotifyWinningTickets(ntfnData)
}
return nil, nil
@ -5408,7 +5395,7 @@ func handleTicketsForAddress(s *rpcServer, cmd interface{}, closeChan <-chan str
return nil, err
}
tickets, err := s.server.blockManager.TicketsForAddress(addr)
tickets, err := s.server.blockManager.chain.TicketsWithAddress(addr)
if err != nil {
return nil, err
}


@ -728,19 +728,16 @@ func (*wsNotificationManager) notifySpentAndMissedTickets(
// Create a ticket map to export as JSON.
ticketMap := make(map[string]string)
for _, ticket := range tnd.TicketMap {
if ticket.Missed == true {
ticketMap[ticket.SStxHash.String()] = "missed"
} else {
ticketMap[ticket.SStxHash.String()] = "spent"
}
for _, ticket := range tnd.TicketsMissed {
ticketMap[ticket.String()] = "missed"
}
for _, ticket := range tnd.TicketsSpent {
ticketMap[ticket.String()] = "spent"
}
// Notify interested websocket clients about the connected block.
ntfn := dcrjson.NewSpentAndMissedTicketsNtfn(tnd.Hash.String(),
int32(tnd.Height),
tnd.StakeDifficulty,
ticketMap)
int32(tnd.Height), tnd.StakeDifficulty, ticketMap)
marshalledJSON, err := dcrjson.MarshalCmd(nil, ntfn)
if err != nil {
@ -785,15 +782,13 @@ func (*wsNotificationManager) notifyNewTickets(clients map[chan struct{}]*wsClie
// Create a ticket map to export as JSON.
var tickets []string
for h := range tnd.TicketMap {
for _, h := range tnd.TicketsNew {
tickets = append(tickets, h.String())
}
// Notify interested websocket clients about the connected block.
ntfn := dcrjson.NewNewTicketsNtfn(tnd.Hash.String(),
int32(tnd.Height),
tnd.StakeDifficulty,
tickets)
ntfn := dcrjson.NewNewTicketsNtfn(tnd.Hash.String(), int32(tnd.Height),
tnd.StakeDifficulty, tickets)
marshalledJSON, err := dcrjson.MarshalCmd(nil, ntfn)
if err != nil {


@ -24,7 +24,6 @@ import (
"github.com/decred/dcrd/addrmgr"
"github.com/decred/dcrd/blockchain"
"github.com/decred/dcrd/blockchain/indexers"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/database"
@ -204,7 +203,6 @@ type server struct {
nat NAT
db database.DB
timeSource blockchain.MedianTimeSource
tmdb *stake.TicketDB
services wire.ServiceFlag
// The following fields are used for optional indexes. They will be nil
@ -1135,7 +1133,7 @@ func (s *server) pushTxMsg(sp *serverPeer, sha *chainhash.Hash, doneChan chan<-
// pushBlockMsg sends a block message for the provided block hash to the
// connected peer. An error is returned if the block hash is not known.
func (s *server) pushBlockMsg(sp *serverPeer, hash *chainhash.Hash, doneChan chan<- struct{}, waitChan <-chan struct{}) error {
block, err := sp.server.blockManager.chain.GetBlockFromHash(hash)
block, err := sp.server.blockManager.chain.FetchBlockFromHash(hash)
if err != nil {
peerLog.Tracef("Unable to fetch requested block hash %v: %v",
hash, err)
@ -2433,13 +2431,32 @@ out:
// newServer returns a new dcrd server configured to listen on addr for the
// decred network type specified by chainParams. Use start to begin accepting
// connections from peers.
func newServer(listenAddrs []string, db database.DB, tmdb *stake.TicketDB, chainParams *chaincfg.Params) (*server, error) {
func newServer(listenAddrs []string, db database.DB, chainParams *chaincfg.Params) (*server, error) {
services := defaultServices
if cfg.NoPeerBloomFilters {
services &^= wire.SFNodeBloom
}
s := server{
chainParams: chainParams,
newPeers: make(chan *serverPeer, cfg.MaxPeers),
donePeers: make(chan *serverPeer, cfg.MaxPeers),
banPeers: make(chan *serverPeer, cfg.MaxPeers),
retryPeers: make(chan *serverPeer, cfg.MaxPeers),
wakeup: make(chan struct{}),
query: make(chan interface{}),
relayInv: make(chan relayMsg, cfg.MaxPeers),
broadcast: make(chan broadcastMsg, cfg.MaxPeers),
quit: make(chan struct{}),
modifyRebroadcastInv: make(chan interface{}),
peerHeightsUpdate: make(chan updatePeerHeightsMsg),
db: db,
timeSource: blockchain.NewMedianTime(),
services: services,
sigCache: txscript.NewSigCache(cfg.SigCacheMaxSize),
}
amgr := addrmgr.New(cfg.DataDir, dcrdLookup)
var listeners []net.Listener
@ -2565,29 +2582,9 @@ func newServer(listenAddrs []string, db database.DB, tmdb *stake.TicketDB, chain
return nil, errors.New("no valid listen address")
}
}
s := server{
listeners: listeners,
chainParams: chainParams,
addrManager: amgr,
newPeers: make(chan *serverPeer, cfg.MaxPeers),
donePeers: make(chan *serverPeer, cfg.MaxPeers),
banPeers: make(chan *serverPeer, cfg.MaxPeers),
retryPeers: make(chan *serverPeer, cfg.MaxPeers),
wakeup: make(chan struct{}),
query: make(chan interface{}),
relayInv: make(chan relayMsg, cfg.MaxPeers),
broadcast: make(chan broadcastMsg, cfg.MaxPeers),
quit: make(chan struct{}),
modifyRebroadcastInv: make(chan interface{}),
peerHeightsUpdate: make(chan updatePeerHeightsMsg),
nat: nat,
db: db,
tmdb: tmdb,
timeSource: blockchain.NewMedianTime(),
services: services,
sigCache: txscript.NewSigCache(cfg.SigCacheMaxSize),
}
s.listeners = listeners
s.addrManager = amgr
s.nat = nat
// Create the transaction and address indexes if needed.
//


@ -1,56 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"os"
"path/filepath"
)
var ticketDBName = "ticketdb.gob"
var oldTicketDBName = "ticketdb_old.gob"
// checkForAndMoveOldTicketDb checks for an old copy of the ticket database
// from before ffldb was introduced and renames it in preparation for resyncing
// the blockchain.
func checkForAndMoveOldTicketDb() error {
ffldbPath := filepath.Join(cfg.DataDir,
blockDbNamePrefix+"_"+defaultDbType)
// No data path exists, break because this is a fresh start and
// the database must be constructed.
if _, err := os.Stat(cfg.DataDir); os.IsNotExist(err) {
return nil
}
// An old version of the chain exists, update it.
if _, err := os.Stat(ffldbPath); os.IsNotExist(err) {
// Rename the old ticket database.
ticketDBPath := filepath.Join(cfg.DataDir, ticketDBName)
if _, err := os.Stat(ticketDBPath); !os.IsNotExist(err) {
return err
}
oldTicketDBPath := filepath.Join(cfg.DataDir, oldTicketDBName)
err = os.Rename(ticketDBPath, oldTicketDBPath)
if !os.IsNotExist(err) {
return err
}
if err == nil {
dcrdLog.Warnf("The old ticket database file has been "+
"renamed %v. It can be safely removed if you "+
"no longer wish to roll back to an old "+
"version of the software.", oldTicketDBName)
}
}
return nil
}
// doUpgrades performs upgrades to dcrd as new versions require it.
func doUpgrades() error {
return checkForAndMoveOldTicketDb()
}