dcrd/blockchain/process.go

// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2015-2019 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockchain

import (
	"fmt"
	"time"

	"github.com/decred/dcrd/blockchain/standalone"
	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/dcrutil/v2"
)

// BehaviorFlags is a bitmask defining tweaks to the normal behavior when
// performing chain processing and consensus rules checks.
type BehaviorFlags uint32

const (
	// BFFastAdd may be set to indicate that several checks can be avoided
	// for the block since it is already known to fit into the chain due to
	// already proving it correctly links into the chain up to a known
	// checkpoint. This is primarily used for headers-first mode.
	BFFastAdd BehaviorFlags = 1 << iota

	// BFNoPoWCheck may be set to indicate the proof of work check which
	// ensures a block hashes to a value less than the required target will
	// not be performed.
	BFNoPoWCheck

	// BFNone is a convenience value to specifically indicate no flags.
	BFNone BehaviorFlags = 0
)
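
// For illustration, the flags form an ordinary bitmask, so callers can
// combine and test them with bitwise operators:
//
//	flags := BFFastAdd | BFNoPoWCheck
//	if flags&BFFastAdd == BFFastAdd {
//		// Checks that are unnecessary for blocks already known to
//		// link into the chain may be skipped.
//	}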

// processOrphans determines if there are any orphans which depend on the passed
// block hash (they are no longer orphans if true) and potentially accepts them.
// It repeats the process for the newly accepted blocks (to detect further
// orphans which may no longer be orphans) until there are no more.
//
// The flags do not modify the behavior of this function directly, however they
// are needed to pass along to maybeAcceptBlock.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) processOrphans(hash *chainhash.Hash, flags BehaviorFlags) error {
	// Start with processing at least the passed hash. Leave a little room
	// for additional orphan blocks that need to be processed without
	// needing to grow the array in the common case.
	processHashes := make([]*chainhash.Hash, 0, 10)
	processHashes = append(processHashes, hash)
	for len(processHashes) > 0 {
		// Pop the first hash to process from the slice.
		processHash := processHashes[0]
		processHashes[0] = nil // Prevent GC leak.
		processHashes = processHashes[1:]

		// Look up all orphans that are parented by the block we just
		// accepted. This will typically only be one, but it could
		// be multiple if multiple blocks are mined and broadcast
		// around the same time. The one with the most proof of work
		// will eventually win out. An indexing for loop is
		// intentionally used over a range here as range does not
		// reevaluate the slice on each iteration nor does it adjust the
		// index for the modified slice.
		for i := 0; i < len(b.prevOrphans[*processHash]); i++ {
			orphan := b.prevOrphans[*processHash][i]
			if orphan == nil {
				log.Warnf("Found a nil entry at index %d in the "+
					"orphan dependency list for block %v", i,
					processHash)
				continue
			}

			// Remove the orphan from the orphan pool.
			orphanHash := orphan.block.Hash()
			b.removeOrphanBlock(orphan)
			i--

			// Potentially accept the block into the block chain.
			_, err := b.maybeAcceptBlock(orphan.block, flags)
			if err != nil {
				return err
			}

			// Add this block to the list of blocks to process so
			// any orphan blocks that depend on this block are
			// handled too.
			processHashes = append(processHashes, orphanHash)
		}
	}

	return nil
}
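
// For reference, a minimal sketch of the orphan bookkeeping this function
// relies on, assuming map shapes along these lines (the concrete definitions
// live elsewhere in this package):
//
//	orphans     map[chainhash.Hash]*orphanBlock   // orphans keyed by their own hash
//	prevOrphans map[chainhash.Hash][]*orphanBlock // orphans keyed by parent hash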

// ProcessBlock is the main workhorse for handling insertion of new blocks into
// the block chain. It includes functionality such as rejecting duplicate
// blocks, ensuring blocks follow all rules, handling orphans, and inserting
// blocks into the block chain along with best chain selection and
// reorganization.
//
// When no error occurs during processing, the first return value indicates the
// length of the fork the block extended. In the case it either extended the
// best chain or is now the tip of the best chain due to causing a
// reorganization, the fork length will be 0. The second return value indicates
// whether or not the block is an orphan, in which case the fork length will
// also be zero as expected, because an orphan, by definition, does not connect
// to the best chain.
//
// This function is safe for concurrent access.
func (b *BlockChain) ProcessBlock(block *dcrutil.Block, flags BehaviorFlags) (int64, bool, error) {
	b.chainLock.Lock()
	defer b.chainLock.Unlock()

	fastAdd := flags&BFFastAdd == BFFastAdd

	blockHash := block.Hash()
	log.Tracef("Processing block %v", blockHash)
	currentTime := time.Now()
	defer func() {
		elapsedTime := time.Since(currentTime)
		log.Debugf("Block %v (height %v) finished processing in %s",
			blockHash, block.Height(), elapsedTime)
	}()

	// The block must not already exist in the main chain or side chains.
	if b.index.HaveBlock(blockHash) {
		str := fmt.Sprintf("already have block %v", blockHash)
		return 0, false, ruleError(ErrDuplicateBlock, str)
	}

	// The block must not already exist as an orphan.
	if _, exists := b.orphans[*blockHash]; exists {
		str := fmt.Sprintf("already have block (orphan) %v", blockHash)
		return 0, false, ruleError(ErrDuplicateBlock, str)
	}

	// Perform preliminary sanity checks on the block and its transactions.
	err := checkBlockSanity(block, b.timeSource, flags, b.chainParams)
	if err != nil {
		return 0, false, err
	}

	// Find the previous checkpoint and perform some additional checks based
	// on the checkpoint. This provides a few nice properties such as
	// preventing old side chain blocks before the last checkpoint,
	// rejecting easy to mine, but otherwise bogus, blocks that could be
	// used to eat memory, and ensuring expected (versus claimed) proof of
	// work requirements since the previous checkpoint are met.
	blockHeader := &block.MsgBlock().Header
	checkpointNode, err := b.findPreviousCheckpoint()
	if err != nil {
		return 0, false, err
	}
	if checkpointNode != nil {
		// Ensure the block timestamp is after the checkpoint timestamp.
		checkpointTime := time.Unix(checkpointNode.timestamp, 0)
		if blockHeader.Timestamp.Before(checkpointTime) {
			str := fmt.Sprintf("block %v has timestamp %v before "+
				"last checkpoint timestamp %v", blockHash,
				blockHeader.Timestamp, checkpointTime)
			return 0, false, ruleError(ErrCheckpointTimeTooOld, str)
		}
		if !fastAdd {
			// Even though the checks prior to now have already ensured the
			// proof of work exceeds the claimed amount, the claimed amount
			// is a field in the block header which could be forged. This
			// check ensures the proof of work is at least the minimum
			// expected based on elapsed time since the last checkpoint and
			// maximum adjustment allowed by the retarget rules. Note that
			// a larger target value corresponds to easier proof of work,
			// so the block is rejected when its target exceeds the easiest
			// allowed value.
			duration := blockHeader.Timestamp.Sub(checkpointTime)
			requiredTarget := standalone.CompactToBig(b.calcEasiestDifficulty(
				checkpointNode.bits, duration))
			currentTarget := standalone.CompactToBig(blockHeader.Bits)
			if currentTarget.Cmp(requiredTarget) > 0 {
				str := fmt.Sprintf("block target difficulty of %064x "+
					"is too low when compared to the previous "+
					"checkpoint", currentTarget)
				return 0, false, ruleError(ErrDifficultyTooLow, str)
			}
		}
	}

	// Handle orphan blocks.
	prevHash := &blockHeader.PrevBlock
	if !b.index.HaveBlock(prevHash) {
		log.Infof("Adding orphan block %v with parent %v", blockHash,
			prevHash)
		b.addOrphanBlock(block)

		// The fork length of orphans is unknown since they, by definition, do
		// not connect to the best chain.
		return 0, true, nil
	}

	// The block has passed all context-independent checks and appears sane
	// enough to potentially accept it into the block chain.
	forkLen, err := b.maybeAcceptBlock(block, flags)
	if err != nil {
		return 0, false, err
	}

	// Accept any orphan blocks that depend on this block (they are no
	// longer orphans) and repeat for those accepted blocks until there are
	// no more.
	err = b.processOrphans(blockHash, flags)
	if err != nil {
		return 0, false, err
	}

	log.Debugf("Accepted block %v", blockHash)
	return forkLen, false, nil
}
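
// When ProcessBlock rejects a block it does so through ruleError, so, as a
// sketch (assuming the package's RuleError type is used unchanged), callers
// can distinguish consensus failures from unexpected errors along these lines:
//
//	if _, ok := err.(RuleError); ok {
//		// The block was rejected for violating a consensus rule rather
//		// than due to an unexpected failure such as a database error.
//	}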