diff --git a/blockchain/accept.go b/blockchain/accept.go index 92e4d640..f35e3a46 100644 --- a/blockchain/accept.go +++ b/blockchain/accept.go @@ -194,7 +194,7 @@ func (b *BlockChain) maybeAcceptBlock(block *dcrutil.Block, // The height of this block is one more than the referenced previous // block. - blockHeight := int64(0) + blockHeight := int32(0) if prevNode != nil { blockHeight = prevNode.height + 1 } diff --git a/blockchain/blocklocator.go b/blockchain/blocklocator.go index 4f35e78e..7da8280c 100644 --- a/blockchain/blocklocator.go +++ b/blockchain/blocklocator.go @@ -50,8 +50,8 @@ func (b *BlockChain) BlockLocatorFromHash(hash *chainhash.Hash) BlockLocator { // Attempt to find the height of the block that corresponds to the // passed hash, and if it's on a side chain, also find the height at // which it forks from the main chain. - blockHeight := int64(-1) - forkHeight := int64(-1) + blockHeight := int32(-1) + forkHeight := int32(-1) node, exists := b.index[*hash] if !exists { // Try to look up the height for passed block hash. Assume an @@ -82,7 +82,7 @@ func (b *BlockChain) BlockLocatorFromHash(hash *chainhash.Hash) BlockLocator { // in the BlockLocator comment and make sure to leave room for the // final genesis hash. iterNode := node - increment := int64(1) + increment := int32(1) for len(locator) < wire.MaxBlockLocatorsPerMsg-1 { // Once there are 10 locators, exponentially increase the // distance between each block locator. diff --git a/blockchain/chain.go b/blockchain/chain.go index 313a91fb..879a69a7 100644 --- a/blockchain/chain.go +++ b/blockchain/chain.go @@ -65,7 +65,7 @@ type blockNode struct { parentHash *chainhash.Hash // height is the position in the block chain. - height int64 + height int32 // workSum is the total amount of work in the chain up to and including // this node. @@ -100,7 +100,7 @@ type blockNode struct { // for the passed block. The work sum is updated accordingly when the node is // inserted into a chain. func newBlockNode(blockHeader *wire.BlockHeader, blockSha *chainhash.Hash, - height int64, voteBits []uint16) *blockNode { + height int32, voteBits []uint16) *blockNode { // Make a copy of the hash so the node doesn't keep a reference to part // of the full block/block header preventing it from being garbage // collected. @@ -167,7 +167,7 @@ type BlockChain struct { db database.Db tmdb *stake.TicketDB chainParams *chaincfg.Params - checkpointsByHeight map[int64]*chaincfg.Checkpoint + checkpointsByHeight map[int32]*chaincfg.Checkpoint notifications NotificationCallback minMemoryNodes int64 blocksPerRetarget int64 @@ -504,7 +504,7 @@ func (b *BlockChain) GenerateInitialIndex() error { // Start at the next block after the latest one on the next loop // iteration. - start += int64(len(hashList)) + start += int32(len(hashList)) } return nil @@ -812,7 +812,7 @@ func (b *BlockChain) pruneBlockNodes() error { // the latter loads the node and the goal is to find nodes still in // memory that can be pruned. newRootNode := b.bestChain - for i := int64(0); i < b.minMemoryNodes-1 && newRootNode != nil; i++ { + for i := int32(0); i < b.minMemoryNodes-1 && newRootNode != nil; i++ { newRootNode = newRootNode.parent } @@ -1551,9 +1551,9 @@ func maxInt64(a, b int64) int64 { func New(db database.Db, tmdb *stake.TicketDB, params *chaincfg.Params, c NotificationCallback) *BlockChain { // Generate a checkpoint by height map from the provided checkpoints. 
- var checkpointsByHeight map[int64]*chaincfg.Checkpoint + var checkpointsByHeight map[int32]*chaincfg.Checkpoint if len(params.Checkpoints) > 0 { - checkpointsByHeight = make(map[int64]*chaincfg.Checkpoint) + checkpointsByHeight = make(map[int32]*chaincfg.Checkpoint) for i := range params.Checkpoints { checkpoint := &params.Checkpoints[i] checkpointsByHeight[checkpoint.Height] = checkpoint diff --git a/blockchain/checkpoints.go b/blockchain/checkpoints.go index 24ae0547..7132da1a 100644 --- a/blockchain/checkpoints.go +++ b/blockchain/checkpoints.go @@ -60,7 +60,7 @@ func (b *BlockChain) LatestCheckpoint() *chaincfg.Checkpoint { // verifyCheckpoint returns whether the passed block height and hash combination // match the hard-coded checkpoint data. It also returns true if there is no // checkpoint data for the passed block height. -func (b *BlockChain) verifyCheckpoint(height int64, hash *chainhash.Hash) bool { +func (b *BlockChain) verifyCheckpoint(height int32, hash *chainhash.Hash) bool { if b.noCheckpoints || len(b.chainParams.Checkpoints) == 0 { return true } diff --git a/blockchain/common_test.go b/blockchain/common_test.go index 7065060e..51e2166c 100644 --- a/blockchain/common_test.go +++ b/blockchain/common_test.go @@ -195,7 +195,7 @@ func loadTxStore(filename string) (blockchain.TxStore, error) { if err != nil { return nil, err } - txD.BlockHeight = int64(uintBuf) + txD.BlockHeight = int32(uintBuf) // Num spent bits. err = binary.Read(r, binary.LittleEndian, &uintBuf) diff --git a/blockchain/internal_test.go b/blockchain/internal_test.go index 72ddf216..cc25fc2b 100644 --- a/blockchain/internal_test.go +++ b/blockchain/internal_test.go @@ -18,6 +18,12 @@ import ( "time" ) +// TstSetCoinbaseMaturity makes the ability to set the coinbase maturity +// available to the test package. +func TstSetCoinbaseMaturity(maturity int32) { + coinbaseMaturity = maturity +} + // TstTimeSorter makes the internal timeSorter type available to the test // package. func TstTimeSorter(times []time.Time) sort.Interface { diff --git a/blockchain/txlookup.go b/blockchain/txlookup.go index 56ef2342..9bde71cf 100644 --- a/blockchain/txlookup.go +++ b/blockchain/txlookup.go @@ -49,7 +49,7 @@ const ViewpointPrevInvalidRegular = int8(4) type TxData struct { Tx *dcrutil.Tx Hash *chainhash.Hash - BlockHeight int64 + BlockHeight int32 BlockIndex uint32 Spent []bool Err error diff --git a/blockchain/validate.go b/blockchain/validate.go index d6b0dc4d..fd2cab74 100644 --- a/blockchain/validate.go +++ b/blockchain/validate.go @@ -48,6 +48,11 @@ const ( ) var ( + // coinbaseMaturity is the internal variable used for validating the + // spending of coinbase outputs. A variable rather than the exported + // constant is used because the tests need the ability to modify it. + coinbaseMaturity = int32(CoinbaseMaturity) + // zeroHash is the zero value for a wire.ShaHash and is defined as // a package level variable to avoid the need to create a new instance // every time a check is needed. @@ -113,6 +118,75 @@ func IsCoinBase(tx *dcrutil.Tx) bool { return IsCoinBaseTx(tx.MsgTx()) } +// IsFinalizedTransaction determines whether or not a transaction is finalized. +func IsFinalizedTransaction(tx *dcrutil.Tx, blockHeight int32, blockTime time.Time) bool { + msgTx := tx.MsgTx() + + // Lock time of zero means the transaction is finalized.
+ lockTime := msgTx.LockTime + if lockTime == 0 { + return true + } + + // The lock time field of a transaction is either a block height at + // which the transaction is finalized or a timestamp depending on if the + // value is before the txscript.LockTimeThreshold. When it is under the + // threshold it is a block height. + blockTimeOrHeight := int64(0) + if lockTime < txscript.LockTimeThreshold { + blockTimeOrHeight = int64(blockHeight) + } else { + blockTimeOrHeight = blockTime.Unix() + } + if int64(lockTime) < blockTimeOrHeight { + return true + } + + // At this point, the transaction's lock time hasn't occurred yet, but + // the transaction might still be finalized if the sequence number + // for all transaction inputs is maxed out. + for _, txIn := range msgTx.TxIn { + if txIn.Sequence != math.MaxUint32 { + return false + } + } + return true +} + +// isBIP0030Node returns whether or not the passed node represents one of the +// two blocks that violate the BIP0030 rule which prevents transactions from +// overwriting old ones. +func isBIP0030Node(node *blockNode) bool { + if node.height == 91842 && node.hash.IsEqual(block91842Hash) { + return true + } + + if node.height == 91880 && node.hash.IsEqual(block91880Hash) { + return true + } + + return false +} + +// CalcBlockSubsidy returns the subsidy amount a block at the provided height +// should have. This is mainly used for determining how much the coinbase for +// newly generated blocks awards as well as validating the coinbase for blocks +// has the expected value. +// +// The subsidy is halved every SubsidyHalvingInterval blocks. Mathematically +// this is: baseSubsidy / 2^(height/subsidyHalvingInterval) +// +// At the target block generation rate for the main network, this is +// approximately every 4 years. +func CalcBlockSubsidy(height int32, chainParams *chaincfg.Params) int64 { + if chainParams.SubsidyHalvingInterval == 0 { + return baseSubsidy + } + + // Equivalent to: baseSubsidy / 2^(height/subsidyHalvingInterval) + return baseSubsidy >> uint(height/chainParams.SubsidyHalvingInterval) +} + // CheckTransactionSanity performs some preliminary checks on a transaction to // ensure it is sane. These checks are context free. func CheckTransactionSanity(tx *dcrutil.Tx, params *chaincfg.Params) error { @@ -773,6 +847,110 @@ func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, return nil } +// checkBlockContext performs several validation checks on the block which depend +// on its position within the block chain. +// +// The flags modify the behavior of this function as follows: +// - BFFastAdd: The transactions are not checked to see if they are finalized +// and the somewhat expensive BIP0034 validation is not performed. +// +// The flags are also passed to checkBlockHeaderContext. See its documentation +// for how the flags modify its behavior. +func (b *BlockChain) checkBlockContext(block *dcrutil.Block, prevNode *blockNode, flags BehaviorFlags) error { + // The genesis block is valid by definition. + if prevNode == nil { + return nil + } + + // Perform all block header related validation checks. + header := &block.MsgBlock().Header + err := b.checkBlockHeaderContext(header, prevNode, flags) + if err != nil { + return err + } + + fastAdd := flags&BFFastAdd == BFFastAdd + if !fastAdd { + // The height of this block is one more than the referenced + // previous block. + blockHeight := prevNode.height + 1 + + // Ensure all transactions in the block are finalized.
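// ----------------------------------------------------------------------------
// Editor's aside (not part of the patch): a minimal, runnable sketch of the
// lock-time finality rule implemented by IsFinalizedTransaction above. The
// names here (isFinalized, lockTimeThreshold) are hypothetical; the
// 500,000,000 cutoff mirrors txscript.LockTimeThreshold, below which a lock
// time is read as a block height and at or above which it is a Unix timestamp.
package main

import (
	"fmt"
	"math"
	"time"
)

const lockTimeThreshold uint32 = 500000000

func isFinalized(lockTime uint32, sequences []uint32, blockHeight int32, blockTime time.Time) bool {
	// A lock time of zero is always final.
	if lockTime == 0 {
		return true
	}

	// Compare against the block height or the block time depending on
	// which side of the threshold the lock time falls.
	blockTimeOrHeight := int64(blockHeight)
	if lockTime >= lockTimeThreshold {
		blockTimeOrHeight = blockTime.Unix()
	}
	if int64(lockTime) < blockTimeOrHeight {
		return true
	}

	// Lock time not yet reached; the transaction is still final only if
	// every input has a maxed-out sequence number.
	for _, seq := range sequences {
		if seq != math.MaxUint32 {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isFinalized(100, []uint32{0}, 101, time.Now()))              // true: lock height 100 already passed
	fmt.Println(isFinalized(200, []uint32{math.MaxUint32}, 101, time.Now())) // true: sequences maxed out
	fmt.Println(isFinalized(200, []uint32{0}, 101, time.Now()))              // false: lock height not reached
}
// ----------------------------------------------------------------------------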
+ for _, tx := range block.Transactions() { + if !IsFinalizedTransaction(tx, blockHeight, + header.Timestamp) { + + str := fmt.Sprintf("block contains unfinalized "+ + "transaction %v", tx.Sha()) + return ruleError(ErrUnfinalizedTx, str) + } + } + + // Ensure coinbase starts with serialized block heights for + // blocks whose version is the serializedHeightVersion or newer + // once a majority of the network has upgraded. This is part of + // BIP0034. + if ShouldHaveSerializedBlockHeight(header) && + b.isMajorityVersion(serializedHeightVersion, prevNode, + b.chainParams.BlockEnforceNumRequired) { + + coinbaseTx := block.Transactions()[0] + err := checkSerializedHeight(coinbaseTx, blockHeight) + if err != nil { + return err + } + } + } + + return nil +} + +// ExtractCoinbaseHeight attempts to extract the height of the block from the +// scriptSig of a coinbase transaction. Coinbase heights are only present in +// blocks of version 2 or later. This was added as part of BIP0034. +func ExtractCoinbaseHeight(coinbaseTx *dcrutil.Tx) (int32, error) { + sigScript := coinbaseTx.MsgTx().TxIn[0].SignatureScript + if len(sigScript) < 1 { + str := "the coinbase signature script for blocks of " + + "version %d or greater must start with the " + + "length of the serialized block height" + str = fmt.Sprintf(str, serializedHeightVersion) + return 0, ruleError(ErrMissingCoinbaseHeight, str) + } + + serializedLen := int(sigScript[0]) + if len(sigScript[1:]) < serializedLen { + str := "the coinbase signature script for blocks of " + + "version %d or greater must start with the " + + "serialized block height" + str = fmt.Sprintf(str, serializedLen) + return 0, ruleError(ErrMissingCoinbaseHeight, str) + } + + serializedHeightBytes := make([]byte, 8, 8) + copy(serializedHeightBytes, sigScript[1:serializedLen+1]) + serializedHeight := binary.LittleEndian.Uint64(serializedHeightBytes) + + return int32(serializedHeight), nil +} + +// checkSerializedHeight checks if the signature script in the passed +// transaction starts with the serialized block height of wantHeight. +func checkSerializedHeight(coinbaseTx *dcrutil.Tx, wantHeight int32) error { + serializedHeight, err := ExtractCoinbaseHeight(coinbaseTx) + if err != nil { + return err + } + + if serializedHeight != wantHeight { + str := fmt.Sprintf("the coinbase signature script serialized "+ + "block height is %d when %d was expected", + serializedHeight, wantHeight) + return ruleError(ErrBadCoinbaseHeight, str) + } + return nil +} + // isTransactionSpent returns whether or not the provided transaction data // describes a fully spent transaction. A fully spent transaction is one where // all outputs have been spent. @@ -1344,7 +1522,7 @@ func (b *BlockChain) CheckBlockStakeSanity(tixStore TicketStore, // amount, and verifying the signatures to prove the spender was the owner of // the decred and therefore allowed to spend them. As it checks the inputs, // it also calculates the total fees for the transaction and returns that value. -func CheckTransactionInputs(tx *dcrutil.Tx, txHeight int64, txStore TxStore, +func CheckTransactionInputs(tx *dcrutil.Tx, txHeight int32, txStore TxStore, checkFraudProof bool, chainParams *chaincfg.Params) (int64, error) { // Expired transactions are not allowed. 
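// ----------------------------------------------------------------------------
// Editor's aside (not part of the patch): a runnable sketch of the serialized
// block height convention parsed by ExtractCoinbaseHeight/checkSerializedHeight
// above: the first signature-script byte gives the length of the little-endian
// height that follows. The encoder below only illustrates that layout (real
// coinbase scripts are built with script-number pushes), and the function
// names are hypothetical.
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// putSerializedHeight writes a length byte followed by the height in
// little-endian order, trimming trailing zero bytes.
func putSerializedHeight(height int32) []byte {
	var buf [8]byte
	binary.LittleEndian.PutUint64(buf[:], uint64(height))
	n := 8
	for n > 1 && buf[n-1] == 0 {
		n--
	}
	return append([]byte{byte(n)}, buf[:n]...)
}

// extractSerializedHeight mirrors the parsing logic shown above: read the
// declared length, copy into an 8-byte buffer, and decode little-endian.
func extractSerializedHeight(sigScript []byte) (int32, error) {
	if len(sigScript) < 1 {
		return 0, errors.New("script too short to hold a serialized height")
	}
	serializedLen := int(sigScript[0])
	if len(sigScript[1:]) < serializedLen {
		return 0, errors.New("script shorter than its declared height length")
	}
	var buf [8]byte
	copy(buf[:], sigScript[1:serializedLen+1])
	return int32(binary.LittleEndian.Uint64(buf[:])), nil
}

func main() {
	script := putSerializedHeight(154)
	height, err := extractSerializedHeight(script)
	fmt.Println(height, err) // 154 <nil>
}
// ----------------------------------------------------------------------------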
if tx.MsgTx().Expiry != wire.NoExpiryValue { diff --git a/blockchain/validate_test.go b/blockchain/validate_test.go index b51db5ca..09740147 100644 --- a/blockchain/validate_test.go +++ b/blockchain/validate_test.go @@ -91,7 +91,7 @@ func TestBlockValidationRules(t *testing.T) { // Create decoder from the buffer and a map to store the data bcDecoder := gob.NewDecoder(bcBuf) - blockChain := make(map[int64][]byte) + blockChain := make(map[int32][]byte) // Decode the blockchain into the map if err := bcDecoder.Decode(&blockChain); err != nil { @@ -100,7 +100,7 @@ func TestBlockValidationRules(t *testing.T) { // Insert blocks 1 to 142 and perform various test. Block 1 has // special properties, so make sure those validate correctly first. - block1Bytes := blockChain[int64(1)] + block1Bytes := blockChain[int32(1)] timeSource := blockchain.NewMedianTime() // ---------------------------------------------------------------------------- @@ -118,7 +118,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(noCoinbaseOuts1) b1test := dcrutil.NewBlock(noCoinbaseOuts1) - b1test.SetHeight(int64(1)) + b1test.SetHeight(int32(1)) err = blockchain.CheckWorklessBlockSanity(b1test, timeSource, simNetParams) if err != nil { @@ -141,7 +141,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(noCoinbaseOuts1) b1test = dcrutil.NewBlock(noCoinbaseOuts1) - b1test.SetHeight(int64(1)) + b1test.SetHeight(int32(1)) err = blockchain.CheckWorklessBlockSanity(b1test, timeSource, simNetParams) if err != nil { @@ -164,7 +164,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(noCoinbaseOuts1) b1test = dcrutil.NewBlock(noCoinbaseOuts1) - b1test.SetHeight(int64(1)) + b1test.SetHeight(int32(1)) err = blockchain.CheckWorklessBlockSanity(b1test, timeSource, simNetParams) if err != nil { @@ -182,11 +182,11 @@ func TestBlockValidationRules(t *testing.T) { // Add the rest of the blocks up to the stake early test block. stakeEarlyTest := 142 for i := 1; i < stakeEarlyTest; i++ { - bl, err := dcrutil.NewBlockFromBytes(blockChain[int64(i)]) + bl, err := dcrutil.NewBlockFromBytes(blockChain[int32(i)]) if err != nil { t.Errorf("NewBlockFromBytes error: %v", err.Error()) } - bl.SetHeight(int64(i)) + bl.SetHeight(int32(i)) _, _, err = chain.ProcessBlock(bl, timeSource, blockchain.BFNone) if err != nil { @@ -197,7 +197,7 @@ func TestBlockValidationRules(t *testing.T) { // ---------------------------------------------------------------------------- // ErrInvalidEarlyStakeTx // There are multiple paths to this error, but here we try an early SSGen. 
- block142Bytes := blockChain[int64(stakeEarlyTest)] + block142Bytes := blockChain[int32(stakeEarlyTest)] earlySSGen142 := new(wire.MsgBlock) earlySSGen142.FromBytes(block142Bytes) @@ -217,7 +217,7 @@ func TestBlockValidationRules(t *testing.T) { earlySSGen142.AddSTransaction(mtxFromB) recalculateMsgBlockMerkleRootsSize(earlySSGen142) b142test := dcrutil.NewBlock(earlySSGen142) - b142test.SetHeight(int64(stakeEarlyTest)) + b142test.SetHeight(int32(stakeEarlyTest)) err = blockchain.CheckWorklessBlockSanity(b142test, timeSource, simNetParams) if err == nil { @@ -238,7 +238,7 @@ func TestBlockValidationRules(t *testing.T) { earlyBadVoteBits42.FromBytes(block142Bytes) earlyBadVoteBits42.Header.VoteBits ^= 0x80 b142test = dcrutil.NewBlock(earlyBadVoteBits42) - b142test.SetHeight(int64(stakeEarlyTest)) + b142test.SetHeight(int32(stakeEarlyTest)) err = blockchain.CheckWorklessBlockSanity(b142test, timeSource, simNetParams) if err == nil || err.(blockchain.RuleError).GetCode() != @@ -259,11 +259,11 @@ func TestBlockValidationRules(t *testing.T) { testsIdx2 := 154 testsIdx3 := 166 for i := stakeEarlyTest; i < testsIdx1; i++ { - bl, err := dcrutil.NewBlockFromBytes(blockChain[int64(i)]) + bl, err := dcrutil.NewBlockFromBytes(blockChain[int32(i)]) if err != nil { t.Errorf("NewBlockFromBytes error: %v", err.Error()) } - bl.SetHeight(int64(i)) + bl.SetHeight(int32(i)) _, _, err = chain.ProcessBlock(bl, timeSource, blockchain.BFNone) if err != nil { @@ -272,16 +272,16 @@ func TestBlockValidationRules(t *testing.T) { } // Make sure the last block validates. - block153, err := dcrutil.NewBlockFromBytes(blockChain[int64(testsIdx1)]) + block153, err := dcrutil.NewBlockFromBytes(blockChain[int32(testsIdx1)]) if err != nil { t.Errorf("NewBlockFromBytes error: %v", err.Error()) } - block153.SetHeight(int64(testsIdx1)) + block153.SetHeight(int32(testsIdx1)) err = chain.CheckConnectBlock(block153) if err != nil { t.Errorf("CheckConnectBlock error: %v", err.Error()) } - block153Bytes := blockChain[int64(testsIdx1)] + block153Bytes := blockChain[int32(testsIdx1)] // ---------------------------------------------------------------------------- // ErrBadMerkleRoot 1 @@ -290,7 +290,7 @@ func TestBlockValidationRules(t *testing.T) { badMerkleRoot153.FromBytes(block153Bytes) badMerkleRoot153.Header.MerkleRoot[0] ^= 0x01 b153test := dcrutil.NewBlock(badMerkleRoot153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) if err == nil || err.(blockchain.RuleError).GetCode() != @@ -313,7 +313,7 @@ func TestBlockValidationRules(t *testing.T) { badMerkleRoot153.FromBytes(block153Bytes) badMerkleRoot153.Header.StakeRoot[0] ^= 0x01 b153test = dcrutil.NewBlock(badMerkleRoot153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) if err == nil || err.(blockchain.RuleError).GetCode() != @@ -335,7 +335,7 @@ func TestBlockValidationRules(t *testing.T) { badDifficulty153.FromBytes(block153Bytes) badDifficulty153.Header.Bits = 0x207ffffe b153test = dcrutil.NewBlock(badDifficulty153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) _, _, err = chain.ProcessBlock(b153test, timeSource, blockchain.BFNone) if err == nil || err.(blockchain.RuleError).GetCode() != @@ -350,7 +350,7 @@ func TestBlockValidationRules(t *testing.T) { badBlockSize153.FromBytes(block153Bytes) badBlockSize153.Header.Size = 0x20ffff71 
b153test = dcrutil.NewBlock(badBlockSize153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) _, _, err = chain.ProcessBlock(b153test, timeSource, blockchain.BFNoPoWCheck) if err == nil || err.(blockchain.RuleError).GetCode() != @@ -365,7 +365,7 @@ func TestBlockValidationRules(t *testing.T) { badHash153.FromBytes(block153Bytes) badHash153.Header.Size = 0x20ffff70 b153test = dcrutil.NewBlock(badHash153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) _, _, err = chain.ProcessBlock(b153test, timeSource, blockchain.BFNone) if err == nil || err.(blockchain.RuleError).GetCode() != @@ -380,7 +380,7 @@ func TestBlockValidationRules(t *testing.T) { missingParent153.FromBytes(block153Bytes) missingParent153.Header.PrevBlock[8] ^= 0x01 b153test = dcrutil.NewBlock(missingParent153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) if err != nil { @@ -401,7 +401,7 @@ func TestBlockValidationRules(t *testing.T) { badSubsidy153.Transactions[0].TxOut[2].Value++ recalculateMsgBlockMerkleRootsSize(badSubsidy153) b153test = dcrutil.NewBlock(badSubsidy153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) if err != nil { @@ -424,7 +424,7 @@ func TestBlockValidationRules(t *testing.T) { badCBOutpoint153.Transactions[0].TxIn[0].PreviousOutPoint.Hash[0] ^= 0x01 recalculateMsgBlockMerkleRootsSize(badCBOutpoint153) b153test = dcrutil.NewBlock(badCBOutpoint153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) if err == nil || err.(blockchain.RuleError).GetCode() != @@ -445,7 +445,7 @@ func TestBlockValidationRules(t *testing.T) { badCBFraudProof153.Transactions[0].TxIn[0].BlockHeight = 0x12345678 recalculateMsgBlockMerkleRootsSize(badCBFraudProof153) b153test = dcrutil.NewBlock(badCBFraudProof153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) if err == nil || err.(blockchain.RuleError).GetCode() != @@ -467,7 +467,7 @@ func TestBlockValidationRules(t *testing.T) { badCBAmountIn153.Transactions[0].TxIn[0].ValueIn = 0x1234567890123456 recalculateMsgBlockMerkleRootsSize(badCBAmountIn153) b153test = dcrutil.NewBlock(badCBAmountIn153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) if err != nil { @@ -489,7 +489,7 @@ func TestBlockValidationRules(t *testing.T) { badSBAmountIn153.STransactions[0].TxIn[0].ValueIn = 0x1234567890123456 recalculateMsgBlockMerkleRootsSize(badSBAmountIn153) b153test = dcrutil.NewBlock(badSBAmountIn153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) if err != nil { @@ -516,7 +516,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(badStakebaseOutpoint153) badStakebaseOutpoint153.Header.Voters-- b153test = dcrutil.NewBlock(badStakebaseOutpoint153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) if err == nil || err.(blockchain.RuleError).GetCode() != @@ -542,7 
+542,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(ssgenInRegular153) b153test = dcrutil.NewBlock(ssgenInRegular153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) if err == nil || err.(blockchain.RuleError).GetCode() != @@ -566,7 +566,7 @@ func TestBlockValidationRules(t *testing.T) { badStakebaseSS recalculateMsgBlockMerkleRootsSize(badStakebaseSS153) b153test = dcrutil.NewBlock(badStakebaseSS153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) if err == nil || err.(blockchain.RuleError).GetCode() != @@ -589,7 +589,7 @@ func TestBlockValidationRules(t *testing.T) { badStakebaseScr153.STransactions[0].TxIn[0].SignatureScript[0] ^= 0x01 recalculateMsgBlockMerkleRootsSize(badStakebaseScr153) b153test = dcrutil.NewBlock(badStakebaseScr153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) if err == nil || err.(blockchain.RuleError).GetCode() != @@ -612,7 +612,7 @@ func TestBlockValidationRules(t *testing.T) { badSSRtxNum153.Header.Revocations = 2 b153test = dcrutil.NewBlock(badSSRtxNum153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) if err != nil { @@ -639,7 +639,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(ssrtxPayeesMismatch153) b153test = dcrutil.NewBlock(ssrtxPayeesMismatch153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) if err != nil { @@ -664,7 +664,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(badSSRtxPayee153) b153test = dcrutil.NewBlock(badSSRtxPayee153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) if err != nil { @@ -691,7 +691,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(badSSRtxPayee153) b153test = dcrutil.NewBlock(badSSRtxPayee153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) if err != nil { @@ -726,7 +726,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(badSSRtx153) b153test = dcrutil.NewBlock(badSSRtx153) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) if err != nil { @@ -747,16 +747,16 @@ func TestBlockValidationRules(t *testing.T) { block153MsgBlock := new(wire.MsgBlock) block153MsgBlock.FromBytes(block153Bytes) b153test = dcrutil.NewBlock(block153MsgBlock) - b153test.SetHeight(int64(testsIdx1)) + b153test.SetHeight(int32(testsIdx1)) _, _, err = chain.ProcessBlock(b153test, timeSource, blockchain.BFNone) if err != nil { t.Errorf("Got unexpected error processing block 153 %v", err) } - block154Bytes := blockChain[int64(testsIdx2)] + block154Bytes := blockChain[int32(testsIdx2)] block154MsgBlock := new(wire.MsgBlock) block154MsgBlock.FromBytes(block154Bytes) b154test := dcrutil.NewBlock(block154MsgBlock) - 
b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) // The incoming block should pass fine. err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) @@ -777,7 +777,7 @@ func TestBlockValidationRules(t *testing.T) { notEnoughStake154.AddSTransaction(mtxFromB) recalculateMsgBlockMerkleRootsSize(notEnoughStake154) b154test = dcrutil.NewBlock(notEnoughStake154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) // This fails both checks. err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) @@ -800,7 +800,7 @@ func TestBlockValidationRules(t *testing.T) { badFreshStake154.Header.FreshStake++ recalculateMsgBlockMerkleRootsSize(badFreshStake154) b154test = dcrutil.NewBlock(badFreshStake154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) // This passes. err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) @@ -829,7 +829,7 @@ func TestBlockValidationRules(t *testing.T) { notEnoughVotes154.STransactions = notEnoughVotes154.STransactions[0:2] recalculateMsgBlockMerkleRootsSize(notEnoughVotes154) b154test = dcrutil.NewBlock(notEnoughVotes154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -868,7 +868,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(tooManyVotes154) b154test = dcrutil.NewBlock(tooManyVotes154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) // Fails tax amount test. err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) @@ -892,7 +892,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(nonChosenTicket154) b154test = dcrutil.NewBlock(nonChosenTicket154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -919,7 +919,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(wrongBlockVote154) b154test = dcrutil.NewBlock(wrongBlockVote154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -945,7 +945,7 @@ func TestBlockValidationRules(t *testing.T) { sstxsIn154...) 
recalculateMsgBlockMerkleRootsSize(votesMismatch154) b154test = dcrutil.NewBlock(votesMismatch154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -968,7 +968,7 @@ func TestBlockValidationRules(t *testing.T) { badVoteBit154.FromBytes(block154Bytes) badVoteBit154.Header.VoteBits &= 0xFFFE // Zero critical voteBit b154test = dcrutil.NewBlock(badVoteBit154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -998,7 +998,7 @@ func TestBlockValidationRules(t *testing.T) { } recalculateMsgBlockMerkleRootsSize(badVoteBit154) b154test = dcrutil.NewBlock(badVoteBit154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1028,7 +1028,7 @@ func TestBlockValidationRules(t *testing.T) { } recalculateMsgBlockMerkleRootsSize(badVoteBit154) b154test = dcrutil.NewBlock(badVoteBit154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1058,7 +1058,7 @@ func TestBlockValidationRules(t *testing.T) { } recalculateMsgBlockMerkleRootsSize(badVoteBit154) b154test = dcrutil.NewBlock(badVoteBit154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1093,7 +1093,7 @@ func TestBlockValidationRules(t *testing.T) { } recalculateMsgBlockMerkleRootsSize(badVoteBit154) b154test = dcrutil.NewBlock(badVoteBit154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1128,7 +1128,7 @@ func TestBlockValidationRules(t *testing.T) { } recalculateMsgBlockMerkleRootsSize(badVoteBit154) b154test = dcrutil.NewBlock(badVoteBit154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1163,7 +1163,7 @@ func TestBlockValidationRules(t *testing.T) { } recalculateMsgBlockMerkleRootsSize(badVoteBit154) b154test = dcrutil.NewBlock(badVoteBit154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1190,7 +1190,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(badSStxCommit154) b154test = dcrutil.NewBlock(badSStxCommit154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1224,7 +1224,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(badSSGenPayee154) b154test = dcrutil.NewBlock(badSSGenPayee154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1249,7 +1249,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(badSSGenPayee154) b154test = dcrutil.NewBlock(badSSGenPayee154) - b154test.SetHeight(int64(testsIdx2)) + 
b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1311,7 +1311,7 @@ func TestBlockValidationRules(t *testing.T) { spendTaggedIn154.Transactions[11] = mtxFromB recalculateMsgBlockMerkleRootsSize(spendTaggedIn154) b154test = dcrutil.NewBlock(spendTaggedIn154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1340,7 +1340,7 @@ func TestBlockValidationRules(t *testing.T) { spendTaggedOut154.Transactions[11] = mtxFromB recalculateMsgBlockMerkleRootsSize(spendTaggedOut154) b154test = dcrutil.NewBlock(spendTaggedOut154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1362,7 +1362,7 @@ func TestBlockValidationRules(t *testing.T) { badFinalState154.FromBytes(block154Bytes) badFinalState154.Header.FinalState[0] ^= 0x01 b154test = dcrutil.NewBlock(badFinalState154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1384,7 +1384,7 @@ func TestBlockValidationRules(t *testing.T) { badPoolSize154.FromBytes(block154Bytes) badPoolSize154.Header.PoolSize++ b154test = dcrutil.NewBlock(badPoolSize154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1416,7 +1416,7 @@ func TestBlockValidationRules(t *testing.T) { errTxTreeIn154.Transactions[11] = mtxFromB recalculateMsgBlockMerkleRootsSize(errTxTreeIn154) b154test = dcrutil.NewBlock(errTxTreeIn154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1444,7 +1444,7 @@ func TestBlockValidationRules(t *testing.T) { badBlockHeight154.FromBytes(block154Bytes) badBlockHeight154.Header.Height++ b154test = dcrutil.NewBlock(badBlockHeight154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) // Throws ProcessBlock error through checkBlockContext. 
_, _, err = chain.ProcessBlock(b154test, timeSource, blockchain.BFNoPoWCheck) @@ -1463,7 +1463,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(taxMissing154) b154test = dcrutil.NewBlock(taxMissing154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err == nil || err.(blockchain.RuleError).GetCode() != @@ -1485,7 +1485,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(taxMissing154) b154test = dcrutil.NewBlock(taxMissing154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err == nil || err.(blockchain.RuleError).GetCode() != @@ -1507,7 +1507,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(taxMissing154) b154test = dcrutil.NewBlock(taxMissing154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err == nil || err.(blockchain.RuleError).GetCode() != @@ -1532,7 +1532,7 @@ func TestBlockValidationRules(t *testing.T) { expiredTx154.AddTransaction(mtxFromB) recalculateMsgBlockMerkleRootsSize(expiredTx154) b154test = dcrutil.NewBlock(expiredTx154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1559,7 +1559,7 @@ func TestBlockValidationRules(t *testing.T) { badValueIn154.Transactions[11] = mtxFromB recalculateMsgBlockMerkleRootsSize(badValueIn154) b154test = dcrutil.NewBlock(badValueIn154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1586,7 +1586,7 @@ func TestBlockValidationRules(t *testing.T) { badHeightProof154.Transactions[11] = mtxFromB recalculateMsgBlockMerkleRootsSize(badHeightProof154) b154test = dcrutil.NewBlock(badHeightProof154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1613,7 +1613,7 @@ func TestBlockValidationRules(t *testing.T) { badIndexProof154.Transactions[11] = mtxFromB recalculateMsgBlockMerkleRootsSize(badIndexProof154) b154test = dcrutil.NewBlock(badIndexProof154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1640,7 +1640,7 @@ func TestBlockValidationRules(t *testing.T) { badScrVal154.Transactions[11] = mtxFromB recalculateMsgBlockMerkleRootsSize(badScrVal154) b154test = dcrutil.NewBlock(badScrVal154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1663,7 +1663,7 @@ func TestBlockValidationRules(t *testing.T) { badScrValS154.STransactions[5].TxIn[0].SignatureScript[6] ^= 0x01 recalculateMsgBlockMerkleRootsSize(badScrValS154) b154test = dcrutil.NewBlock(badScrValS154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1693,7 +1693,7 @@ func TestBlockValidationRules(t *testing.T) { 
recalculateMsgBlockMerkleRootsSize(invalMissingInsS154) b154test = dcrutil.NewBlock(invalMissingInsS154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1720,7 +1720,7 @@ func TestBlockValidationRules(t *testing.T) { malformedScr154.Transactions[11] = mtxFromB recalculateMsgBlockMerkleRootsSize(malformedScr154) b154test = dcrutil.NewBlock(malformedScr154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1759,7 +1759,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(spendZeroValueIn154) b154test = dcrutil.NewBlock(spendZeroValueIn154) - b154test.SetHeight(int64(testsIdx2)) + b154test.SetHeight(int32(testsIdx2)) err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) if err != nil { @@ -1781,11 +1781,11 @@ func TestBlockValidationRules(t *testing.T) { // Load up to block 166. 165 invalidates its previous tx tree, making // it good for testing. for i := testsIdx2; i < testsIdx3; i++ { - bl, err := dcrutil.NewBlockFromBytes(blockChain[int64(i)]) + bl, err := dcrutil.NewBlockFromBytes(blockChain[int32(i)]) if err != nil { t.Errorf("NewBlockFromBytes error: %v", err.Error()) } - bl.SetHeight(int64(i)) + bl.SetHeight(int32(i)) // Double check and ensure there's no cross tree spending in // block 164. @@ -1809,7 +1809,7 @@ func TestBlockValidationRules(t *testing.T) { t.Errorf("ProcessBlock error: %v", err.Error()) } } - block166Bytes := blockChain[int64(testsIdx3)] + block166Bytes := blockChain[int32(testsIdx3)] // ---------------------------------------------------------------------------- // Attempt to spend from TxTreeRegular of block 164, which should never @@ -1826,7 +1826,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(spendInvalid166) b166test := dcrutil.NewBlock(spendInvalid166) - b166test.SetHeight(int64(testsIdx3)) + b166test.SetHeight(int32(testsIdx3)) err = blockchain.CheckWorklessBlockSanity(b166test, timeSource, simNetParams) if err != nil { @@ -1874,7 +1874,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(sstxSpendInvalid166) b166test = dcrutil.NewBlock(sstxSpendInvalid166) - b166test.SetHeight(int64(testsIdx3)) + b166test.SetHeight(int32(testsIdx3)) err = blockchain.CheckWorklessBlockSanity(b166test, timeSource, simNetParams) if err != nil { @@ -1917,7 +1917,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(sstxSpend2Invalid166) b166test = dcrutil.NewBlock(sstxSpend2Invalid166) - b166test.SetHeight(int64(testsIdx3)) + b166test.SetHeight(int32(testsIdx3)) err = blockchain.CheckWorklessBlockSanity(b166test, timeSource, simNetParams) if err != nil { @@ -1950,7 +1950,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(sstxSpend3Invalid166) b166test = dcrutil.NewBlock(sstxSpend3Invalid166) - b166test.SetHeight(int64(testsIdx3)) + b166test.SetHeight(int32(testsIdx3)) err = blockchain.CheckWorklessBlockSanity(b166test, timeSource, simNetParams) if err != nil { @@ -1976,7 +1976,7 @@ func TestBlockValidationRules(t *testing.T) { recalculateMsgBlockMerkleRootsSize(regTxSpendStakeIn166) b166test = dcrutil.NewBlock(regTxSpendStakeIn166) - b166test.SetHeight(int64(testsIdx3)) + b166test.SetHeight(int32(testsIdx3)) err = 
blockchain.CheckWorklessBlockSanity(b166test, timeSource, simNetParams) if err != nil { diff --git a/blockmanager.go b/blockmanager.go index 85fd1ae1..0023422f 100644 --- a/blockmanager.go +++ b/blockmanager.go @@ -415,7 +415,7 @@ type setParentTemplateResponse struct { // headerNode is used as a node in a list of headers that are linked together // between checkpoints. type headerNode struct { - height int64 + height int32 sha *chainhash.Hash } @@ -442,7 +442,7 @@ type chainState struct { // chain. // // This function is safe for concurrent access. -func (c *chainState) Best() (*chainhash.Hash, int64) { +func (c *chainState) Best() (*chainhash.Hash, int32) { c.Lock() defer c.Unlock() @@ -545,7 +545,7 @@ type blockManager struct { // resetHeaderState sets the headers-first mode state to values appropriate for // syncing from a new peer. -func (b *blockManager) resetHeaderState(newestHash *chainhash.Hash, newestHeight int64) { +func (b *blockManager) resetHeaderState(newestHash *chainhash.Hash, newestHeight int32) { b.headersFirstMode = false b.headerList.Init() b.startHeader = nil @@ -564,7 +564,7 @@ func (b *blockManager) resetHeaderState(newestHash *chainhash.Hash, newestHeight // safe for concurrent access and the block manager is typically quite busy // processing block and inventory. func (b *blockManager) updateChainState(newestHash *chainhash.Hash, - newestHeight int64, + newestHeight int32, finalState [6]byte, poolSize uint32, winningTickets []chainhash.Hash, @@ -594,7 +594,7 @@ func (b *blockManager) updateChainState(newestHash *chainhash.Hash, // It returns nil when there is not one either because the height is already // later than the final checkpoint or some other reason such as disabled // checkpoints. -func (b *blockManager) findNextHeaderCheckpoint(height int64) *chaincfg.Checkpoint { +func (b *blockManager) findNextHeaderCheckpoint(height int32) *chaincfg.Checkpoint { // There is no next checkpoint if checkpoints are disabled or there are // none for this current network. if cfg.DisableCheckpoints { @@ -919,7 +919,7 @@ func (b *blockManager) current() bool { // TODO(oga) we can get chain to return the height of each block when we // parse an orphan, which would allow us to update the height of peers // from what it was at initial handshake. - if err != nil || height < int64(b.syncPeer.startingHeight) { + if err != nil || height < b.syncPeer.lastBlock { return false } diff --git a/chaincfg/params.go b/chaincfg/params.go index cf7b42e7..c4beba71 100644 --- a/chaincfg/params.go +++ b/chaincfg/params.go @@ -57,7 +57,7 @@ var CPUMinerThreads = 1 // documentation for chain.IsCheckpointCandidate for details on the selection // criteria. type Checkpoint struct { - Height int64 + Height int32 Hash *chainhash.Hash } diff --git a/chainindexer.go b/chainindexer.go index 5d2726af..1801a977 100644 --- a/chainindexer.go +++ b/chainindexer.go @@ -48,8 +48,8 @@ type addrIndexer struct { shutdown int32 state indexState progressLogger *blockProgressLogger - currentIndexTip int64 - chainTip int64 + currentIndexTip int32 + chainTip int32 sync.Mutex } diff --git a/cmd/dcrctl/version.go b/cmd/dcrctl/version.go index 8684445d..218b7ebd 100644 --- a/cmd/dcrctl/version.go +++ b/cmd/dcrctl/version.go @@ -19,7 +19,7 @@ const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqr const ( appMajor uint = 0 appMinor uint = 0 - appPatch uint = 6 + appPatch uint = 5 // appPreRelease MUST only contain characters from semanticAlphabet // per the semantic versioning spec. 
diff --git a/cmd/dropafter/dropafter.go b/cmd/dropafter/dropafter.go index 07ccb96f..1a7a48da 100644 --- a/cmd/dropafter/dropafter.go +++ b/cmd/dropafter/dropafter.go @@ -165,7 +165,7 @@ var errBadShaPrefix = errors.New("invalid prefix") var errBadShaLen = errors.New("invalid len") var errBadShaChar = errors.New("invalid character") -func parsesha(argstr string) (argtype int, height int64, psha *chainhash.Hash, err error) { +func parsesha(argstr string) (argtype int, height int32, psha *chainhash.Hash, err error) { var sha chainhash.Hash var hashbuf string @@ -187,7 +187,7 @@ func parsesha(argstr string) (argtype int, height int64, psha *chainhash.Hash, e var h int h, err = strconv.Atoi(argstr) if err == nil { - height = int64(h) + height = int32(h) return } log.Infof("Unable to parse height %v, err %v", height, err) diff --git a/cmd/findcheckpoint/findcheckpoint.go b/cmd/findcheckpoint/findcheckpoint.go index 2dc82c8e..25b8af04 100644 --- a/cmd/findcheckpoint/findcheckpoint.go +++ b/cmd/findcheckpoint/findcheckpoint.go @@ -67,7 +67,7 @@ func findCandidates(db database.Db, latestHash *chainhash.Hash) ([]*chaincfg.Che // The latest known block must be at least the last known checkpoint // plus required checkpoint confirmations. - checkpointConfirmations := int64(blockchain.CheckpointConfirmations) + checkpointConfirmations := int32(blockchain.CheckpointConfirmations) requiredHeight := latestCheckpoint.Height + checkpointConfirmations if block.Height() < requiredHeight { return nil, fmt.Errorf("the block database is only at height "+ @@ -92,7 +92,7 @@ func findCandidates(db database.Db, latestHash *chainhash.Hash) ([]*chaincfg.Che // Loop backwards through the chain to find checkpoint candidates. candidates := make([]*chaincfg.Checkpoint, 0, cfg.NumCandidates) - numTested := int64(0) + numTested := int32(0) for len(candidates) < cfg.NumCandidates && block.Height() > requiredHeight { // Display progress. if numTested%progressInterval == 0 { diff --git a/cpuminer.go b/cpuminer.go index 527d84c0..78beccf5 100644 --- a/cpuminer.go +++ b/cpuminer.go @@ -194,8 +194,6 @@ func (m *CPUMiner) submitBlock(block *dcrutil.Block) bool { func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, ticker *time.Ticker, quit chan struct{}) bool { - blockHeight := int64(msgBlock.Header.Height) - // Choose a random extra nonce offset for this block template and // worker. enOffset, err := wire.RandomUint64() diff --git a/database/db.go b/database/db.go index 40b88d52..32c348c2 100644 --- a/database/db.go +++ b/database/db.go @@ -33,7 +33,7 @@ var ( // AllShas is a special value that can be used as the final sha when requesting // a range of shas by height to request them all. -const AllShas = int64(^uint64(0) >> 1) +const AllShas = int32(^uint32(0) >> 1) // Db defines a generic interface that is used to request and insert data into // the decred block chain. This interface is intended to be agnostic to actual @@ -58,7 +58,7 @@ type Db interface { FetchBlockBySha(sha *chainhash.Hash) (blk *dcrutil.Block, err error) // FetchBlockHeightBySha returns the block height for the given hash. - FetchBlockHeightBySha(sha *chainhash.Hash) (height int64, err error) + FetchBlockHeightBySha(sha *chainhash.Hash) (height int32, err error) // FetchBlockHeaderBySha returns a wire.BlockHeader for the given // sha. The implementation may cache the underlying data if desired. @@ -66,13 +66,13 @@ type Db interface { // FetchBlockShaByHeight returns a block hash based on its height in the // block chain. 
- FetchBlockShaByHeight(height int64) (sha *chainhash.Hash, err error) + FetchBlockShaByHeight(height int32) (sha *chainhash.Hash, err error) // FetchHeightRange looks up a range of blocks by the start and ending // heights. Fetch is inclusive of the start height and exclusive of the // ending height. To fetch all hashes from the start height until no // more are present, use the special id `AllShas'. - FetchHeightRange(startHeight, endHeight int64) (rshalist []chainhash.Hash, err error) + FetchHeightRange(startHeight, endHeight int32) (rshalist []chainhash.Hash, err error) // ExistsTxSha returns whether or not the given tx hash is present in // the database @@ -108,19 +108,19 @@ type Db interface { // into the database. The first block inserted into the database // will be treated as the genesis block. Every subsequent block insert // requires the referenced parent block to already exist. - InsertBlock(block *dcrutil.Block) (height int64, err error) + InsertBlock(block *dcrutil.Block) (height int32, err error) // NewestSha returns the hash and block height of the most recent (end) // block of the block chain. It will return the zero hash, -1 for // the block height, and no error (nil) if there are not any blocks in // the database yet. - NewestSha() (sha *chainhash.Hash, height int64, err error) + NewestSha() (sha *chainhash.Hash, height int32, err error) // FetchAddrIndexTip returns the hash and block height of the most recent // block which has had its address index populated. It will return // ErrAddrIndexDoesNotExist along with a zero hash, and -1 if the // addrindex hasn't yet been built up. - FetchAddrIndexTip() (sha *chainhash.Hash, height int64, err error) + FetchAddrIndexTip() (sha *chainhash.Hash, height int32, err error) // UpdateAddrIndexForBlock updates the stored addrindex with passed // index information for a particular block height. Additionally, it @@ -129,12 +129,12 @@ type Db interface { // transaction which is commited before the function returns. // Addresses are indexed by the raw bytes of their base58 decoded // hash160. - UpdateAddrIndexForBlock(blkSha *chainhash.Hash, height int64, + UpdateAddrIndexForBlock(blkSha *chainhash.Hash, height int32, addrIndex BlockAddrIndex) error // DropAddrIndexForBlock removes all passed address indexes and sets // the current block index below the previous HEAD. - DropAddrIndexForBlock(blkSha *chainhash.Hash, height int64, + DropAddrIndexForBlock(blkSha *chainhash.Hash, height int32, addrIndex BlockAddrIndex) error // FetchTxsForAddr looks up and returns all transactions which either @@ -173,7 +173,7 @@ type TxListReply struct { Sha *chainhash.Hash Tx *wire.MsgTx BlkSha *chainhash.Hash - Height int64 + Height int32 Index uint32 TxSpent []bool Err error diff --git a/database/interface_test.go b/database/interface_test.go index e9ddba97..b600768a 100644 --- a/database/interface_test.go +++ b/database/interface_test.go @@ -28,7 +28,7 @@ type testContext struct { t *testing.T dbType string db database.Db - blockHeight int64 + blockHeight int32 blockHash *chainhash.Hash block *dcrutil.Block useSpends bool @@ -215,7 +215,7 @@ func testFetchBlockShaByHeight(tc *testContext) bool { func testFetchBlockShaByHeightErrors(tc *testContext) bool { // Invalid heights must error and return a nil hash. 
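// ----------------------------------------------------------------------------
// Editor's aside (not part of the patch): with the Db interface moved to int32
// heights, the AllShas sentinel above becomes the maximum int32. A quick check
// of the new constant expression; the identifier names are local to this
// sketch.
package main

import (
	"fmt"
	"math"
)

const allShas = int32(^uint32(0) >> 1)

func main() {
	fmt.Println(allShas, allShas == math.MaxInt32) // 2147483647 true

	// Callers wanting every hash from a start height onward would pass the
	// sentinel as the exclusive end height, along the lines of
	// db.FetchHeightRange(0, database.AllShas).
}
// ----------------------------------------------------------------------------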
- tests := []int64{-1, tc.blockHeight + 1, tc.blockHeight + 2} + tests := []int32{-1, tc.blockHeight + 1, tc.blockHeight + 2} for i, wantHeight := range tests { hashFromDb, err := tc.db.FetchBlockShaByHeight(wantHeight) if err == nil { @@ -964,7 +964,7 @@ func testInterface(t *testing.T, dbType string) { context := testContext{t: t, dbType: dbType, db: db} t.Logf("Loaded %d blocks for testing %s", len(blocks), dbType) - for height := int64(0); height < int64(len(blocks)); height++ { + for height := int32(0); height < int32(len(blocks)); height++ { // Get the appropriate block and hash and update the test // context accordingly. block := blocks[height] @@ -1000,7 +1000,7 @@ func testInterface(t *testing.T, dbType string) { // Run the data integrity tests again after all blocks have been // inserted to ensure the spend tracking is working properly. context.useSpends = true - for height := int64(0); height < int64(len(blocks)); height++ { + for height := int32(0); height < int32(len(blocks)); height++ { // Get the appropriate block and hash and update the // test context accordingly. block := blocks[height] @@ -1032,14 +1032,14 @@ func testInterface(t *testing.T, dbType string) { - DropAfterBlockBySha(*wire.ShaHash) (err error) x ExistsSha(sha *wire.ShaHash) (exists bool) x FetchBlockBySha(sha *wire.ShaHash) (blk *dcrutil.Block, err error) - x FetchBlockShaByHeight(height int64) (sha *wire.ShaHash, err error) + x FetchBlockShaByHeight(height int32) (sha *wire.ShaHash, err error) - FetchHeightRange(startHeight, endHeight int64) (rshalist []wire.ShaHash, err error) x ExistsTxSha(sha *wire.ShaHash) (exists bool) x FetchTxBySha(txsha *wire.ShaHash) ([]*TxListReply, error) x FetchTxByShaList(txShaList []*wire.ShaHash) []*TxListReply x FetchUnSpentTxByShaList(txShaList []*wire.ShaHash) []*TxListReply - x InsertBlock(block *dcrutil.Block) (height int64, err error) - x NewestSha() (sha *wire.ShaHash, height int64, err error) + x InsertBlock(block *dcrutil.Block) (height int32, err error) + x NewestSha() (sha *wire.ShaHash, height int32, err error) - RollbackClose() - Sync() */ diff --git a/database/ldb/block.go b/database/ldb/block.go index 604df509..8658fa26 100644 --- a/database/ldb/block.go +++ b/database/ldb/block.go @@ -44,7 +44,7 @@ func (db *LevelDb) fetchBlockBySha(sha *chainhash.Hash) (blk *dcrutil.Block, err // FetchBlockHeightBySha returns the block height for the given hash. This is // part of the database.Db interface implementation. 
-func (db *LevelDb) FetchBlockHeightBySha(sha *chainhash.Hash) (int64, error) { +func (db *LevelDb) FetchBlockHeightBySha(sha *chainhash.Hash) (int32, error) { db.dbLock.Lock() defer db.dbLock.Unlock() @@ -74,7 +74,7 @@ func (db *LevelDb) FetchBlockHeaderBySha(sha *chainhash.Hash) (bh *wire.BlockHea return bh, err } -func (db *LevelDb) getBlkLoc(sha *chainhash.Hash) (int64, error) { +func (db *LevelDb) getBlkLoc(sha *chainhash.Hash) (int32, error) { key := shaBlkToKey(sha) data, err := db.lDb.Get(key, db.ro) @@ -88,13 +88,13 @@ func (db *LevelDb) getBlkLoc(sha *chainhash.Hash) (int64, error) { // deserialize blkHeight := binary.LittleEndian.Uint64(data) - return int64(blkHeight), nil + return int32(blkHeight), nil } -func (db *LevelDb) getBlkByHeight(blkHeight int64) (rsha *chainhash.Hash, rbuf []byte, err error) { +func (db *LevelDb) getBlkByHeight(blkHeight int32) (rsha *chainhash.Hash, rbuf []byte, err error) { var blkVal []byte - key := int64ToKey(blkHeight) + key := int64ToKey(int64(blkHeight)) blkVal, err = db.lDb.Get(key, db.ro) if err != nil { @@ -112,8 +112,8 @@ func (db *LevelDb) getBlkByHeight(blkHeight int64) (rsha *chainhash.Hash, rbuf [ return &sha, blockdata, nil } -func (db *LevelDb) getBlk(sha *chainhash.Hash) (rblkHeight int64, rbuf []byte, err error) { - var blkHeight int64 +func (db *LevelDb) getBlk(sha *chainhash.Hash) (rblkHeight int32, rbuf []byte, err error) { + var blkHeight int32 blkHeight, err = db.getBlkLoc(sha) if err != nil { @@ -129,13 +129,13 @@ func (db *LevelDb) getBlk(sha *chainhash.Hash) (rblkHeight int64, rbuf []byte, e return blkHeight, buf, nil } -func (db *LevelDb) setBlk(sha *chainhash.Hash, blkHeight int64, buf []byte) { +func (db *LevelDb) setBlk(sha *chainhash.Hash, blkHeight int32, buf []byte) { // serialize var lw [8]byte binary.LittleEndian.PutUint64(lw[0:8], uint64(blkHeight)) shaKey := shaBlkToKey(sha) - blkKey := int64ToKey(blkHeight) + blkKey := int64ToKey(int64(blkHeight)) blkVal := make([]byte, len(sha)+len(buf)) copy(blkVal[0:], sha[:]) @@ -148,7 +148,7 @@ func (db *LevelDb) setBlk(sha *chainhash.Hash, blkHeight int64, buf []byte) { // insertSha stores a block hash and its associated data block with a // previous sha of `prevSha'. // insertSha shall be called with db lock held -func (db *LevelDb) insertBlockData(sha *chainhash.Hash, prevSha *chainhash.Hash, buf []byte) (int64, error) { +func (db *LevelDb) insertBlockData(sha *chainhash.Hash, prevSha *chainhash.Hash, buf []byte) (int32, error) { oBlkHeight, err := db.getBlkLoc(prevSha) if err != nil { // check current block count @@ -178,8 +178,8 @@ func (db *LevelDb) insertBlockData(sha *chainhash.Hash, prevSha *chainhash.Hash, // fetchSha returns the datablock for the given ShaHash. func (db *LevelDb) fetchSha(sha *chainhash.Hash) (rbuf []byte, - rblkHeight int64, err error) { - var blkHeight int64 + rblkHeight int32, err error) { + var blkHeight int32 var buf []byte blkHeight, buf, err = db.getBlk(sha) @@ -211,7 +211,7 @@ func (db *LevelDb) blkExistsSha(sha *chainhash.Hash) (bool, error) { // FetchBlockShaByHeight returns a block hash based on its height in the // block chain. -func (db *LevelDb) FetchBlockShaByHeight(height int64) (sha *chainhash.Hash, err error) { +func (db *LevelDb) FetchBlockShaByHeight(height int32) (sha *chainhash.Hash, err error) { db.dbLock.Lock() defer db.dbLock.Unlock() @@ -220,8 +220,9 @@ func (db *LevelDb) FetchBlockShaByHeight(height int64) (sha *chainhash.Hash, err // fetchBlockShaByHeight returns a block hash based on its height in the // block chain. 
-func (db *LevelDb) fetchBlockShaByHeight(height int64) (rsha *chainhash.Hash, err error) { - key := int64ToKey(height) +func (db *LevelDb) fetchBlockShaByHeight(height int32) (rsha *chainhash.Hash, err error) { + key := int64ToKey(int64(height))
 blkVal, err := db.lDb.Get(key, db.ro) if err != nil { @@ -239,11 +240,11 @@ func (db *LevelDb) fetchBlockShaByHeight(height int64) (rsha *chainhash.Hash, er // heights. Fetch is inclusive of the start height and exclusive of the // ending height. To fetch all hashes from the start height until no // more are present, use the special id `AllShas'. -func (db *LevelDb) FetchHeightRange(startHeight, endHeight int64) (rshalist []chainhash.Hash, err error) { +func (db *LevelDb) FetchHeightRange(startHeight, endHeight int32) (rshalist []chainhash.Hash, err error) { db.dbLock.Lock() defer db.dbLock.Unlock() - var endidx int64 + var endidx int32 if endHeight == database.AllShas { endidx = startHeight + 500 } else { @@ -254,7 +255,7 @@ func (db *LevelDb) FetchHeightRange(startHeight, endHeight int64) (rshalist []ch for height := startHeight; height < endidx; height++ { // TODO(drahn) fix blkFile from height - key := int64ToKey(height) + key := int64ToKey(int64(height)) blkVal, lerr := db.lDb.Get(key, db.ro) if lerr != nil { break @@ -276,7 +277,7 @@ func (db *LevelDb) FetchHeightRange(startHeight, endHeight int64) (rshalist []ch // NewestSha returns the hash and block height of the most recent (end) block of // the block chain. It will return the zero hash, -1 for the block height, and // no error (nil) if there are not any blocks in the database yet. -func (db *LevelDb) NewestSha() (rsha *chainhash.Hash, rblkid int64, err error) { +func (db *LevelDb) NewestSha() (rsha *chainhash.Hash, rblkid int32, err error) { db.dbLock.Lock() defer db.dbLock.Unlock() @@ -315,7 +316,7 @@ func (db *LevelDb) checkAddrIndexVersion() error { // updated accordingly by functions that modify the state. This function is // used on start up to load the info into memory. Callers will use the public // version of this function below, which returns our cached copy. -func (db *LevelDb) fetchAddrIndexTip() (*chainhash.Hash, int64, error) { +func (db *LevelDb) fetchAddrIndexTip() (*chainhash.Hash, int32, error) { db.dbLock.Lock() defer db.dbLock.Unlock() @@ -329,14 +330,14 @@ func (db *LevelDb) fetchAddrIndexTip() (*chainhash.Hash, int64, error) { blkHeight := binary.LittleEndian.Uint64(data[32:]) - return &blkSha, int64(blkHeight), nil + return &blkSha, int32(blkHeight), nil } // FetchAddrIndexTip returns the hash and block height of the most recent // block whose transactions have been indexed by address. It will return // ErrAddrIndexDoesNotExist along with a zero hash, and -1 if the // addrindex hasn't yet been built up.
-func (db *LevelDb) FetchAddrIndexTip() (*chainhash.Hash, int64, error) { +func (db *LevelDb) FetchAddrIndexTip() (*chainhash.Hash, int32, error) { db.dbLock.Lock() defer db.dbLock.Unlock() diff --git a/database/ldb/dup_test.go b/database/ldb/dup_test.go index b60172c4..aee2eaf8 100644 --- a/database/ldb/dup_test.go +++ b/database/ldb/dup_test.go @@ -62,7 +62,7 @@ func Test_dupTx(t *testing.T) { // Populate with the fisrt 256 blocks, so we have blocks to 'mess with' err = nil out: - for height := int64(0); height < int64(len(blocks)); height++ { + for height := int32(0); height < int32(len(blocks)); height++ { block := blocks[height] if height != 0 { // except for NoVerify which does not allow lookups check inputs diff --git a/database/ldb/insertremove_test.go b/database/ldb/insertremove_test.go index e9854842..268a5a2f 100644 --- a/database/ldb/insertremove_test.go +++ b/database/ldb/insertremove_test.go @@ -66,7 +66,7 @@ func testUnspentInsertStakeTree(t *testing.T) { }() blocks := loadblocks(t) endtest: - for height := int64(0); height < int64(len(blocks))-1; height++ { + for height := int32(0); height < int32(len(blocks))-1; height++ { block := blocks[height] var txneededList []*chainhash.Hash diff --git a/database/ldb/leveldb.go b/database/ldb/leveldb.go index 74ce3f4d..7e34ec96 100644 --- a/database/ldb/leveldb.go +++ b/database/ldb/leveldb.go @@ -31,7 +31,7 @@ var log = btclog.Disabled type tTxInsertData struct { txsha *chainhash.Hash - blockid int64 + blockid int32 txoff int txlen int usedbuf []byte @@ -49,14 +49,14 @@ type LevelDb struct { lbatch *leveldb.Batch - nextBlock int64 + nextBlock int32 lastBlkShaCached bool lastBlkSha chainhash.Hash - lastBlkIdx int64 + lastBlkIdx int32 lastAddrIndexBlkSha chainhash.Hash - lastAddrIndexBlkIdx int64 + lastAddrIndexBlkIdx int32 txUpdateMap map[chainhash.Hash]*txUpdateObj txSpentUpdateMap map[chainhash.Hash]*spentTxUpdate @@ -100,9 +100,9 @@ func OpenDB(args ...interface{}) (database.Db, error) { } // Need to find last block and tx - var lastknownblock, nextunknownblock, testblock int64 + var lastknownblock, nextunknownblock, testblock int32 - increment := int64(100000) + increment := int32(100000) ldb := db.(*LevelDb) var lastSha *chainhash.Hash @@ -380,7 +380,7 @@ func (db *LevelDb) DropAfterBlockBySha(sha *chainhash.Hash) (rerr error) { } db.lBatch().Delete(shaBlkToKey(blksha)) - db.lBatch().Delete(int64ToKey(height)) + db.lBatch().Delete(int64ToKey(int64(height))) } // update the last block cache @@ -396,7 +396,7 @@ func (db *LevelDb) DropAfterBlockBySha(sha *chainhash.Hash) (rerr error) { // database. The first block inserted into the database will be treated as the // genesis block. Every subsequent block insert requires the referenced parent // block to already exist. -func (db *LevelDb) InsertBlock(block *dcrutil.Block) (height int64, rerr error) { +func (db *LevelDb) InsertBlock(block *dcrutil.Block) (height int32, rerr error) { // Be careful with this function on syncs. It contains decred changes. // Obtain the previous block first so long as it's not the genesis block diff --git a/database/ldb/operational_test.go b/database/ldb/operational_test.go index 994e4cb7..ce1c85d9 100644 --- a/database/ldb/operational_test.go +++ b/database/ldb/operational_test.go @@ -74,7 +74,7 @@ func TestOperational(t *testing.T) { // testAddrIndexOperations ensures that all normal operations concerning // the optional address index function correctly. 
-func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *dcrutil.Block, newestSha *chainhash.Hash, newestBlockIdx int64) { +func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *dcrutil.Block, newestSha *chainhash.Hash, newestBlockIdx int32) { // Metadata about the current addr index state should be unset. sha, height, err := db.FetchAddrIndexTip() if err != database.ErrAddrIndexDoesNotExist { @@ -197,7 +197,7 @@ func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *dcrutil. } -func assertAddrIndexTipIsUpdated(db database.Db, t *testing.T, newestSha *chainhash.Hash, newestBlockIdx int64) { +func assertAddrIndexTipIsUpdated(db database.Db, t *testing.T, newestSha *chainhash.Hash, newestBlockIdx int32) { // Safe to ignore error, since height will be < 0 in "error" case. sha, height, _ := db.FetchAddrIndexTip() if newestBlockIdx != height { @@ -224,7 +224,7 @@ func testOperationalMode(t *testing.T) { defer testDb.cleanUpFunc() err = nil out: - for height := int64(0); height < int64(len(testDb.blocks)); height++ { + for height := int32(0); height < int32(len(testDb.blocks)); height++ { block := testDb.blocks[height] if height != 0 { // except for NoVerify which does not allow lookups check inputs @@ -369,7 +369,7 @@ func testBackout(t *testing.T) { } err = nil - for height := int64(0); height < int64(len(testDb.blocks)); height++ { + for height := int32(0); height < int32(len(testDb.blocks)); height++ { if height == 100 { testDb.db.Sync() } @@ -460,14 +460,14 @@ func loadBlocks(t *testing.T, file string) (blocks []*dcrutil.Block, err error) // Create decoder from the buffer and a map to store the data bcDecoder := gob.NewDecoder(bcBuf) - blockchain := make(map[int64][]byte) + blockchain := make(map[int32][]byte) // Decode the blockchain into the map if err := bcDecoder.Decode(&blockchain); err != nil { t.Errorf("error decoding test blockchain") } blocks = make([]*dcrutil.Block, 0, len(blockchain)) - for height := int64(1); height < int64(len(blockchain)); height++ { + for height := int32(1); height < int32(len(blockchain)); height++ { block, err := dcrutil.NewBlockFromBytes(blockchain[height]) if err != nil { t.Errorf("failed to parse block %v", height) @@ -482,18 +482,18 @@ func loadBlocks(t *testing.T, file string) (blocks []*dcrutil.Block, err error) func testFetchHeightRange(t *testing.T, db database.Db, blocks []*dcrutil.Block) { - var testincrement int64 = 50 - var testcnt int64 = 100 + var testincrement int32 = 50 + var testcnt int32 = 100 shanames := make([]*chainhash.Hash, len(blocks)) - nBlocks := int64(len(blocks)) + nBlocks := int32(len(blocks)) for i := range blocks { shanames[i] = blocks[i].Sha() } - for startheight := int64(0); startheight < nBlocks; startheight += testincrement { + for startheight := int32(0); startheight < nBlocks; startheight += testincrement { endheight := startheight + testcnt if endheight > nBlocks { @@ -506,20 +506,20 @@ func testFetchHeightRange(t *testing.T, db database.Db, blocks []*dcrutil.Block) } if endheight == database.AllShas { - if int64(len(shalist)) != nBlocks-startheight { + if int32(len(shalist)) != nBlocks-startheight { t.Errorf("FetchHeightRange: expected A %v shas, got %v", nBlocks-startheight, len(shalist)) } } else { - if int64(len(shalist)) != testcnt { + if int32(len(shalist)) != testcnt { t.Errorf("FetchHeightRange: expected %v shas, got %v", testcnt, len(shalist)) } } for i := range shalist { - sha0 := *shanames[int64(i)+startheight] + sha0 := *shanames[int32(i)+startheight] sha1 
:= shalist[i] if sha0 != sha1 { - t.Errorf("FetchHeightRange: mismatch sha at %v requested range %v %v: %v %v ", int64(i)+startheight, startheight, endheight, sha0, sha1) + t.Errorf("FetchHeightRange: mismatch sha at %v requested range %v %v: %v %v ", int32(i)+startheight, startheight, endheight, sha0, sha1) } } } diff --git a/database/ldb/tx.go b/database/ldb/tx.go index b60cb2ef..dbef0f0c 100644 --- a/database/ldb/tx.go +++ b/database/ldb/tx.go @@ -50,7 +50,7 @@ var addrIndexVersionKey = []byte("addrindexversion") type txUpdateObj struct { txSha *chainhash.Hash - blkHeight int64 + blkHeight int32 blkIndex uint32 txoff int txlen int @@ -60,7 +60,7 @@ type txUpdateObj struct { } type spentTx struct { - blkHeight int64 + blkHeight int32 blkIndex uint32 txoff int txlen int @@ -73,7 +73,7 @@ type spentTxUpdate struct { } // InsertTx inserts a tx hash and its associated data into the database. -func (db *LevelDb) InsertTx(txsha *chainhash.Hash, height int64, idx uint32, txoff int, txlen int, spentbuf []byte) (err error) { +func (db *LevelDb) InsertTx(txsha *chainhash.Hash, height int32, idx uint32, txoff int, txlen int, spentbuf []byte) (err error) { db.dbLock.Lock() defer db.dbLock.Unlock() @@ -82,7 +82,7 @@ func (db *LevelDb) InsertTx(txsha *chainhash.Hash, height int64, idx uint32, txo // insertTx inserts a tx hash and its associated data into the database. // Must be called with db lock held. -func (db *LevelDb) insertTx(txSha *chainhash.Hash, height int64, idx uint32, txoff int, txlen int, spentbuf []byte) (err error) { +func (db *LevelDb) insertTx(txSha *chainhash.Hash, height int32, idx uint32, txoff int, txlen int, spentbuf []byte) (err error) { var txU txUpdateObj txU.txSha = txSha @@ -115,7 +115,7 @@ func (db *LevelDb) formatTx(txu *txUpdateObj) []byte { return txW[:] } -func (db *LevelDb) getTxData(txsha *chainhash.Hash) (int64, uint32, int, int, []byte, error) { +func (db *LevelDb) getTxData(txsha *chainhash.Hash) (int32, uint32, int, int, []byte, error) { key := shaTxToKey(txsha) buf, err := db.lDb.Get(key, db.ro) if err != nil { @@ -130,7 +130,7 @@ func (db *LevelDb) getTxData(txsha *chainhash.Hash) (int64, uint32, int, int, [] spentBuf := make([]byte, len(buf)-20) copy(spentBuf, buf[20:]) - return int64(blkHeight), blkIndex, int(txOff), int(txLen), spentBuf, nil + return int32(blkHeight), blkIndex, int(txOff), int(txLen), spentBuf, nil } func (db *LevelDb) getTxFullySpent(txsha *chainhash.Hash) ([]*spentTx, error) { @@ -157,7 +157,7 @@ func (db *LevelDb) getTxFullySpent(txsha *chainhash.Hash) ([]*spentTx, error) { numTxO := binary.LittleEndian.Uint32(buf[offset+20 : offset+24]) sTx := spentTx{ - blkHeight: int64(blkHeight), + blkHeight: int32(blkHeight), blkIndex: blkIndex, txoff: int(txOff), txlen: int(txLen), @@ -278,8 +278,8 @@ func (db *LevelDb) FetchUnSpentTxByShaList(txShaList []*chainhash.Hash) []*datab } // fetchTxDataBySha returns several pieces of data regarding the given sha. 
-func (db *LevelDb) fetchTxDataBySha(txsha *chainhash.Hash) (rtx *wire.MsgTx, rblksha *chainhash.Hash, rheight int64, ridx uint32, rtxspent []byte, err error) { - var blkHeight int64 +func (db *LevelDb) fetchTxDataBySha(txsha *chainhash.Hash) (rtx *wire.MsgTx, rblksha *chainhash.Hash, rheight int32, ridx uint32, rtxspent []byte, err error) { + var blkHeight int32 var blkIndex uint32 var txspent []byte var txOff, txLen int @@ -298,7 +298,7 @@ func (db *LevelDb) fetchTxDataBySha(txsha *chainhash.Hash) (rtx *wire.MsgTx, rbl // fetchTxDataByLoc returns several pieces of data regarding the given tx // located by the block/offset/size location -func (db *LevelDb) fetchTxDataByLoc(blkHeight int64, txOff int, txLen int, txspent []byte) (rtx *wire.MsgTx, rblksha *chainhash.Hash, rheight int64, rtxspent []byte, err error) { +func (db *LevelDb) fetchTxDataByLoc(blkHeight int32, txOff int, txLen int, txspent []byte) (rtx *wire.MsgTx, rblksha *chainhash.Hash, rheight int32, rtxspent []byte, err error) { var blksha *chainhash.Hash var blkbuf []byte @@ -547,7 +547,7 @@ func (db *LevelDb) FetchTxsForAddr(addr dcrutil.Address, skip int, // overhead when storing and retrieving since the entire list must // be fetched each time. func (db *LevelDb) UpdateAddrIndexForBlock(blkSha *chainhash.Hash, - blkHeight int64, addrIndexes database.BlockAddrIndex) error { + blkHeight int32, addrIndexes database.BlockAddrIndex) error { db.dbLock.Lock() defer db.dbLock.Unlock() diff --git a/database/memdb/memdb.go b/database/memdb/memdb.go index 48ced352..a3f14322 100644 --- a/database/memdb/memdb.go +++ b/database/memdb/memdb.go @@ -30,7 +30,7 @@ var ( // a transaction. type tTxInsertData struct { tree int8 - blockHeight int64 + blockHeight int32 offset int spentBuf []bool } @@ -84,7 +84,7 @@ type MemDb struct { // blocksBySha keeps track of block heights by hash. The height can // be used as an index into the blocks slice. - blocksBySha map[chainhash.Hash]int64 + blocksBySha map[chainhash.Hash]int32 // txns holds information about transactions such as which their // block height and spent status of all their outputs. @@ -175,7 +175,7 @@ func (db *MemDb) DropAfterBlockBySha(sha *chainhash.Hash) error { // backwards from the last block through the block just after the passed // block. While doing this unspend all transactions in each block and // remove the block. - endHeight := int64(len(db.blocks) - 1) + endHeight := int32(len(db.blocks) - 1) for i := endHeight; i > height; i-- { blk := db.blocks[i] @@ -247,7 +247,7 @@ func (db *MemDb) fetchBlockBySha(sha *chainhash.Hash) (*dcrutil.Block, error) { // FetchBlockHeightBySha returns the block height for the given hash. This is // part of the database.Db interface implementation. -func (db *MemDb) FetchBlockHeightBySha(sha *chainhash.Hash) (int64, error) { +func (db *MemDb) FetchBlockHeightBySha(sha *chainhash.Hash) (int32, error) { db.Lock() defer db.Unlock() @@ -285,7 +285,7 @@ func (db *MemDb) FetchBlockHeaderBySha(sha *chainhash.Hash) (*wire.BlockHeader, // FetchBlockShaByHeight returns a block hash based on its height in the block // chain. This is part of the database.Db interface implementation. 
-func (db *MemDb) FetchBlockShaByHeight(height int64) (*chainhash.Hash, error) { +func (db *MemDb) FetchBlockShaByHeight(height int32) (*chainhash.Hash, error) { db.Lock() defer db.Unlock() @@ -293,7 +293,7 @@ func (db *MemDb) FetchBlockShaByHeight(height int64) (*chainhash.Hash, error) { return nil, ErrDbClosed } - numBlocks := int64(len(db.blocks)) + numBlocks := int32(len(db.blocks)) if height < 0 || height > numBlocks-1 { return nil, fmt.Errorf("unable to fetch block height %d since "+ "it is not within the valid range (%d-%d)", height, 0, @@ -309,7 +309,7 @@ func (db *MemDb) FetchBlockShaByHeight(height int64) (*chainhash.Hash, error) { // Fetch is inclusive of the start height and exclusive of the ending height. // To fetch all hashes from the start height until no more are present, use the // special id `AllShas'. This is part of the database.Db interface implementation. -func (db *MemDb) FetchHeightRange(startHeight, endHeight int64) ([]chainhash.Hash, error) { +func (db *MemDb) FetchHeightRange(startHeight, endHeight int32) ([]chainhash.Hash, error) { db.Lock() defer db.Unlock() @@ -320,7 +320,7 @@ func (db *MemDb) FetchHeightRange(startHeight, endHeight int64) ([]chainhash.Has // When the user passes the special AllShas id, adjust the end height // accordingly. if endHeight == database.AllShas { - endHeight = int64(len(db.blocks)) + endHeight = int32(len(db.blocks)) } // Ensure requested heights are sane. @@ -335,7 +335,7 @@ func (db *MemDb) FetchHeightRange(startHeight, endHeight int64) ([]chainhash.Has } // Fetch as many as are availalbe within the specified range. - lastBlockIndex := int64(len(db.blocks) - 1) + lastBlockIndex := int32(len(db.blocks) - 1) hashList := make([]chainhash.Hash, 0, endHeight-startHeight) for i := startHeight; i < endHeight; i++ { if i > lastBlockIndex { @@ -540,7 +540,7 @@ func (db *MemDb) FetchUnSpentTxByShaList(txShaList []*chainhash.Hash) []*databas // genesis block. Every subsequent block insert requires the referenced parent // block to already exist. This is part of the database.Db interface // implementation. -func (db *MemDb) InsertBlock(block *dcrutil.Block) (int64, error) { +func (db *MemDb) InsertBlock(block *dcrutil.Block) (int32, error) { db.Lock() defer db.Unlock() @@ -576,7 +576,7 @@ func (db *MemDb) InsertBlock(block *dcrutil.Block) (int64, error) { // Build a map of in-flight transactions because some of the inputs in // this block could be referencing other transactions earlier in this // block which are not yet in the chain. - newHeight := int64(len(db.blocks)) + newHeight := int32(len(db.blocks)) txInFlight := map[chainhash.Hash]int{} // Loop through all transactions and inputs to ensure there are no error // conditions that would prevent them from be inserted into the db. @@ -707,7 +707,7 @@ func (db *MemDb) InsertBlock(block *dcrutil.Block) (int64, error) { // the block chain. It will return the zero hash, -1 for the block height, and // no error (nil) if there are not any blocks in the database yet. This is part // of the database.Db interface implementation. -func (db *MemDb) NewestSha() (*chainhash.Hash, int64, error) { +func (db *MemDb) NewestSha() (*chainhash.Hash, int32, error) { db.Lock() defer db.Unlock() @@ -723,25 +723,25 @@ func (db *MemDb) NewestSha() (*chainhash.Hash, int64, error) { } blockSha := db.blocks[numBlocks-1].BlockSha() - return &blockSha, int64(numBlocks - 1), nil + return &blockSha, int32(numBlocks - 1), nil } // FetchAddrIndexTip isn't currently implemented. 
This is a part of the // database.Db interface implementation. -func (db *MemDb) FetchAddrIndexTip() (*chainhash.Hash, int64, error) { +func (db *MemDb) FetchAddrIndexTip() (*chainhash.Hash, int32, error) { return nil, 0, database.ErrNotImplemented } // UpdateAddrIndexForBlock isn't currently implemented. This is a part of the // database.Db interface implementation. -func (db *MemDb) UpdateAddrIndexForBlock(*chainhash.Hash, int64, +func (db *MemDb) UpdateAddrIndexForBlock(*chainhash.Hash, int32, database.BlockAddrIndex) error { return database.ErrNotImplemented } // DropAddrIndexForBlock isn't currently implemented. This is a part of the // database.Db interface implementation. -func (db *MemDb) DropAddrIndexForBlock(*chainhash.Hash, int64, +func (db *MemDb) DropAddrIndexForBlock(*chainhash.Hash, int32, database.BlockAddrIndex) error { return database.ErrNotImplemented } @@ -795,7 +795,7 @@ func (db *MemDb) Sync() error { func newMemDb() *MemDb { db := MemDb{ blocks: make([]*wire.MsgBlock, 0, 200000), - blocksBySha: make(map[chainhash.Hash]int64), + blocksBySha: make(map[chainhash.Hash]int32), txns: make(map[chainhash.Hash][]*tTxInsertData), } return &db diff --git a/database/reorg_test.go b/database/reorg_test.go index f849c34b..323562c9 100644 --- a/database/reorg_test.go +++ b/database/reorg_test.go @@ -39,7 +39,7 @@ func testReorganization(t *testing.T, dbType string) { // Find where chain forks var forkHash chainhash.Hash - var forkHeight int64 + var forkHeight int32 for i, _ := range blocks { if blocks[i].Sha().IsEqual(blocksReorg[i].Sha()) { blkHash := blocks[i].Sha() @@ -49,7 +49,7 @@ func testReorganization(t *testing.T, dbType string) { } // Insert all blocks from chain 1 - for i := int64(0); i < int64(len(blocks)); i++ { + for i := int32(0); i < int32(len(blocks)); i++ { blkHash := blocks[i].Sha() if err != nil { t.Fatalf("Error getting SHA for block %dA: %v", i-2, err) @@ -68,7 +68,7 @@ func testReorganization(t *testing.T, dbType string) { } // Insert blocks from the other chain to simulate a reorg - for i := forkHeight + 1; i < int64(len(blocksReorg)); i++ { + for i := forkHeight + 1; i < int32(len(blocksReorg)); i++ { blkHash := blocksReorg[i].Sha() if err != nil { t.Fatalf("Error getting SHA for block %dA: %v", i-2, err) @@ -84,7 +84,7 @@ func testReorganization(t *testing.T, dbType string) { t.Fatalf("Error getting newest block info") } - for i := int64(0); i <= maxHeight; i++ { + for i := int32(0); i <= maxHeight; i++ { blkHash, err := db.FetchBlockShaByHeight(i) if err != nil { t.Fatalf("Error fetching SHA for block %d: %v", i, err) diff --git a/dcrd.go b/dcrd.go index 0329a4d0..d1c0a68a 100644 --- a/dcrd.go +++ b/dcrd.go @@ -118,17 +118,15 @@ func dcrdMain(serverChan chan<- *server) error { dcrdLog.Errorf("%v", err) return err } - defer func() { - err := tmdb.Store(cfg.DataDir, "ticketdb.gob") - if err != nil { - dcrdLog.Errorf("Failed to store ticket database: %v", err.Error()) - } - }() defer tmdb.Close() // Ensure the databases are sync'd and closed on Ctrl+C.
addInterruptHandler(func() { dcrdLog.Infof("Gracefully shutting down the database...") + err := tmdb.Store(cfg.DataDir, "ticketdb.gob") + if err != nil { + dcrdLog.Errorf("Failed to store ticket database: %v", err.Error()) + } db.RollbackClose() }) diff --git a/dcrjson/chainsvrcmds.go b/dcrjson/chainsvrcmds.go index ac1a59f1..1d08bc99 100644 --- a/dcrjson/chainsvrcmds.go +++ b/dcrjson/chainsvrcmds.go @@ -187,6 +187,21 @@ func NewGetBlockHashCmd(index int64) *GetBlockHashCmd { } } +// GetBlockHeaderCmd defines the getblockheader JSON-RPC command. +type GetBlockHeaderCmd struct { + Hash string + Verbose *bool `jsonrpcdefault:"true"` +} + +// NewGetBlockHeaderCmd returns a new instance which can be used to issue a +// getblockheader JSON-RPC command. +func NewGetBlockHeaderCmd(hash string, verbose *bool) *GetBlockHeaderCmd { + return &GetBlockHeaderCmd{ + Hash: hash, + Verbose: verbose, + } +} + // TemplateRequest is a request object as defined in BIP22 // (https://en.bitcoin.it/wiki/BIP_0022), it is optionally provided as an // pointer argument to GetBlockTemplateCmd. @@ -734,6 +749,7 @@ func init() { MustRegisterCmd("getblockchaininfo", (*GetBlockChainInfoCmd)(nil), flags) MustRegisterCmd("getblockcount", (*GetBlockCountCmd)(nil), flags) MustRegisterCmd("getblockhash", (*GetBlockHashCmd)(nil), flags) + MustRegisterCmd("getblockheader", (*GetBlockHeaderCmd)(nil), flags) MustRegisterCmd("getblocktemplate", (*GetBlockTemplateCmd)(nil), flags) MustRegisterCmd("getchaintips", (*GetChainTipsCmd)(nil), flags) MustRegisterCmd("getconnectioncount", (*GetConnectionCountCmd)(nil), flags) diff --git a/dcrjson/chainsvrcmds_test.go b/dcrjson/chainsvrcmds_test.go index 101725e4..b405262e 100644 --- a/dcrjson/chainsvrcmds_test.go +++ b/dcrjson/chainsvrcmds_test.go @@ -200,6 +200,20 @@ func TestChainSvrCmds(t *testing.T) { marshalled: `{"jsonrpc":"1.0","method":"getblockhash","params":[123],"id":1}`, unmarshalled: &dcrjson.GetBlockHashCmd{Index: 123}, }, + { + name: "getblockheader", + newCmd: func() (interface{}, error) { + return dcrjson.NewCmd("getblockheader", "123") + }, + staticCmd: func() interface{} { + return dcrjson.NewGetBlockHeaderCmd("123", nil) + }, + marshalled: `{"jsonrpc":"1.0","method":"getblockheader","params":["123"],"id":1}`, + unmarshalled: &dcrjson.GetBlockHeaderCmd{ + Hash: "123", + Verbose: dcrjson.Bool(true), + }, + }, { name: "getblocktemplate", newCmd: func() (interface{}, error) { @@ -941,6 +955,7 @@ func TestChainSvrCmds(t *testing.T) { t.Errorf("Test #%d (%s) unexpected marshalled data - "+ "got %s, want %s", i, test.name, marshalled, test.marshalled) continue } diff --git a/dcrjson/chainsvrresults.go b/dcrjson/chainsvrresults.go index fcf373b8..6dc00b11 100644 --- a/dcrjson/chainsvrresults.go +++ b/dcrjson/chainsvrresults.go @@ -7,6 +7,23 @@ package dcrjson import "encoding/json" +// GetBlockHeaderVerboseResult models the data from the getblockheader command when +// the verbose flag is set. When the verbose flag is not set, getblockheader +// returns a hex-encoded string.
+type GetBlockHeaderVerboseResult struct { + Hash string `json:"hash"` + Confirmations uint64 `json:"confirmations"` + Height int32 `json:"height"` + Version int32 `json:"version"` + MerkleRoot string `json:"merkleroot"` + Time int64 `json:"time"` + Nonce uint64 `json:"nonce"` + Bits string `json:"bits"` + Difficulty float64 `json:"difficulty"` + PreviousHash string `json:"previousblockhash,omitempty"` + NextHash string `json:"nextblockhash,omitempty"` +} + // GetBlockVerboseResult models the data from the getblock command when the // verbose flag is set. When the verbose flag is not set, getblock returns a // hex-encoded string. Contains Decred additions. @@ -135,6 +152,13 @@ type GetBlockTemplateResult struct { RejectReasion string `json:"reject-reason,omitempty"` } +// GetMempoolInfoResult models the data returned from the getmempoolinfo +// command. +type GetMempoolInfoResult struct { + Size int64 `json:"size"` + Bytes int64 `json:"bytes"` +} + // GetNetworkInfoResult models the data returned from the getnetworkinfo // command. type GetNetworkInfoResult struct { diff --git a/dcrjson/dcrdextcmds.go b/dcrjson/dcrdextcmds.go index 0877d39f..cdaa9c15 100644 --- a/dcrjson/dcrdextcmds.go +++ b/dcrjson/dcrdextcmds.go @@ -46,19 +46,6 @@ func NewExistsLiveTicketsCmd(txHashBlob string) *ExistsLiveTicketsCmd { } } -// ExistsMempoolTxsCmd defines the existsmempooltxs JSON-RPC command. -type ExistsMempoolTxsCmd struct { - TxHashBlob string -} - -// NewExistsMempoolTxsCmd returns a new instance which can be used to issue an -// existslivetickets JSON-RPC command. -func NewExistsMempoolTxsCmd(txHashBlob string) *ExistsMempoolTxsCmd { - return &ExistsMempoolTxsCmd{ - TxHashBlob: txHashBlob, - } -} - // GetCoinSupplyCmd defines the getcoinsupply JSON-RPC command. type GetCoinSupplyCmd struct{} @@ -126,7 +113,6 @@ func init() { MustRegisterCmd("existsaddress", (*ExistsAddressCmd)(nil), flags) MustRegisterCmd("existsliveticket", (*ExistsLiveTicketCmd)(nil), flags) MustRegisterCmd("existslivetickets", (*ExistsLiveTicketsCmd)(nil), flags) - MustRegisterCmd("existsmempooltxs", (*ExistsMempoolTxsCmd)(nil), flags) MustRegisterCmd("getcoinsupply", (*GetCoinSupplyCmd)(nil), flags) MustRegisterCmd("getstakedifficulty", (*GetStakeDifficultyCmd)(nil), flags) MustRegisterCmd("missedtickets", (*MissedTicketsCmd)(nil), flags) diff --git a/dcrjson/dcrwalletextcmds.go b/dcrjson/dcrwalletextcmds.go index 3dfb5c96..42c854fc 100644 --- a/dcrjson/dcrwalletextcmds.go +++ b/dcrjson/dcrwalletextcmds.go @@ -7,19 +7,6 @@ package dcrjson -// ConsolidateCmd is a type handling custom marshaling and -// unmarshaling of consolidate JSON wallet extension -// commands. -type ConsolidateCmd struct { - Inputs int `json:"inputs"` - Account *string -} - -// NewConsolidateCmd creates a new ConsolidateCmd. -func NewConsolidateCmd(inputs int, acct *string) *ConsolidateCmd { - return &ConsolidateCmd{Inputs: inputs, Account: acct} -} - // SStxInput represents the inputs to an SStx transaction. Specifically a // transactionsha and output number pair, along with the output amounts. type SStxInput struct { @@ -482,7 +469,6 @@ func init() { // server. 
flags := UFWalletOnly - MustRegisterCmd("consolidate", (*ConsolidateCmd)(nil), flags) MustRegisterCmd("createrawsstx", (*CreateRawSStxCmd)(nil), flags) MustRegisterCmd("createrawssgentx", (*CreateRawSSGenTxCmd)(nil), flags) MustRegisterCmd("createrawssrtx", (*CreateRawSSRtxCmd)(nil), flags) diff --git a/dcrjson/walletsvrcmds.go b/dcrjson/walletsvrcmds.go index e3879660..a7cff29a 100644 --- a/dcrjson/walletsvrcmds.go +++ b/dcrjson/walletsvrcmds.go @@ -233,6 +233,15 @@ func NewGetTransactionCmd(txHash string, includeWatchOnly *bool) *GetTransaction } } +// GetWalletInfoCmd defines the getwalletinfo JSON-RPC command. +type GetWalletInfoCmd struct{} + +// NewGetWalletInfoCmd returns a new instance which can be used to issue a +// getwalletinfo JSON-RPC command. +func NewGetWalletInfoCmd() *GetWalletInfoCmd { + return &GetWalletInfoCmd{} +} + // ImportPrivKeyCmd defines the importprivkey JSON-RPC command. type ImportPrivKeyCmd struct { PrivKey string @@ -647,6 +656,7 @@ func init() { MustRegisterCmd("getreceivedbyaddress", (*GetReceivedByAddressCmd)(nil), flags) MustRegisterCmd("gettransaction", (*GetTransactionCmd)(nil), flags) MustRegisterCmd("getwalletfee", (*GetWalletFeeCmd)(nil), flags) + MustRegisterCmd("getwalletinfo", (*GetWalletInfoCmd)(nil), flags) MustRegisterCmd("importprivkey", (*ImportPrivKeyCmd)(nil), flags) MustRegisterCmd("keypoolrefill", (*KeyPoolRefillCmd)(nil), flags) MustRegisterCmd("listaccounts", (*ListAccountsCmd)(nil), flags) diff --git a/dcrjson/walletsvrcmds_test.go b/dcrjson/walletsvrcmds_test.go index f3446e23..ad9e145c 100644 --- a/dcrjson/walletsvrcmds_test.go +++ b/dcrjson/walletsvrcmds_test.go @@ -350,6 +350,17 @@ func TestWalletSvrCmds(t *testing.T) { IncludeWatchOnly: dcrjson.Bool(true), }, }, + { + name: "getwalletinfo", + newCmd: func() (interface{}, error) { + return dcrjson.NewCmd("getwalletinfo") + }, + staticCmd: func() interface{} { + return dcrjson.NewGetWalletInfoCmd() + }, + marshalled: `{"jsonrpc":"1.0","method":"getwalletinfo","params":[],"id":1}`, + unmarshalled: &dcrjson.GetWalletInfoCmd{}, + }, { name: "importprivkey", newCmd: func() (interface{}, error) { diff --git a/doc.go b/doc.go index b5015578..54fc2223 100644 --- a/doc.go +++ b/doc.go @@ -83,6 +83,8 @@ Application Options: --limitfreerelay= Limit relay of transactions with no transaction fee to the given amount in thousands of bytes per minute (15) + --norelaypriority Do not require free or low-fee transactions to have + high priority for relaying --maxorphantx= Max number of orphan transactions to keep in memory (1000) --generate= Generate (mine) decreds using the CPU diff --git a/docs/code_contribution_guidelines.md b/docs/code_contribution_guidelines.md index 762f28c4..a08f449b 100644 --- a/docs/code_contribution_guidelines.md +++ b/docs/code_contribution_guidelines.md @@ -6,6 +6,7 @@ 4.1. [Share Early, Share Often](#ShareEarly)<br />
4.2. [Testing](#Testing)
4.3. [Code Documentation and Commenting](#CodeDocumentation)
+4.4. [Model Git Commit Messages](#ModelGitCommitMessages)
5. [Code Approval Process](#CodeApproval)
5.1 [Code Review](#CodeReview)
5.2 [Rework Code (if needed)](#CodeRework)
@@ -194,6 +195,52 @@ if amt < 5460 { but it was left as a magic number to show how much of a difference a good comment can make. + +### 4.4 Model Git Commit Messages + +This project prefers to keep a clean commit history with well-formed commit +messages. This section illustrates a model commit message and provides a bit +of background for it. This content was originally created by Tim Pope and made +available on his website; however, that website is no longer active, so it is +being provided here. + +Here’s a model Git commit message: + +``` +Short (50 chars or less) summary of changes + +More detailed explanatory text, if necessary. Wrap it to about 72 +characters or so. In some contexts, the first line is treated as the +subject of an email and the rest of the text as the body. The blank +line separating the summary from the body is critical (unless you omit +the body entirely); tools like rebase can get confused if you run the +two together. + +Write your commit message in the present tense: "Fix bug" and not "Fixed +bug." This convention matches up with commit messages generated by +commands like git merge and git revert. + +Further paragraphs come after blank lines. + +- Bullet points are okay, too +- Typically a hyphen or asterisk is used for the bullet, preceded by a + single space, with blank lines in between, but conventions vary here +- Use a hanging indent +``` + +Here are some of the reasons why wrapping your commit messages to 72 columns is +a good thing. + +- git log doesn’t do any special wrapping of the commit messages. With + the default pager of less -S, this means your paragraphs flow far off the edge + of the screen, making them difficult to read. On an 80 column terminal, if we + subtract 4 columns for the indent on the left and 4 more for symmetry on the + right, we’re left with 72 columns. +- git format-patch --stdout converts a series of commits to a series of emails, + using the messages for the message body. Good email netiquette dictates we + wrap our plain text emails such that there’s room for a few levels of nested + reply indicators without overflow in an 80 column terminal. + ### 5. Code Approval Process diff --git a/docs/default_ports.md b/docs/default_ports.md index 191e533d..0d7a620a 100644 --- a/docs/default_ports.md +++ b/docs/default_ports.md @@ -2,9 +2,9 @@ While dcrd is highly configurable when it comes to the network configuration, the following is intended to be a quick reference for the default ports used so port forwarding can be configured as required. -dcrd provides a `--upnp` flag which can be used to automatically map the bitcoin +dcrd provides a `--upnp` flag which can be used to automatically map the Decred peer-to-peer listening port if your router supports UPnP. If your router does -not support UPnP, or you don't wish to use it, please note that only the bitcoin +not support UPnP, or you don't wish to use it, please note that only the Decred peer-to-peer port should be forwarded unless you specifically want to allow RPC access to your dcrd from external sources such as in more advanced network configurations. diff --git a/docs/json_rpc_api.md b/docs/json_rpc_api.md index f47d1edd..bb6e30da 100644 --- a/docs/json_rpc_api.md +++ b/docs/json_rpc_api.md @@ -294,6 +294,20 @@ the method name for further details such as parameter and return information. [Return to Overview](#MethodOverview)<br />
*** +
+ +| | | +|---|---| +|Method|getblockheader| +|Parameters|1. block hash (string, required) - the hash of the block
2. verbose (boolean, optional, default=true) - specifies whether the block header is returned as a JSON object instead of a hex-encoded string| +|Description|Returns hex-encoded bytes of the serialized block header.| +|Returns (verbose=false)|`"data" (string) hex-encoded bytes of the serialized block header`| +|Returns (verbose=true)|`{ (json object)`<br />
  `"hash": "blockhash", (string) the hash of the block (same as provided)`
  `"confirmations": n, (numeric) the number of confirmations`
  `"height": n, (numeric) the height of the block in the block chain`
  `"version": n, (numeric) the block version`
  `"merkleroot": "hash", (string) root hash of the merkle tree`
  `"time": n, (numeric) the block time in seconds since 1 Jan 1970 GMT`
  `"nonce": n, (numeric) the block nonce`
  `"bits": n, (numeric) the bits which represent the block difficulty`
  `"difficulty": n.nn, (numeric) the proof-of-work difficulty as a multiple of the minimum difficulty`
  `"previousblockhash": "hash", (string) the hash of the previous block`
  `"nextblockhash": "hash", (string) the hash of the next block (only if there is one)`
`}`| +|Example Return (verbose=false)|`"0200000035ab154183570282ce9afc0b494c9fc6a3cfea05aa8c1add2ecc564900000000`
`38ba3d78e4500a5a7570dbe61960398add4410d278b21cd9708e6d9743f374d544fc0552`
`27f1001c29c1ea3b"`
**Newlines added for display purposes. The actual return does not contain newlines.**| +|Example Return (verbose=true)|`{`
  `"hash": "00000000009e2958c15ff9290d571bf9459e93b19765c6801ddeccadbb160a1e",`
  `"confirmations": 392076,`
  `"height": 100000,`
  `"version": 2,`
  `"merkleroot": "d574f343976d8e70d91cb278d21044dd8a396019e6db70755a0a50e4783dba38",`
  `"time": 1376123972,`
  `"nonce": 1005240617,`
  `"bits": "1c00f127",`
  `"difficulty": 271.75767393,`
  `"previousblockhash": "000000004956cc2edd1a8caa05eacfa3c69f4c490bfc9ace820257834115ab35",`
  `"nextblockhash": "0000000000629d100db387f37d0f37c51118f250fb0946310a8c37316cbc4028"`
`}`| +[Return to Overview](#MethodOverview)
+ +***
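The entry above documents the new getblockheader RPC that this change wires up in dcrjson and rpcserver.go. Purely as an illustration (not part of the patch itself), the sketch below shows how a client might build and marshal that request with the new dcrjson helpers; it assumes dcrjson.MarshalCmd keeps the package's usual (id, cmd) signature and that the package is imported from github.com/decred/dcrd/dcrjson.

```
package main

import (
	"fmt"

	"github.com/decred/dcrd/dcrjson"
)

func main() {
	// Ask for the verbose (JSON object) form of the header, reusing the
	// block hash from the example return above.
	cmd := dcrjson.NewGetBlockHeaderCmd(
		"00000000009e2958c15ff9290d571bf9459e93b19765c6801ddeccadbb160a1e",
		dcrjson.Bool(true))

	// Marshal into the JSON-RPC request bytes that would be sent to dcrd.
	marshalled, err := dcrjson.MarshalCmd(1, cmd)
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	fmt.Println(string(marshalled))
}
```

Passing nil instead of dcrjson.Bool(true) should leave the parameter out of the marshalled request, with the `jsonrpcdefault:"true"` tag on GetBlockHeaderCmd supplying the default when the command is unmarshalled.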
| | | @@ -352,6 +366,18 @@ the method name for further details such as parameter and return information. |Example Return|`{`
  `"version": 70000`
  `"protocolversion": 70001, `
  `"blocks": 298963,`
  `"timeoffset": 0,`
  `"connections": 17,`
  `"proxy": "",`
  `"difficulty": 8000872135.97,`
  `"testnet": false,`
  `"relayfee": 0.00001,`
`}`| [Return to Overview](#MethodOverview)
+*** +
+ +| | | +|---|---| +|Method|getmempoolinfo| +|Parameters|None| +|Description|Returns a JSON object containing mempool-related information.| +|Returns|`{ (json object)`
  `"bytes": n, (numeric) size in bytes of the mempool`
  `"size": n, (numeric) number of transactions in the mempool`
`}`| +|Example Return|`{`<br />
  `"bytes": 310768,`
  `"size": 157,`
`}`| +[Return to Overview](#MethodOverview)
+ ***
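Because getmempoolinfo returns a flat JSON object, the new GetMempoolInfoResult type added to dcrjson in this change is all a client needs to decode it. The snippet below is only an illustration (not part of the patch) of unmarshalling the example reply shown above.

```
package main

import (
	"encoding/json"
	"fmt"

	"github.com/decred/dcrd/dcrjson"
)

func main() {
	// Raw "result" payload as returned by dcrd for getmempoolinfo.
	raw := []byte(`{"bytes":310768,"size":157}`)

	var info dcrjson.GetMempoolInfoResult
	if err := json.Unmarshal(raw, &info); err != nil {
		fmt.Println("unmarshal error:", err)
		return
	}
	fmt.Printf("mempool holds %d transactions (%d bytes)\n", info.Size, info.Bytes)
}
```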
@@ -670,7 +696,7 @@ user. Click the method name for further details such as parameter and return in |Parameters|1. username (string, required)
2. passphrase (string, required)| |Description|Authenticate the connection against the username and password configured for the RPC server.
Invoking any other method before authenticating with this command will close the connection.
NOTE: This is only required if an HTTP Authorization header is not being used.| |Returns|Success: Nothing
Failure: Nothing (websocket disconnected)| -[Return to Overview](#ExtensionRequestOverview)
+[Return to Overview](#WSExtMethodOverview)
*** @@ -683,7 +709,7 @@ user. Click the method name for further details such as parameter and return in |Parameters|None| |Description|Request notifications for whenever a block is connected or disconnected from the main (best) chain.
NOTE: If a client subscribes to both block and transaction (recvtx and redeemingtx) notifications, the blockconnected notification will be sent after all transaction notifications have been sent. This allows clients to know when all relevant transactions for a block have been received.| |Returns|Nothing| -[Return to Overview](#ExtensionRequestOverview)
+[Return to Overview](#WSExtMethodOverview)
***
@@ -695,7 +721,7 @@ user. Click the method name for further details such as parameter and return in |Parameters|None| |Description|Cancel sending notifications for whenever a block is connected or disconnected from the main (best) chain.| |Returns|Nothing| -[Return to Overview](#ExtensionRequestOverview)
+[Return to Overview](#WSExtMethodOverview)
*** @@ -708,7 +734,7 @@ user. Click the method name for further details such as parameter and return in |Parameters|1. Addresses (JSON array, required)
 `[ (json array of strings)`
  `"decredaddress", (string) the decred address`
  `...`
 `]`| |Description|Send a recvtx notification when a transaction added to mempool or appears in a newly-attached block contains a txout pkScript sending to any of the passed addresses. Matching outpoints are automatically registered for redeemingtx notifications.| |Returns|Nothing| -[Return to Overview](#ExtensionRequestOverview)
+[Return to Overview](#WSExtMethodOverview)
*** @@ -721,7 +747,7 @@ user. Click the method name for further details such as parameter and return in |Parameters|1. Addresses (JSON array, required)
 `[ (json array of strings)`
  `"decredaddress", (string) the decred address`
  `...`
 `]`| |Description|Cancel registered receive notifications for each passed address.| |Returns|Nothing| -[Return to Overview](#ExtensionRequestOverview)
+[Return to Overview](#WSExtMethodOverview)
*** @@ -734,7 +760,7 @@ user. Click the method name for further details such as parameter and return in |Parameters|1. Outpoints (JSON array, required)
 `[ (JSON array)`
  `{ (JSON object)`
   `"hash":"data", (string) the hex-encoded bytes of the outpoint hash`
   `"index":n (numeric) the txout index of the outpoint`
  `},`
  `...`
 `]`| |Description|Send a redeemingtx notification when a transaction spending an outpoint appears in mempool (if relayed to this dcrd instance) and when such a transaction first appears in a newly-attached block.| |Returns|Nothing| -[Return to Overview](#ExtensionRequestOverview)
+[Return to Overview](#WSExtMethodOverview)
*** @@ -747,7 +773,7 @@ user. Click the method name for further details such as parameter and return in |Parameters|1. Outpoints (JSON array, required)
 `[ (JSON array)`
  `{ (JSON object)`
   `"hash":"data", (string) the hex-encoded bytes of the outpoint hash`
   `"index":n (numeric) the txout index of the outpoint`
  `},`
  `...`
 `]`| |Description|Cancel registered spending notifications for each passed outpoint.| |Returns|Nothing| -[Return to Overview](#ExtensionRequestOverview)
+[Return to Overview](#WSExtMethodOverview)
*** @@ -760,7 +786,7 @@ user. Click the method name for further details such as parameter and return in |Parameters|1. BeginBlock (string, required) block hash to begin rescanning from
2. Addresses (JSON array, required)
 `[ (json array of strings)`
  `"decredaddress", (string) the decred address`
  `...`
 `]`
3. Outpoints (JSON array, required)
 `[ (JSON array)`
  `{ (JSON object)`
   `"hash":"data", (string) the hex-encoded bytes of the outpoint hash`
   `"index":n (numeric) the txout index of the outpoint`
  `},`
  `...`
 `]`
4. EndBlock (string, optional) hash of final block to rescan| |Description|Rescan block chain for transactions to addresses, starting at block BeginBlock and ending at EndBlock. The current known UTXO set for all passed addresses at height BeginBlock should included in the Outpoints argument. If EndBlock is omitted, the rescan continues through the best block in the main chain. Additionally, if no EndBlock is provided, the client is automatically registered for transaction notifications for all rescanned addresses and the final UTXO set. Rescan results are sent as recvtx and redeemingtx notifications. This call returns once the rescan completes.| |Returns|Nothing| -[Return to Overview](#ExtensionRequestOverview)
+[Return to Overview](#WSExtMethodOverview)
*** @@ -773,7 +799,7 @@ user. Click the method name for further details such as parameter and return in |Parameters|1. verbose (boolean, optional, default=false) - specifies which type of notification to receive. If verbose is true, then the caller receives [txacceptedverbose](#txacceptedverbose), otherwise the caller receives [txaccepted](#txaccepted)| |Description|Send either a [txaccepted](#txaccepted) or a [txacceptedverbose](#txacceptedverbose) notification when a new transaction is accepted into the mempool.| |Returns|Nothing| -[Return to Overview](#ExtensionRequestOverview)
+[Return to Overview](#WSExtMethodOverview)
*** diff --git a/mempool.go b/mempool.go index 6c999117..f6c2b775 100644 --- a/mempool.go +++ b/mempool.go @@ -95,7 +95,7 @@ const ( // maxSSGensDoubleSpends is the maximum number of SSGen double spends // allowed in the pool. - maxSSGensDoubleSpends = 5 + maxSSGensDoubleSpends = 64 // heightDiffToPruneTicket is the number of blocks to pass by in terms // of height before old tickets are pruned. @@ -117,7 +117,7 @@ type TxDesc struct { Tx *dcrutil.Tx // Transaction. Type stake.TxType // Transcation type. Added time.Time // Time when added to pool. - Height int64 // Blockheight when added to pool. + Height int32 // Blockheight when added to pool. Fee int64 // Transaction fees. startingPriority float64 // Priority when added to the pool. } @@ -398,6 +398,11 @@ func (mp *txMemPool) SortParentsByVotes(currentTopBlock chainhash.Hash, // relay fee. In particular, if the cost to the network to spend coins is more // than 1/3 of the minimum transaction relay fee, it is considered dust. func isDust(txOut *wire.TxOut, params *chaincfg.Params) bool { + // Unspendable outputs are considered dust. + if txscript.IsUnspendable(txOut.PkScript) { + return true + } + // The total serialized size consists of the output and the associated // input script to redeem it. Since there is no input script // to redeem it yet, use the minimum size of a typical input script. @@ -539,7 +544,7 @@ func checkPkScriptStandard(version uint16, pkScript []byte, // of recognized forms, and not containing "dust" outputs (those that are // so small it costs more to process them than they are worth). func (mp *txMemPool) checkTransactionStandard(tx *dcrutil.Tx, txType stake.TxType, - height int64) error { + height int32) error { msgTx := tx.MsgTx() // The transaction must be a currently supported version. @@ -925,30 +930,6 @@ func (mp *txMemPool) HaveTransaction(hash *chainhash.Hash) bool { return mp.haveTransaction(hash) } -// haveTransactions returns whether or not the passed transactions already exist -// in the main pool or in the orphan pool. -// -// This function MUST be called with the mempool lock held (for reads). -func (mp *txMemPool) haveTransactions(hashes []*chainhash.Hash) []bool { - have := make([]bool, len(hashes)) - for i := range hashes { - have[i] = mp.haveTransaction(hashes[i]) - } - return have -} - -// HaveTransactions returns whether or not the passed transactions already exist -// in the main pool or in the orphan pool. -// -// This function is safe for concurrent access. -func (mp *txMemPool) HaveTransactions(hashes []*chainhash.Hash) []bool { - // Protect concurrent access. - mp.RLock() - defer mp.RUnlock() - - return mp.haveTransactions(hashes) -} - // removeTransaction is the internal function which implements the public // RemoveTransaction. See the comment for RemoveTransaction for more details. // @@ -1029,7 +1010,7 @@ func (mp *txMemPool) RemoveDoubleSpends(tx *dcrutil.Tx) { func (mp *txMemPool) addTransaction( tx *dcrutil.Tx, txType stake.TxType, - height, + height int32, fee int64) { // Add the transaction to the pool and mark the referenced outpoints // as spent by the pool. @@ -1171,7 +1152,7 @@ func (mp *txMemPool) FindTxForAddr(addr dcrutil.Address) []*dcrutil.Tx { // which are currently in the mempool and hence not mined into a block yet, // contribute no additional input age to the transaction. 
func calcInputValueAge(txDesc *TxDesc, txStore blockchain.TxStore, - nextBlockHeight int64) float64 { + nextBlockHeight int32) float64 { var totalInputAge float64 for _, txIn := range txDesc.Tx.MsgTx().TxIn { originHash := &txIn.PreviousOutPoint.Hash @@ -1184,7 +1165,7 @@ func calcInputValueAge(txDesc *TxDesc, txStore blockchain.TxStore, // have their block height set to a special constant. // Their input age should computed as zero since their // parent hasn't made it into a block yet. - var inputAge int64 + var inputAge int32 if txData.BlockHeight == mempoolHeight { inputAge = 0 } else { @@ -1194,7 +1175,7 @@ func calcInputValueAge(txDesc *TxDesc, txStore blockchain.TxStore, // Sum the input value times age. originTxOut := txData.Tx.MsgTx().TxOut[originIndex] inputValue := originTxOut.Value - totalInputAge += float64(inputValue * inputAge) + totalInputAge += float64(inputValue * int64(inputAge)) } } @@ -1268,7 +1249,7 @@ func (td *TxDesc) StartingPriority(txStore blockchain.TxStore) float64 { // CurrentPriority calculates the current priority of this tx descriptor's // underlying transaction relative to the next block height. func (td *TxDesc) CurrentPriority(txStore blockchain.TxStore, - nextBlockHeight int64) float64 { + nextBlockHeight int32) float64 { inputAge := calcInputValueAge(td, txStore, nextBlockHeight) return calcPriority(td.Tx, inputAge) } diff --git a/mining.go b/mining.go index d4c35786..139a6f6a 100644 --- a/mining.go +++ b/mining.go @@ -42,7 +42,6 @@ const ( // which have not been mined into a block yet. type txPrioItem struct { tx *dcrutil.Tx - txType stake.TxType fee int64 priority float64 feePerKB float64 @@ -170,7 +169,7 @@ type BlockTemplate struct { block *wire.MsgBlock fees []int64 sigOpCounts []int64 - height int64 + height int32 validPayAddress bool } @@ -286,7 +285,7 @@ func getCoinbaseExtranonces(msgBlock *wire.MsgBlock) []uint64 { // block by regenerating the coinbase script with the passed value and block // height. It also recalculates and updates the new merkle root that results // from changing the coinbase script. -func UpdateExtraNonce(msgBlock *wire.MsgBlock, blockHeight int64, +func UpdateExtraNonce(msgBlock *wire.MsgBlock, blockHeight int32, extraNonces []uint64) error { // First block has no extranonce. if blockHeight == 1 { @@ -322,7 +321,7 @@ func UpdateExtraNonce(msgBlock *wire.MsgBlock, blockHeight int64, // address handling is useful. func createCoinbaseTx(coinbaseScript []byte, opReturnPkScript []byte, - nextBlockHeight int64, + nextBlockHeight int32, addr dcrutil.Address, voters uint16, params *chaincfg.Params) (*dcrutil.Tx, error) { @@ -443,7 +442,7 @@ func createCoinbaseTx(coinbaseScript []byte, // to the passed transaction as spent. It also adds the passed transaction to // the store at the provided height. func spendTransaction(txStore blockchain.TxStore, tx *dcrutil.Tx, - height int64) error { + height int32) error { for _, txIn := range tx.MsgTx().TxIn { originHash := &txIn.PreviousOutPoint.Hash originIndex := txIn.PreviousOutPoint.Index @@ -1159,10 +1158,23 @@ mempoolLoop: continue } + // Fetch all of the transactions referenced by the inputs to + // this transaction. NOTE: This intentionally does not fetch + // inputs from the mempool since a transaction which depends on + // other transactions in the mempool must come after those + // dependencies in the final generated block. 
+ txStore, err := blockManager.FetchTransactionStore(tx, treeValid) + if err != nil { + minrLog.Warnf("Unable to fetch transaction store for "+ + "tx %s: %v", tx.Sha(), err) + continue + } + // Need this for a check below for stake base input, and to check // the ticket number. - isSSGen := txDesc.Type == stake.TxTypeSSGen - if isSSGen { + isSSGen, _ := stake.IsSSGen(tx) + + if isSSGen, _ := stake.IsSSGen(tx); isSSGen { blockHash, blockHeight, err := stake.GetSSGenBlockVotedOn(tx) if err != nil { // Should theoretically never fail. minrLog.Tracef("Skipping ssgen tx %s because of failure "+ @@ -1178,18 +1190,6 @@ mempoolLoop: } } - // Fetch all of the transactions referenced by the inputs to - // this transaction. NOTE: This intentionally does not fetch - // inputs from the mempool since a transaction which depends on - // other transactions in the mempool must come after those - // dependencies in the final generated block. - txStore, err := blockManager.FetchTransactionStore(tx, treeValid) - if err != nil { - minrLog.Warnf("Unable to fetch transaction store for "+ - "tx %s: %v", tx.Sha(), err) - continue - } - // Calculate the input value age sum for the transaction. This // is comprised of the sum all of input amounts multiplied by // their respective age (number of confirmations since the @@ -1197,7 +1197,7 @@ mempoolLoop: // setup dependencies for any transactions which reference other // transactions in the mempool so they can be properly ordered // below. - prioItem := &txPrioItem{tx: txDesc.Tx, txType: txDesc.Type} + prioItem := &txPrioItem{tx: txDesc.Tx} inputValueAge := float64(0.0) for i, txIn := range tx.MsgTx().TxIn { // Evaluate if this is a stakebase input or not. If it is, continue @@ -1344,13 +1344,13 @@ mempoolLoop: tx := prioItem.tx // Store if this is an SStx or not. - isSStx := prioItem.txType == stake.TxTypeSStx + isSStx, err := stake.IsSStx(tx) // Store if this is an SSGen or not. - isSSGen := prioItem.txType == stake.TxTypeSSGen + isSSGen, err := stake.IsSSGen(tx) // Store if this is an SSRtx or not. - isSSRtx := prioItem.txType == stake.TxTypeSSRtx + isSSRtx, err := stake.IsSSRtx(tx) // Grab the list of transactions which depend on this one (if // any) and remove the entry for this transaction as it will @@ -2029,3 +2029,31 @@ func UpdateBlockTime(msgBlock *wire.MsgBlock, bManager *blockManager) error { return nil } + +// UpdateExtraNonce updates the extra nonce in the coinbase script of the passed +// block by regenerating the coinbase script with the passed value and block +// height. It also recalculates and updates the new merkle root that results +// from changing the coinbase script. +func UpdateExtraNonce(msgBlock *wire.MsgBlock, blockHeight int32, extraNonce uint64) error { + coinbaseScript, err := standardCoinbaseScript(blockHeight, extraNonce) + if err != nil { + return err + } + if len(coinbaseScript) > blockchain.MaxCoinbaseScriptLen { + return fmt.Errorf("coinbase transaction script length "+ + "of %d is out of range (min: %d, max: %d)", + len(coinbaseScript), blockchain.MinCoinbaseScriptLen, + blockchain.MaxCoinbaseScriptLen) + } + msgBlock.Transactions[0].TxIn[0].SignatureScript = coinbaseScript + + // TODO(davec): A dcrutil.Block should use saved in the state to avoid + // recalculating all of the other transaction hashes. + // block.Transactions[0].InvalidateCache() + + // Recalculate the merkle root with the updated extra nonce. 
+ block := dcrutil.NewBlock(msgBlock) + merkles := blockchain.BuildMerkleTreeStore(block.Transactions()) + msgBlock.Header.MerkleRoot = *merkles[len(merkles)-1] + return nil +} diff --git a/peer.go b/peer.go index 0038d869..af1b344b 100644 --- a/peer.go +++ b/peer.go @@ -60,6 +60,17 @@ const ( // queueEmptyFrequency is the frequency for the emptying of the queue. queueEmptyFrequency = 500 * time.Millisecond + + // connectionRetryInterval is the base amount of time to wait in between + // retries when connecting to persistent peers. It is adjusted by the + // number of retries such that there is a retry backoff. + connectionRetryInterval = time.Second * 10 + + // maxConnectionRetryInterval is the max amount of time retrying of a + // persistent peer is allowed to grow to. This is necessary since the + // retry logic uses a backoff mechanism which increases the interval + // base done the number of retries that have been done. + maxConnectionRetryInterval = time.Minute * 5 ) var ( @@ -595,28 +606,8 @@ func (p *peer) pushMerkleBlockMsg(sha *chainhash.Hash, doneChan, waitChan chan s } // Generate a merkle block by filtering the requested block according - // to the filter for the peer and fetch any matched transactions from - // the database. - merkle, matchedHashes := bloom.NewMerkleBlock(blk, p.filter) - txList := p.server.db.FetchTxByShaList(matchedHashes) - - // Warn on any missing transactions which should not happen since the - // matched transactions come from an existing block. Also, find the - // final valid transaction index for later. - finalValidTxIndex := -1 - for i, txR := range txList { - if txR.Err != nil || txR.Tx == nil { - warnMsg := fmt.Sprintf("Failed to fetch transaction "+ - "%v which was matched by merkle block %v", - txR.Sha, sha) - if txR.Err != nil { - warnMsg += ": " + err.Error() - } - peerLog.Warnf(warnMsg) - continue - } - finalValidTxIndex = i - } + // to the filter for the peer. + merkle, matchedTxIndices := bloom.NewMerkleBlock(blk, p.filter) // Once we have fetched data wait for any previous operation to finish. if waitChan != nil { @@ -626,20 +617,21 @@ func (p *peer) pushMerkleBlockMsg(sha *chainhash.Hash, doneChan, waitChan chan s // Send the merkleblock. Only send the done channel with this message // if no transactions will be sent afterwards. var dc chan struct{} - if finalValidTxIndex == -1 { + if len(matchedTxIndices) == 0 { dc = doneChan } p.QueueMessage(merkle, dc) // Finally, send any matched transactions. - for i, txR := range txList { + blkTransactions := blk.MsgBlock().Transactions + for i, txIndex := range matchedTxIndices { // Only send the done channel on the final transaction. var dc chan struct{} - if i == finalValidTxIndex { + if i == len(matchedTxIndices)-1 { dc = doneChan } - if txR.Err == nil && txR.Tx != nil { - p.QueueMessage(txR.Tx, dc) + if txIndex < uint32(len(blkTransactions)) { + p.QueueMessage(blkTransactions[txIndex], dc) } } @@ -1074,7 +1066,7 @@ func (p *peer) handleGetBlocksMsg(msg *wire.MsgGetBlocks) { // provided locator are known. This does mean the client will start // over with the genesis block if unknown block locators are provided. // This mirrors the behavior in the reference implementation. 
-	startIdx := int64(1)
+	startIdx := int32(1)
 	for _, hash := range msg.BlockLocatorHashes {
 		height, err := p.server.db.FetchBlockHeightBySha(hash)
 		if err == nil {
@@ -1118,7 +1110,7 @@ func (p *peer) handleGetBlocksMsg(msg *wire.MsgGetBlocks) {
 			iv := wire.NewInvVect(wire.InvTypeBlock, &hashCopy)
 			invMsg.AddInvVect(iv)
 		}
-		start += int64(len(hashList))
+		start += int32(len(hashList))
 	}
 
 	// Send the inventory message if there is anything to send.
@@ -1139,6 +1131,11 @@ func (p *peer) handleGetBlocksMsg(msg *wire.MsgGetBlocks) {
 // handleGetHeadersMsg is invoked when a peer receives a getheaders decred
 // message.
 func (p *peer) handleGetHeadersMsg(msg *wire.MsgGetHeaders) {
+	// Ignore getheaders requests if not in sync.
+	if !p.server.blockManager.IsCurrent() {
+		return
+	}
+
 	// Attempt to look up the height of the provided stop hash.
 	endIdx := database.AllShas
 	height, err := p.server.db.FetchBlockHeightBySha(&msg.HashStop)
@@ -1175,7 +1172,7 @@ func (p *peer) handleGetHeadersMsg(msg *wire.MsgGetHeaders) {
 	// provided locator are known.  This does mean the client will start
 	// over with the genesis block if unknown block locators are provided.
 	// This mirrors the behavior in the reference implementation.
-	startIdx := int64(1)
+	startIdx := int32(1)
 	for _, hash := range msg.BlockLocatorHashes {
 		height, err := p.server.db.FetchBlockHeightBySha(hash)
 		if err == nil {
@@ -1224,7 +1221,7 @@ func (p *peer) handleGetHeadersMsg(msg *wire.MsgGetHeaders) {
 
 		// Start at the next block header after the latest one on the
 		// next loop iteration.
-		start += int64(len(hashList))
+		start += int32(len(hashList))
 	}
 	p.QueueMessage(headersMsg, nil)
 }
@@ -1774,7 +1771,7 @@ out:
 
 			// Create and send as many inv messages as needed to
 			// drain the inventory send queue.
-			invMsg := wire.NewMsgInv()
+			invMsg := wire.NewMsgInvSizeHint(uint(invSendQueue.Len()))
 			for e := invSendQueue.Front(); e != nil; e = invSendQueue.Front() {
 				iv := invSendQueue.Remove(e).(*wire.InvVect)
 
@@ -1789,7 +1786,7 @@ out:
 					waiting = queuePacket(
 						outMsg{msg: invMsg},
 						pendingMsgs, waiting)
-					invMsg = wire.NewMsgInv()
+					invMsg = wire.NewMsgInvSizeHint(uint(invSendQueue.Len()))
 				}
 
 				// Add the inventory that is being relayed to
@@ -2117,6 +2114,9 @@ func newOutboundPeer(s *server, addr string, persistent bool, retryCount int64)
 	if p.retryCount > 0 {
 		scaledInterval := connectionRetryInterval.Nanoseconds() * p.retryCount / 2
 		scaledDuration := time.Duration(scaledInterval)
+		if scaledDuration > maxConnectionRetryInterval {
+			scaledDuration = maxConnectionRetryInterval
+		}
 		srvrLog.Debugf("Retrying connection to %s in %s", addr, scaledDuration)
 		time.Sleep(scaledDuration)
 	}
diff --git a/rpcserver.go b/rpcserver.go
index ee07f79b..ad38b726 100644
--- a/rpcserver.go
+++ b/rpcserver.go
@@ -157,7 +157,6 @@ var rpcHandlersBeforeInit = map[string]commandHandler{
 	"existsaddress":      handleExistsAddress,
 	"existsliveticket":   handleExistsLiveTicket,
 	"existslivetickets":  handleExistsLiveTickets,
-	"existsmempooltxs":   handleExistsMempoolTxs,
 	"generate":           handleGenerate,
 	"getaddednodeinfo":   handleGetAddedNodeInfo,
 	"getbestblock":       handleGetBestBlock,
@@ -165,6 +164,7 @@ var rpcHandlersBeforeInit = map[string]commandHandler{
 	"getblock":           handleGetBlock,
 	"getblockcount":      handleGetBlockCount,
 	"getblockhash":       handleGetBlockHash,
+	"getblockheader":     handleGetBlockHeader,
 	"getblocktemplate":   handleGetBlockTemplate,
 	"getcoinsupply":      handleGetCoinSupply,
 	"getconnectioncount": handleGetConnectionCount,
@@ -173,6 +173,7 @@ var rpcHandlersBeforeInit = map[string]commandHandler{
 	"getgenerate":        handleGetGenerate,
 	"gethashespersec":    handleGetHashesPerSec,
 	"getinfo":            handleGetInfo,
+	"getmempoolinfo":     handleGetMempoolInfo,
 	"getmininginfo":      handleGetMiningInfo,
 	"getnettotals":       handleGetNetTotals,
 	"getnetworkhashps":   handleGetNetworkHashPS,
@@ -1257,7 +1258,7 @@ func createVinList(mtx *wire.MsgTx) []dcrjson.Vin {
 	tx := dcrutil.NewTx(mtx)
 	vinList := make([]dcrjson.Vin, len(mtx.TxIn))
 	for i, v := range mtx.TxIn {
-		if blockchain.IsCoinBase(tx) {
+		if blockchain.IsCoinBaseTx(mtx) {
 			vinList[i].Coinbase = hex.EncodeToString(v.SignatureScript)
 		} else {
 			vinList[i].Txid = v.PreviousOutPoint.Hash.String()
@@ -1287,7 +1288,7 @@ func createVoutList(mtx *wire.MsgTx, chainParams *chaincfg.Params) []dcrjson.Vou
 	voutList := make([]dcrjson.Vout, len(mtx.TxOut))
 	for i, v := range mtx.TxOut {
 		voutList[i].N = uint32(i)
-		voutList[i].Value = float64(v.Value) / dcrutil.AtomsPerCoin
+		voutList[i].Value = dcrutil.Amount(v.Value).ToCoin()
 		voutList[i].Version = v.Version
 
 		// The disassembled string will contain [error] inline if the
@@ -1319,10 +1320,9 @@ func createVoutList(mtx *wire.MsgTx, chainParams *chaincfg.Params) []dcrjson.Vou
 
 // createTxRawResult converts the passed transaction and associated parameters
 // to a raw transaction JSON object.
-func createTxRawResult(chainParams *chaincfg.Params, txHash string,
-	mtx *wire.MsgTx, blk *dcrutil.Block, maxIdx int64,
-	blkHash *chainhash.Hash, blkHeight int64,
-	blkIdx uint32) (*dcrjson.TxRawResult, error) {
+func createTxRawResult(chainParams *chaincfg.Params, mtx *wire.MsgTx,
+	txHash string, blkHeader *wire.BlockHeader, blkHash string,
+	blkHeight int32, blkIdx uint32, chainHeight int32) (*dcrjson.TxRawResult, error) {
 
 	mtxHex, err := messageToHex(mtx)
 	if err != nil {
@@ -1341,15 +1341,12 @@ func createTxRawResult(chainParams *chaincfg.Params, txHash string,
 		BlockIndex: blkIdx,
 	}
 
-	if blk != nil {
-		blockHeader := &blk.MsgBlock().Header
-		idx := blk.Height()
-
+	if blkHeader != nil {
 		// This is not a typo, they are identical in bitcoind as well.
-		txReply.Time = blockHeader.Timestamp.Unix()
-		txReply.Blocktime = blockHeader.Timestamp.Unix()
-		txReply.BlockHash = blkHash.String()
-		txReply.Confirmations = uint64(1 + maxIdx - idx)
+		txReply.Time = blkHeader.Timestamp.Unix()
+		txReply.Blocktime = blkHeader.Timestamp.Unix()
+		txReply.BlockHash = blkHash
+		txReply.Confirmations = uint64(1 + chainHeight - blkHeight)
 	}
 
 	return txReply, nil
@@ -1516,18 +1513,16 @@ func handleExistsLiveTickets(s *rpcServer, cmd interface{},
 	txHashBlob, err := hex.DecodeString(c.TxHashBlob)
 	if err != nil {
 		return nil, &dcrjson.RPCError{
-			Code: dcrjson.ErrRPCDecodeHexString,
-			Message: fmt.Sprintf("bad ticket hash blob (unparseable): %v",
-				err.Error()),
+			Code:    dcrjson.ErrRPCDecodeHexString,
+			Message: "bad ticket hash blob (unparseable)",
 		}
 	}
 
 	// It needs to be an exact number of hashes.
 	if len(txHashBlob)%32 != 0 {
 		return nil, &dcrjson.RPCError{
-			Code: dcrjson.ErrRPCDecodeHexString,
-			Message: fmt.Sprintf("bad ticket hash blob (bad length): %v",
-				len(txHashBlob)),
+			Code:    dcrjson.ErrRPCDecodeHexString,
+			Message: "bad ticket hash blob (bad length)",
 		}
 	}
 
@@ -1536,13 +1531,6 @@ func handleExistsLiveTickets(s *rpcServer, cmd interface{},
 	for i := 0; i < hashesLen; i++ {
 		hashes[i], err = chainhash.NewHash(
 			txHashBlob[i*chainhash.HashSize : (i+1)*chainhash.HashSize])
-		if err != nil {
-			return nil, &dcrjson.RPCError{
-				Code: dcrjson.ErrRPCDecodeHexString,
-				Message: fmt.Sprintf("bad ticket hash: %v",
-					err.Error()),
-			}
-		}
 	}
 
 	exists, err := s.server.blockManager.ExistsLiveTickets(hashes)
@@ -1551,66 +1539,8 @@ func handleExistsLiveTickets(s *rpcServer, cmd interface{},
 	}
 	if len(exists) != hashesLen {
 		return nil, &dcrjson.RPCError{
-			Code: dcrjson.ErrRPCDatabase,
-			Message: fmt.Sprintf("output of ExistsLiveTickets wrong size "+
-				"(want %v, got %v)", hashesLen, len(exists)),
-		}
-	}
-
-	// Convert the slice of bools into a compacted set of bit flags.
-	set := bitset.NewBytes(hashesLen)
-	for i := range exists {
-		if exists[i] {
-			set.Set(i)
-		}
-	}
-
-	return hex.EncodeToString([]byte(set)), nil
-}
-
-// handleExistsMempoolTxs implements the existsmempooltxs command.
-func handleExistsMempoolTxs(s *rpcServer, cmd interface{},
-	closeChan <-chan struct{}) (interface{}, error) {
-	c := cmd.(*dcrjson.ExistsMempoolTxsCmd)
-
-	txHashBlob, err := hex.DecodeString(c.TxHashBlob)
-	if err != nil {
-		return nil, &dcrjson.RPCError{
-			Code: dcrjson.ErrRPCDecodeHexString,
-			Message: fmt.Sprintf("bad transaction hash blob (unparseable): %v",
-				err.Error()),
-		}
-	}
-
-	// It needs to be an exact number of hashes.
-	if len(txHashBlob)%32 != 0 {
-		return nil, &dcrjson.RPCError{
-			Code: dcrjson.ErrRPCDecodeHexString,
-			Message: fmt.Sprintf("bad transaction hash blob (bad length): %v",
-				len(txHashBlob)),
-		}
-	}
-
-	hashesLen := len(txHashBlob) / 32
-	hashes := make([]*chainhash.Hash, hashesLen)
-	for i := 0; i < hashesLen; i++ {
-		hashes[i], err = chainhash.NewHash(
-			txHashBlob[i*chainhash.HashSize : (i+1)*chainhash.HashSize])
-		if err != nil {
-			return nil, &dcrjson.RPCError{
-				Code: dcrjson.ErrRPCDecodeHexString,
-				Message: fmt.Sprintf("bad transaction hash: %v",
-					err.Error()),
-			}
-		}
-	}
-
-	exists := s.server.txMemPool.HaveTransactions(hashes)
-	if len(exists) != hashesLen {
-		return nil, &dcrjson.RPCError{
-			Code: dcrjson.ErrRPCDatabase,
-			Message: fmt.Sprintf("output of ExistsMempoolTxs wrong size "+
-				"(want %v, got %v)", hashesLen, len(exists)),
+			Code:    dcrjson.ErrRPCDatabase,
+			Message: "output of ExistsLiveTickets wrong size",
 		}
 	}
 
@@ -1874,7 +1804,7 @@ func handleGetBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i
 		PoolSize:      blockHeader.PoolSize,
 		Time:          blockHeader.Timestamp.Unix(),
 		Confirmations: uint64(1 + maxIdx - idx),
-		Height:        idx,
+		Height:        int64(idx),
 		Size:          int32(len(buf)),
 		Bits:          strconv.FormatInt(int64(blockHeader.Bits), 16),
 		SBits:         sbitsFloat,
@@ -1902,11 +1832,9 @@ func handleGetBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i
 		txns := blk.Transactions()
 		rawTxns := make([]dcrjson.TxRawResult, len(txns))
 		for i, tx := range txns {
-			txHash := tx.Sha().String()
-			mtx := tx.MsgTx()
-
 			rawTxn, err := createTxRawResult(s.server.chainParams,
-				txHash, mtx, blk, maxIdx, sha, blk.Height(), uint32(i))
+				tx.MsgTx(), tx.Sha().String(), blockHeader,
+				sha.String(), idx, uint32(i), maxIdx)
 			if err != nil {
 				return nil, err
 			}
@@ -1933,7 +1861,7 @@ func handleGetBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i
 	// Get next block unless we are already at the top.
 	if idx < maxIdx && idx >= 0 {
 		var shaNext *chainhash.Hash
-		shaNext, err = s.server.db.FetchBlockShaByHeight(int64(idx + 1))
+		shaNext, err = s.server.db.FetchBlockShaByHeight(idx + 1)
 		if err != nil {
 			context := "No next block"
 			return nil, internalRPCError(err.Error(), context)
@@ -1961,7 +1889,7 @@ func handleGetBlockCount(s *rpcServer, cmd interface{}, closeChan <-chan struct{
 // handleGetBlockHash implements the getblockhash command.
 func handleGetBlockHash(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
 	c := cmd.(*dcrjson.GetBlockHashCmd)
-	sha, err := s.server.db.FetchBlockShaByHeight(c.Index)
+	sha, err := s.server.db.FetchBlockShaByHeight(int32(c.Index))
 	if err != nil {
 		return nil, &dcrjson.RPCError{
 			Code: dcrjson.ErrRPCOutOfRange,
@@ -1972,6 +1900,71 @@ func handleGetBlockHash(s *rpcServer, cmd interface{}, closeChan <-chan struct{}
 	return sha.String(), nil
 }
 
+// handleGetBlockHeader implements the getblockheader command.
+func handleGetBlockHeader(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
+	c := cmd.(*dcrjson.GetBlockHeaderCmd)
+
+	sha, err := chainhash.NewHashFromStr(c.Hash)
+	if err != nil {
+		return nil, err
+	}
+
+	if c.Verbose == nil || *c.Verbose {
+		blk, err := s.server.db.FetchBlockBySha(sha)
+		if err != nil {
+			return nil, &dcrjson.RPCError{
+				Code:    dcrjson.ErrRPCInvalidAddressOrKey,
+				Message: "Invalid address or key: " + err.Error(),
+			}
+		}
+
+		_, maxIdx, err := s.server.db.NewestSha()
+		if err != nil {
+			context := "Failed to get newest hash"
+			return nil, internalRPCError(err.Error(), context)
+		}
+
+		var shaNextStr string
+		shaNext, err := s.server.db.FetchBlockShaByHeight(blk.Height() + 1)
+		if err == nil {
+			shaNextStr = shaNext.String()
+		}
+
+		msgBlock := blk.MsgBlock()
+		blockHeaderReply := dcrjson.GetBlockHeaderVerboseResult{
+			Hash:          c.Hash,
+			Confirmations: uint64(1 + maxIdx - blk.Height()),
+			Height:        int32(blk.Height()),
+			Version:       msgBlock.Header.Version,
+			MerkleRoot:    msgBlock.Header.MerkleRoot.String(),
+			NextHash:      shaNextStr,
+			PreviousHash:  msgBlock.Header.PrevBlock.String(),
+			Nonce:         uint64(msgBlock.Header.Nonce),
+			Time:          msgBlock.Header.Timestamp.Unix(),
+			Bits:          strconv.FormatInt(int64(msgBlock.Header.Bits), 16),
+			Difficulty:    getDifficultyRatio(msgBlock.Header.Bits),
+		}
+		return blockHeaderReply, nil
+	}
+
+	// Verbose disabled
+	blkHeader, err := s.server.db.FetchBlockHeaderBySha(sha)
+	if err != nil {
+		return nil, &dcrjson.RPCError{
+			Code:    dcrjson.ErrRPCInvalidAddressOrKey,
+			Message: "Invalid address or key: " + err.Error(),
+		}
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, wire.MaxBlockHeaderPayload))
+	if err = blkHeader.BtcEncode(buf, maxProtocolVersion); err != nil {
+		errStr := fmt.Sprintf("Failed to serialize data: %v", err)
+		return nil, internalRPCError(errStr, "")
+	}
+
+	return hex.EncodeToString(buf.Bytes()), nil
+}
+
 // encodeTemplateID encodes the passed details into an ID that can be used to
 // uniquely identify a block template.
 func encodeTemplateID(prevHash *chainhash.Hash, lastGenerated time.Time) string {
@@ -3126,6 +3119,23 @@ func handleGetInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (in
 	return ret, nil
 }
 
+// handleGetMempoolInfo implements the getmempoolinfo command.
+func handleGetMempoolInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
+	txD := s.server.txMemPool.TxDescs()
+
+	var numBytes int64
+	for _, desc := range txD {
+		numBytes += int64(desc.Tx.MsgTx().SerializeSize())
+	}
+
+	ret := &dcrjson.GetMempoolInfoResult{
+		Size:  int64(len(txD)),
+		Bytes: numBytes,
+	}
+
+	return ret, nil
+}
+
 // handleGetMiningInfo implements the getmininginfo command. We only return the
 // fields that are not related to wallet functionality.
 func handleGetMiningInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
@@ -3162,7 +3172,7 @@ func handleGetMiningInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{
 	}
 
 	result := dcrjson.GetMiningInfoResult{
-		Blocks:           height,
+		Blocks:           int64(height),
 		CurrentBlockSize: uint64(len(blockBytes)),
 		CurrentBlockTx:   uint64(len(block.MsgBlock().Transactions)),
 		Difficulty:       getDifficultyRatio(block.MsgBlock().Header.Bits),
@@ -3206,9 +3216,9 @@ func handleGetNetworkHashPS(s *rpcServer, cmd interface{}, closeChan <-chan stru
 	// since we can't reasonably calculate the number of network hashes
 	// per second from invalid values.  When it's negative, use the current
 	// best block height.
-	endHeight := int64(-1)
+	endHeight := int32(-1)
 	if c.Height != nil {
-		endHeight = int64(*c.Height)
+		endHeight = int32(*c.Height)
 	}
 	if endHeight > newestHeight || endHeight == 0 {
 		return int64(0), nil
@@ -3226,12 +3236,12 @@ func handleGetNetworkHashPS(s *rpcServer, cmd interface{}, closeChan <-chan stru
 	blocksPerRetarget := int64(s.server.chainParams.TargetTimespan /
 		s.server.chainParams.TimePerBlock)
 
-	numBlocks := int64(120)
+	numBlocks := int32(120)
 	if c.Blocks != nil {
-		numBlocks = int64(*c.Blocks)
+		numBlocks = int32(*c.Blocks)
 	}
 
-	var startHeight int64
+	var startHeight int32
 	if numBlocks <= 0 {
 		startHeight = endHeight - ((endHeight % blocksPerRetarget) + 1)
 	} else {
@@ -3352,7 +3362,7 @@ func handleGetRawMempool(s *rpcServer, cmd interface{}, closeChan <-chan struct{
 			Size:             int32(desc.Tx.MsgTx().SerializeSize()),
 			Fee:              dcrutil.Amount(desc.Fee).ToCoin(),
 			Time:             desc.Added.Unix(),
-			Height:           desc.Height,
+			Height:           int64(desc.Height),
 			StartingPriority: startingPriority,
 			CurrentPriority:  currentPriority,
 			Depends:          make([]string, 0),
@@ -3421,7 +3431,7 @@ func handleGetRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan str
 	var maxIdx int64
 	var mtx *wire.MsgTx
 	var blkHash *chainhash.Hash
-	var blkHeight int64
+	var blkHeight int32
 	var blkIndex uint32
 	var tip *dcrutil.Block
 	needsVotes := false
@@ -3489,24 +3499,30 @@ func handleGetRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan str
 		return mtxHex, nil
 	}
 
-	var blk *dcrutil.Block
+	var blkHeader *wire.BlockHeader
+	var blkHashStr string
+	var chainHeight int32
 	if blkHash != nil {
-		if needsVotes {
-			blk = tip
-		} else {
-			blk, err = s.server.db.FetchBlockBySha(blkHash)
-			if err != nil {
-				rpcsLog.Errorf("Error fetching sha: %v", err)
-				return nil, &dcrjson.RPCError{
-					Code:    dcrjson.ErrRPCBlockNotFound,
-					Message: "Block not found: " + err.Error(),
-				}
+		blkHeader, err = s.server.db.FetchBlockHeaderBySha(blkHash)
+		if err != nil {
+			rpcsLog.Errorf("Error fetching sha: %v", err)
+			return nil, &dcrjson.RPCError{
+				Code:    dcrjson.ErrRPCBlockNotFound,
+				Message: "Block not found: " + err.Error(),
 			}
 		}
+
+		_, chainHeight, err = s.server.db.NewestSha()
+		if err != nil {
+			context := "Failed to get newest hash"
+			return nil, internalRPCError(err.Error(), context)
+		}
+
+		blkHashStr = blkHash.String()
 	}
 
-	rawTxn, err := createTxRawResult(s.server.chainParams, c.Txid, mtx, blk,
-		maxIdx, blkHash, blkHeight, blkIndex)
+	rawTxn, err := createTxRawResult(s.server.chainParams, mtx,
+		txHash.String(), blkHeader, blkHashStr, blkHeight, blkIndex, chainHeight)
 	if err != nil {
 		return nil, err
 	}
@@ -3585,7 +3601,7 @@ func handleGetTxOut(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i
 	// from there, otherwise attempt to fetch from the block database.
 	var mtx *wire.MsgTx
 	var bestBlockSha string
-	var confirmations int64
+	var confirmations int32
 	var dbSpentInfo []bool
 	includeMempool := true
 	if c.IncludeMempool != nil {
@@ -3671,7 +3687,7 @@ func handleGetTxOut(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i
 
 	txOutReply := &dcrjson.GetTxOutResult{
 		BestBlock:     bestBlockSha,
-		Confirmations: confirmations,
+		Confirmations: int64(confirmations),
 		Value:         dcrutil.Amount(txOut.Value).ToUnit(dcrutil.AmountCoin),
 		Version:       mtx.Version,
 		ScriptPubKey: dcrjson.ScriptPubKeyResult{
@@ -4281,9 +4297,11 @@ func handleSearchRawTransactions(s *rpcServer, cmd interface{},
 			// within a block. So we conditionally fetch a txs
 			// embedded block here. This will be reflected in the
 			// final JSON output (mempool won't have confirmations).
-			var blk *dcrutil.Block
+			var blkHeader *wire.BlockHeader
+			var blkHashStr string
+			var blkHeight int32
 			if txReply.BlkSha != nil {
-				blk, err = s.server.db.FetchBlockBySha(txReply.BlkSha)
+				blkHeader, err = s.server.db.FetchBlockHeaderBySha(txReply.BlkSha)
 				if err != nil {
 					rpcsLog.Errorf("Error fetching sha: %v", err)
 					return nil, &dcrjson.RPCError{
@@ -4291,14 +4309,22 @@ func handleSearchRawTransactions(s *rpcServer, cmd interface{},
 						Message: "Block not found",
 					}
 				}
+				blkHashStr = txReply.BlkSha.String()
+				blkHeight = txReply.Height
 			}
 
-			var blkHash *chainhash.Hash
-			var blkHeight int64
 			var blkIndex uint32
+			blk, err := s.server.db.FetchBlockBySha(txReply.BlkSha)
+			if err != nil {
+				rpcsLog.Errorf("Error fetching sha: %v", err)
+				return nil, &dcrjson.RPCError{
+					Code:    dcrjson.ErrRPCBlockNotFound,
+					Message: "Block not found",
+				}
+			}
 			if blk != nil {
-				blkHash = blk.Sha()
-				blkHeight = blk.Height()
 				blkIndex = wire.NullBlockIndex
 
 				for i, tx := range blk.Transactions() {
@@ -4308,8 +4334,9 @@ func handleSearchRawTransactions(s *rpcServer, cmd interface{},
 				}
 			}
 
-			rawTxn, err := createTxRawResult(s.server.chainParams,
-				txHash, mtx, blk, maxIdx, blkHash, blkHeight, blkIndex)
+			rawTxn, err := createTxRawResult(s.server.chainParams, mtx,
+				txHash, blkHeader, blkHashStr, blkHeight, blkIndex, maxIdx)
+
 			if err != nil {
 				return nil, err
 			}
@@ -4503,7 +4530,7 @@ func verifyChain(db database.Db, level, depth int32, timeSource blockchain.Media
 
 	for height := curHeight; height > finishHeight; height-- {
 		// Level 0 just looks up the block.
-		sha, err := db.FetchBlockShaByHeight(int64(height))
+		sha, err := db.FetchBlockShaByHeight(height)
 		if err != nil {
 			rpcsLog.Errorf("Verify is unable to fetch block at "+
 				"height %d: %v", height, err)
diff --git a/rpcserverhelp.go b/rpcserverhelp.go
index 8d5091da..89b5a985 100644
--- a/rpcserverhelp.go
+++ b/rpcserverhelp.go
@@ -153,14 +153,9 @@ var helpDescsEnUS = map[string]string{
 	"existsliveticket--result0":  "Bool showing if address exists in the live ticket database or not",
 
 	// ExistsLiveTicketsCmd help.
-	"existslivetickets--synopsis": "Test for the existance of the provided tickets in the live ticket map",
+	"existslivetickets--synopsis": "Test for the existence of the provided tickets",
 	"existslivetickets-txhashblob": "Blob containing the hashes to check",
-	"existslivetickets--result0":   "Bool blob showing if ticket exists in the live ticket database or not",
-
-	// ExistsMempoolTxsCmd help.
-	"existsmempooltxs--synopsis":   "Test for the existance of the provided txs in the mempool",
-	"existsmempooltxs-txhashblob":  "Blob containing the hashes to check",
-	"existsmempooltxs--result0":    "Bool blob showing if txs exist in the mempool or not",
+	"existslivetickets--result0":   "Bool showing if the ticket exists in the live ticket database or not",
 
 	// GenerateCmd help
 	"generate--synopsis": "Generates a set number of blocks (simnet or regtest only) and returns a JSON\n" +
@@ -260,6 +255,27 @@ var helpDescsEnUS = map[string]string{
 	"getblockhash-index":    "The block height",
 	"getblockhash--result0": "The block hash",
 
+	// GetBlockHeaderCmd help.
+	"getblockheader--synopsis":   "Returns information about a block header given its hash.",
+	"getblockheader-hash":        "The hash of the block",
+	"getblockheader-verbose":     "Specifies the block header is returned as a JSON object instead of hex-encoded string",
+	"getblockheader--condition0": "verbose=false",
+	"getblockheader--condition1": "verbose=true",
+	"getblockheader--result0":    "The block header hash",
+
+	// GetBlockHeaderVerboseResult help.
+	"getblockheaderverboseresult-hash":              "The hash of the block (same as provided)",
+	"getblockheaderverboseresult-confirmations":     "The number of confirmations",
+	"getblockheaderverboseresult-height":            "The height of the block in the block chain",
+	"getblockheaderverboseresult-version":           "The block version",
+	"getblockheaderverboseresult-merkleroot":        "Root hash of the merkle tree",
+	"getblockheaderverboseresult-time":              "The block time in seconds since 1 Jan 1970 GMT",
+	"getblockheaderverboseresult-nonce":             "The block nonce",
+	"getblockheaderverboseresult-bits":              "The bits which represent the block difficulty",
+	"getblockheaderverboseresult-difficulty":        "The proof-of-work difficulty as a multiple of the minimum difficulty",
+	"getblockheaderverboseresult-previousblockhash": "The hash of the previous block",
+	"getblockheaderverboseresult-nextblockhash":     "The hash of the next block (only if there is one)",
+
 	// TemplateRequest help.
 	"templaterequest-mode":         "This is 'template', 'proposal', or omitted",
 	"templaterequest-capabilities": "List of capabilities",
@@ -375,6 +391,13 @@ var helpDescsEnUS = map[string]string{
 	// GetInfoCmd help.
 	"getinfo--synopsis": "Returns a JSON object containing various state info.",
 
+	// GetMempoolInfoCmd help.
+	"getmempoolinfo--synopsis": "Returns memory pool information",
+
+	// GetMempoolInfoResult help.
+	"getmempoolinforesult-bytes": "Size in bytes of the mempool",
+	"getmempoolinforesult-size":  "Number of transactions in the mempool",
+
 	// GetMiningInfoResult help.
"getmininginforesult-blocks": "Height of the latest best block", "getmininginforesult-currentblocksize": "Size of the latest best block", @@ -674,8 +697,7 @@ var rpcResultTypes = map[string][]interface{}{ "estimatefee": []interface{}{(*float64)(nil)}, "existsaddress": []interface{}{(*bool)(nil)}, "existsliveticket": []interface{}{(*bool)(nil)}, - "existslivetickets": []interface{}{(*string)(nil)}, - "existsmempooltxs": []interface{}{(*string)(nil)}, + "existslivetickets": []interface{}{(*bool)(nil)}, "getaddednodeinfo": []interface{}{(*[]string)(nil), (*[]dcrjson.GetAddedNodeInfoResult)(nil)}, "getbestblock": []interface{}{(*dcrjson.GetBestBlockResult)(nil)}, "generate": []interface{}{(*[]string)(nil)}, @@ -683,6 +705,7 @@ var rpcResultTypes = map[string][]interface{}{ "getblock": []interface{}{(*string)(nil), (*dcrjson.GetBlockVerboseResult)(nil)}, "getblockcount": []interface{}{(*int64)(nil)}, "getblockhash": []interface{}{(*string)(nil)}, + "getblockheader": []interface{}{(*string)(nil), (*dcrjson.GetBlockHeaderVerboseResult)(nil)}, "getblocktemplate": []interface{}{(*dcrjson.GetBlockTemplateResult)(nil), (*string)(nil), nil}, "getconnectioncount": []interface{}{(*int32)(nil)}, "getcurrentnet": []interface{}{(*uint32)(nil)}, @@ -691,6 +714,7 @@ var rpcResultTypes = map[string][]interface{}{ "getgenerate": []interface{}{(*bool)(nil)}, "gethashespersec": []interface{}{(*float64)(nil)}, "getinfo": []interface{}{(*dcrjson.InfoChainResult)(nil)}, + "getmempoolinfo": []interface{}{(*dcrjson.GetMempoolInfoResult)(nil)}, "getmininginfo": []interface{}{(*dcrjson.GetMiningInfoResult)(nil)}, "getnettotals": []interface{}{(*dcrjson.GetNetTotalsResult)(nil)}, "getnetworkhashps": []interface{}{(*int64)(nil)}, diff --git a/rpcwebsocket.go b/rpcwebsocket.go index 00b4d075..44e5d762 100644 --- a/rpcwebsocket.go +++ b/rpcwebsocket.go @@ -868,8 +868,8 @@ func (m *wsNotificationManager) notifyForNewTx(clients map[chan struct{}]*wsClie } net := m.server.server.chainParams - rawTx, err := createTxRawResult(net, txShaStr, mtx, nil, - 0, nil, int64(wire.NullBlockHeight), wire.NullBlockIndex) + rawTx, err := createTxRawResult(net, mtx, txShaStr, nil, + "", 0, wire.NullBlockIndex, 0) if err != nil { return } @@ -2040,8 +2040,8 @@ type rescanKeys struct { fallbacks map[string]struct{} pubKeyHashes map[[ripemd160.Size]byte]struct{} scriptHashes map[[ripemd160.Size]byte]struct{} - compressedPubkeys map[[33]byte]struct{} - uncompressedPubkeys map[[65]byte]struct{} + compressedPubKeys map[[33]byte]struct{} + uncompressedPubKeys map[[65]byte]struct{} unspent map[wire.OutPoint]struct{} } @@ -2165,14 +2165,14 @@ func rescanBlock(wsc *wsClient, lookups *rescanKeys, blk *dcrutil.Block, case 33: // Compressed var key [33]byte copy(key[:], sa) - if _, ok := lookups.compressedPubkeys[key]; ok { + if _, ok := lookups.compressedPubKeys[key]; ok { found = true } case 65: // Uncompressed var key [65]byte copy(key[:], sa) - if _, ok := lookups.uncompressedPubkeys[key]; ok { + if _, ok := lookups.uncompressedPubKeys[key]; ok { found = true } @@ -2259,7 +2259,7 @@ func rescanBlock(wsc *wsClient, lookups *rescanKeys, blk *dcrutil.Block, // verifies that the new range of blocks is on the same fork as a previous // range of blocks. If this condition does not hold true, the JSON-RPC error // for an unrecoverable reorganize is returned. 
-func recoverFromReorg(db database.Db, minBlock, maxBlock int64, +func recoverFromReorg(db database.Db, minBlock, maxBlock int32, lastBlock *dcrutil.Block) ([]chainhash.Hash, error) { hashList, err := db.FetchHeightRange(minBlock, maxBlock) @@ -2516,8 +2516,8 @@ func handleRescan(wsc *wsClient, icmd interface{}) (interface{}, error) { fallbacks: map[string]struct{}{}, pubKeyHashes: map[[ripemd160.Size]byte]struct{}{}, scriptHashes: map[[ripemd160.Size]byte]struct{}{}, - compressedPubkeys: map[[33]byte]struct{}{}, - uncompressedPubkeys: map[[65]byte]struct{}{}, + compressedPubKeys: map[[33]byte]struct{}{}, + uncompressedPubKeys: map[[65]byte]struct{}{}, unspent: map[wire.OutPoint]struct{}{}, } var compressedPubkey [33]byte @@ -2544,11 +2544,11 @@ func handleRescan(wsc *wsClient, icmd interface{}) (interface{}, error) { switch len(pubkeyBytes) { case 33: // Compressed copy(compressedPubkey[:], pubkeyBytes) - lookups.compressedPubkeys[compressedPubkey] = struct{}{} + lookups.compressedPubKeys[compressedPubkey] = struct{}{} case 65: // Uncompressed copy(uncompressedPubkey[:], pubkeyBytes) - lookups.uncompressedPubkeys[uncompressedPubkey] = struct{}{} + lookups.uncompressedPubKeys[uncompressedPubkey] = struct{}{} default: jsonErr := dcrjson.RPCError{ @@ -2697,7 +2697,7 @@ fetchRange: // A goto is used to branch executation back to // before the range was evaluated, as it must be // reevaluated for the new hashList. - minBlock += int64(i) + minBlock += int32(i) hashList, err = recoverFromReorg(db, minBlock, maxBlock, lastBlock) if err != nil { @@ -2799,7 +2799,7 @@ fetchRange: } } - minBlock += int64(len(hashList)) + minBlock += int32(len(hashList)) } // Scan the mempool for addresses. diff --git a/sample-dcrd.conf b/sample-dcrd.conf index f06576dd..9d383686 100644 --- a/sample-dcrd.conf +++ b/sample-dcrd.conf @@ -198,6 +198,33 @@ ; server without having to remove credentials from the config file. ; norpc=1 +; Use the following setting to disable TLS for the RPC server. NOTE: This +; option only works if the RPC server is bound to localhost interfaces (which is +; the default). +; notls=1 + +; ------------------------------------------------------------------------------ +; Mempool Settings - The following options +; ------------------------------------------------------------------------------ + +; Rate-limit free transactions to the value 15 * 1000 bytes per +; minute. +; limitfreerelay=15 + +; Require high priority for relaying free or low-fee transactions. +; norelaypriority=0 + +; Limit orphan transaction pool to 1000 transactions. +; maxorphantx=1000 + +; ------------------------------------------------------------------------------ +; Optional Transaction Indexes +; ------------------------------------------------------------------------------ + +; Build and maintain a full address-based transaction index. +; addrindex=1 +; Delete the entire address index on start up, then exit. +; dropaddrindex=0 ; ------------------------------------------------------------------------------ ; Coin Generation (Mining) Settings - The following options control the diff --git a/server.go b/server.go index 0b9e1243..5afee8ef 100644 --- a/server.go +++ b/server.go @@ -43,10 +43,6 @@ const ( // server. supportedServices = wire.SFNodeNetwork - // connectionRetryInterval is the amount of time to wait in between - // retries when connecting to persistent peers. - connectionRetryInterval = time.Second * 10 - // defaultMaxOutbound is the default number of max outbound peers. 
defaultMaxOutbound = 8 ) @@ -312,7 +308,9 @@ func (s *server) handleDonePeerMsg(state *peerState, p *peer) { // Issue an asynchronous reconnect if the peer was a // persistent outbound connection. if !p.inbound && p.persistent && atomic.LoadInt32(&s.shutdown) == 0 { + delete(list, e) e = newOutboundPeer(s, p.addr, true, p.retryCount+1) + list[e] = struct{}{} return } if !p.inbound { diff --git a/txscript/error.go b/txscript/error.go index 4314f096..f5567108 100644 --- a/txscript/error.go +++ b/txscript/error.go @@ -30,11 +30,11 @@ var ( // ErrStackOpDisabled is returned when a disabled opcode is encountered // in the script. - ErrStackOpDisabled = errors.New("Disabled Opcode") + ErrStackOpDisabled = errors.New("disabled opcode") // ErrStackVerifyFailed is returned when one of the OP_VERIFY or // OP_*VERIFY instructions is executed and the conditions fails. - ErrStackVerifyFailed = errors.New("Verify failed") + ErrStackVerifyFailed = errors.New("verify failed") // ErrStackNumberTooBig is returned when the argument for an opcode that // should be an offset is obviously far too large. @@ -42,15 +42,15 @@ var ( // ErrStackInvalidOpcode is returned when an opcode marked as invalid or // a completely undefined opcode is encountered. - ErrStackInvalidOpcode = errors.New("Invalid Opcode") + ErrStackInvalidOpcode = errors.New("invalid opcode") // ErrStackReservedOpcode is returned when an opcode marked as reserved // is encountered. - ErrStackReservedOpcode = errors.New("Reserved Opcode") + ErrStackReservedOpcode = errors.New("reserved opcode") // ErrStackEarlyReturn is returned when OP_RETURN is executed in the // script. - ErrStackEarlyReturn = errors.New("Script returned early") + ErrStackEarlyReturn = errors.New("script returned early") // ErrStackNoIf is returned if an OP_ELSE or OP_ENDIF is encountered // without first having an OP_IF or OP_NOTIF in the script. @@ -60,17 +60,17 @@ var ( // without and OP_ENDIF to correspond to a conditional expression. ErrStackMissingEndif = fmt.Errorf("execute fail, in conditional execution") - // ErrStackTooManyPubkeys is returned if an OP_CHECKMULTISIG is + // ErrStackTooManyPubKeys is returned if an OP_CHECKMULTISIG is // encountered with more than MaxPubKeysPerMultiSig pubkeys present. - ErrStackTooManyPubkeys = errors.New("Invalid pubkey count in OP_CHECKMULTISIG") + ErrStackTooManyPubKeys = errors.New("invalid pubkey count in OP_CHECKMULTISIG") // ErrStackTooManyOperations is returned if a script has more than // MaxOpsPerScript opcodes that do not push data. - ErrStackTooManyOperations = errors.New("Too many operations in script") + ErrStackTooManyOperations = errors.New("too many operations in script") // ErrStackElementTooBig is returned if the size of an element to be // pushed to the stack is over MaxScriptElementSize. - ErrStackElementTooBig = errors.New("Element in script too large") + ErrStackElementTooBig = errors.New("element in script too large") // ErrStackUnknownAddress is returned when ScriptToAddrHash does not // recognise the pattern of the script and thus can not find the address @@ -84,12 +84,12 @@ var ( // ErrStackScriptUnfinished is returned when CheckErrorCondition is // called on a script that has not finished executing. - ErrStackScriptUnfinished = errors.New("Error check when script unfinished") + ErrStackScriptUnfinished = errors.New("error check when script unfinished") // ErrStackEmptyStack is returned when the stack is empty at the end of // execution. 
Normal operation requires that a boolean is on top of the // stack when the scripts have finished executing. - ErrStackEmptyStack = errors.New("Stack empty at end of execution") + ErrStackEmptyStack = errors.New("stack empty at end of execution") // ErrStackP2SHNonPushOnly is returned when a Pay-to-Script-Hash // transaction is encountered and the ScriptSig does operations other @@ -107,7 +107,7 @@ var ( // ErrStackInvalidIndex is returned when an out-of-bounds index was // passed to a function. - ErrStackInvalidIndex = errors.New("Invalid script index") + ErrStackInvalidIndex = errors.New("invalid script index") // ErrStackNonPushOnly is returned when ScriptInfo is called with a // pkScript that peforms operations other that pushing data to the stack. @@ -115,7 +115,7 @@ var ( // ErrStackOverflow is returned when stack and altstack combined depth // is over the limit. - ErrStackOverflow = errors.New("Stacks overflowed") + ErrStackOverflow = errors.New("stack overflow") // ErrStackInvalidLowSSignature is returned when the ScriptVerifyLowS // flag is set and the script contains any signatures whose S values diff --git a/txscript/opcode.go b/txscript/opcode.go index aaab8463..fbc22da9 100644 --- a/txscript/opcode.go +++ b/txscript/opcode.go @@ -2455,7 +2455,7 @@ func opcodeCheckMultiSig(op *parsedOpcode, vm *Engine) error { numPubKeys := int(numKeys.Int32()) if numPubKeys < 0 || numPubKeys > MaxPubKeysPerMultiSig { - return ErrStackTooManyPubkeys + return ErrStackTooManyPubKeys } vm.numOps += numPubKeys if vm.numOps > MaxOpsPerScript { diff --git a/txscript/script.go b/txscript/script.go index 70cbd0a7..ea82bc8e 100644 --- a/txscript/script.go +++ b/txscript/script.go @@ -328,9 +328,6 @@ func calcSignatureHash(script []parsedOpcode, hashType SigHashType, // inputs that are not currently being processed. txCopy := tx.Copy() for i := range txCopy.TxIn { - var txIn wire.TxIn - txIn = *txCopy.TxIn[i] - txCopy.TxIn[i] = &txIn if i == idx { // UnparseScript cannot fail here because removeOpcode // above only returns a valid script. @@ -341,13 +338,6 @@ func calcSignatureHash(script []parsedOpcode, hashType SigHashType, } } - // Default behavior has all outputs set up. - for i := range txCopy.TxOut { - var txOut wire.TxOut - txOut = *txCopy.TxOut[i] - txCopy.TxOut[i] = &txOut - } - switch hashType & sigHashMask { case SigHashNone: txCopy.TxOut = txCopy.TxOut[0:0] // Empty slice. @@ -530,3 +520,15 @@ func GetPreciseSigOpCount(scriptSig, scriptPubKey []byte, bip16 bool) int { shPops, _ := parseScript(shScript) return getSigOpCount(shPops, true) } + +// IsUnspendable returns whether the passed public key script is unspendable, or +// guaranteed to fail at execution. This allows inputs to be pruned instantly +// when entering the UTXO set. +func IsUnspendable(pkScript []byte) bool { + pops, err := parseScript(pkScript) + if err != nil { + return true + } + + return len(pops) > 0 && pops[0].opcode.value == OP_RETURN +} diff --git a/txscript/script_test.go b/txscript/script_test.go index d706d153..9db572f4 100644 --- a/txscript/script_test.go +++ b/txscript/script_test.go @@ -576,3 +576,38 @@ func TestCalcSignatureHash(t *testing.T) { msg3) } } + +// TestIsUnspendable ensures the IsUnspendable function returns the expected +// results. 
+func TestIsUnspendable(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + pkScript []byte + expected bool + }{ + { + // Unspendable + pkScript: []byte{0x6a, 0x04, 0x74, 0x65, 0x73, 0x74}, + expected: true, + }, + { + // Spendable + pkScript: []byte{0x76, 0xa9, 0x14, 0x29, 0x95, 0xa0, + 0xfe, 0x68, 0x43, 0xfa, 0x9b, 0x95, 0x45, + 0x97, 0xf0, 0xdc, 0xa7, 0xa4, 0x4d, 0xf6, + 0xfa, 0x0b, 0x5c, 0x88, 0xac}, + expected: false, + }, + } + + for i, test := range tests { + res := txscript.IsUnspendable(test.pkScript) + if res != test.expected { + t.Errorf("TestIsUnspendable #%d failed: got %v want %v", + i, res, test.expected) + continue + } + } +} diff --git a/txscript/sign.go b/txscript/sign.go index ba653e43..46b5e595 100644 --- a/txscript/sign.go +++ b/txscript/sign.go @@ -545,10 +545,10 @@ type KeyDB interface { } // KeyClosure implements ScriptDB with a closure -type KeyClosure func(dcrutil.Address) (chainec.PrivateKey, bool, error) +type KeyClosure func(dcrutil.Address) (*chainec.PrivateKey, bool, error) // GetKey implements KeyDB by returning the result of calling the closure -func (kc KeyClosure) GetKey(address dcrutil.Address) (chainec.PrivateKey, +func (kc KeyClosure) GetKey(address dcrutil.Address) (*chainec.PrivateKey, bool, error) { return kc(address) } diff --git a/txscript/standard.go b/txscript/standard.go index 1d5ec00f..d033080b 100644 --- a/txscript/standard.go +++ b/txscript/standard.go @@ -249,7 +249,7 @@ func IsMultisigSigScript(script []byte) bool { func isNullData(pops []parsedOpcode) bool { // A nulldata transaction is either a single OP_RETURN or an // OP_RETURN SMALLDATA (where SMALLDATA is a data push up to - // maxDataCarrierSize bytes). + // MaxDataCarrierSize bytes). l := len(pops) if l == 1 && pops[0].opcode.value == OP_RETURN { return true @@ -258,7 +258,7 @@ func isNullData(pops []parsedOpcode) bool { return l == 2 && pops[0].opcode.value == OP_RETURN && pops[1].opcode.value <= OP_PUSHDATA4 && - len(pops[1].data) <= maxDataCarrierSize + len(pops[1].data) <= MaxDataCarrierSize } // isStakeSubmission returns true if the script passed is a stake submission tx, diff --git a/version.go b/version.go index 3bf3f5c7..f04f6634 100644 --- a/version.go +++ b/version.go @@ -19,7 +19,7 @@ const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqr const ( appMajor uint = 0 appMinor uint = 0 - appPatch uint = 6 + appPatch uint = 5 // appPreRelease MUST only contain characters from semanticAlphabet // per the semantic versioning spec. diff --git a/wire/blockheader.go b/wire/blockheader.go index 4e8577f0..2eefa5dc 100644 --- a/wire/blockheader.go +++ b/wire/blockheader.go @@ -95,6 +95,22 @@ func (h *BlockHeader) BlockSha() chainhash.Hash { return chainhash.HashFuncH(buf.Bytes()) } +// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// This is part of the Message interface implementation. +// See Deserialize for decoding block headers stored to disk, such as in a +// database, as opposed to decoding block headers from the wire. +func (h *BlockHeader) BtcDecode(r io.Reader, pver uint32) error { + return readBlockHeader(r, pver, h) +} + +// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// This is part of the Message interface implementation. +// See Serialize for encoding block headers to be stored to disk, such as in a +// database, as opposed to encoding block headers for the wire. 
+func (h *BlockHeader) BtcEncode(w io.Writer, pver uint32) error { + return writeBlockHeader(w, pver, h) +} + // Deserialize decodes a block header from r into the receiver using a format // that is suitable for long-term storage such as a database while respecting // the Version field. diff --git a/wire/blockheader_test.go b/wire/blockheader_test.go index 7476c9b0..601532fc 100644 --- a/wire/blockheader_test.go +++ b/wire/blockheader_test.go @@ -113,6 +113,7 @@ func TestBlockHeader(t *testing.T) { // protocol versions. func TestBlockHeaderWire(t *testing.T) { nonce := uint32(123123) // 0x1e0f3 + pver := uint32(70001) /*bh := dcrwire.NewBlockHeader( &hash, @@ -216,14 +217,15 @@ func TestBlockHeaderWire(t *testing.T) { continue } - b, err := wire.TstBytesBlockHeader(test.in) + buf.Reset() + err = test.in.BtcEncode(&buf, pver) if err != nil { - t.Errorf("writeBlockHeader #%d error %v", i, err) + t.Errorf("BtcEncode #%d error %v", i, err) continue } - if !bytes.Equal(b, test.buf) { - t.Errorf("writeBlockHeader #%d\n got: %s want: %s", i, - spew.Sdump(b), spew.Sdump(test.buf)) + if !bytes.Equal(buf.Bytes(), test.buf) { + t.Errorf("BtcEncode #%d\n got: %s want: %s", i, + spew.Sdump(buf.Bytes()), spew.Sdump(test.buf)) continue } @@ -240,6 +242,18 @@ func TestBlockHeaderWire(t *testing.T) { spew.Sdump(&bh), spew.Sdump(test.out)) continue } + + rbuf = bytes.NewReader(test.buf) + err = bh.BtcDecode(rbuf, pver) + if err != nil { + t.Errorf("BtcDecode #%d error %v", i, err) + continue + } + if !reflect.DeepEqual(&bh, test.out) { + t.Errorf("BtcDecode #%d\n got: %s want: %s", i, + spew.Sdump(&bh), spew.Sdump(test.out)) + continue + } } }
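A quick way to exercise the new BlockHeader.BtcEncode/BtcDecode methods outside of TestBlockHeaderWire is a simple encode/decode round trip. The sketch below is illustrative only: it assumes the github.com/decred/dcrd/wire import path, reuses the same arbitrary protocol version (70001) as the test, and fills in made-up header field values; every other header field is left at its zero value.

package main

import (
	"bytes"
	"fmt"
	"reflect"
	"time"

	"github.com/decred/dcrd/wire" // assumed import path for the dcrd wire package
)

func main() {
	// Build a header with a few arbitrary field values for illustration.
	hdr := wire.BlockHeader{
		Version:   1,
		Bits:      0x1d00ffff,
		Nonce:     123123,
		Timestamp: time.Unix(1454954400, 0), // wire encoding keeps second precision only
	}

	// Encode using the wire protocol encoding added by this change.
	pver := uint32(70001)
	var buf bytes.Buffer
	if err := hdr.BtcEncode(&buf, pver); err != nil {
		fmt.Println("encode error:", err)
		return
	}

	// Decode it back and confirm the round trip is lossless, which is the
	// same property the added BtcDecode assertions in TestBlockHeaderWire
	// check with reflect.DeepEqual.
	var decoded wire.BlockHeader
	if err := decoded.BtcDecode(bytes.NewReader(buf.Bytes()), pver); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println("round trip ok:", reflect.DeepEqual(hdr, decoded))
}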