Compare commits

..

No commits in common. "master" and "peer/v2.0.0" have entirely different histories.

291 changed files with 4707 additions and 5984 deletions

View File

@ -1,28 +0,0 @@
name: Build and Test
on: [push, pull_request]
jobs:
build:
name: Go CI
runs-on: ubuntu-latest
strategy:
matrix:
go: [1.12, 1.13]
steps:
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: ${{ matrix.go }}
- name: Check out source
uses: actions/checkout@v1
- name: Install Linters
run: "curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0"
- name: Build
env:
GO111MODULE: "on"
run: go build ./...
- name: Test
env:
GO111MODULE: "on"
run: |
export PATH=${PATH}:$(go env GOPATH)/bin
sh ./run_tests.sh

35
.travis.yml Normal file
View File

@ -0,0 +1,35 @@
language: go
sudo: false
env:
- GO111MODULE=on
matrix:
include:
- os: linux
go: 1.12.x
cache:
directories:
- $HOME/.cache/go-build
- $HOME/go/pkg/mod
- os: linux
go: 1.11.x
cache:
directories:
- $HOME/.cache/go-build
- $HOME/go/pkg/mod
- os: osx
go: 1.12.x
cache:
directories:
- $HOME/.cache/go-build
- $HOME/go/pkg/mod
- os: osx
go: 1.11.x
cache:
directories:
- $HOME/Library/Caches/go-build
- $HOME/go/pkg/mod
install:
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.17.1
script:
- env GO111MODULE=on go build ./...
- env GO111MODULE=on ./run_tests.sh

View File

@ -1,7 +1,7 @@
dcrd
====
[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
[![Build Status](https://travis-ci.org/decred/dcrd.png?branch=master)](https://travis-ci.org/decred/dcrd)
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/decred/dcrd)
[![Go Report Card](https://goreportcard.com/badge/github.com/decred/dcrd)](https://goreportcard.com/report/github.com/decred/dcrd)

View File

@ -74,13 +74,6 @@ type localAddress struct {
score AddressPriority
}
// LocalAddr represents network address information for a local address.
type LocalAddr struct {
Address string
Port uint16
Score int32
}
// AddressPriority type is used to describe the hierarchy of local address
// discovery methods.
type AddressPriority int
@ -140,7 +133,7 @@ const (
newBucketsPerAddress = 8
// numMissingDays is the number of days before which we assume an
// address has vanished if we have not seen it announced in that long.
// address has vanished if we have not seen it announced in that long.
numMissingDays = 30
// numRetries is the number of tried without a single success before
@ -382,7 +375,7 @@ func (a *AddrManager) savePeers() {
return
}
// First we make a serialisable data structure so we can encode it to JSON.
// First we make a serialisable datastructure so we can encode it to JSON.
sam := new(serializedAddrManager)
sam.Version = serialisationVersion
copy(sam.Key[:], a.key[:])
@ -543,7 +536,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
if v.refs > 0 && v.tried {
return fmt.Errorf("address %s after serialisation "+
"which is both new and tried", k)
"which is both new and tried!", k)
}
}
@ -753,7 +746,7 @@ func (a *AddrManager) HostToNetAddress(host string, port uint16, services wire.S
// the relevant .onion address.
func ipString(na *wire.NetAddress) string {
if isOnionCatTor(na) {
// We know now that na.IP is long enough.
// We know now that na.IP is long enogh.
base32 := base32.StdEncoding.EncodeToString(na.IP[6:])
return strings.ToLower(base32) + ".onion"
}
@ -902,7 +895,7 @@ func (a *AddrManager) Good(addr *wire.NetAddress) {
ka.lastattempt = now
ka.attempts = 0
// move to tried set, optionally evicting other addresses if needed.
// move to tried set, optionally evicting other addresses if neeed.
if ka.tried {
return
}
@ -974,7 +967,7 @@ func (a *AddrManager) Good(addr *wire.NetAddress) {
a.addrNew[newBucket][rmkey] = rmka
}
// SetServices sets the services for the given address to the provided value.
// SetServices sets the services for the giiven address to the provided value.
func (a *AddrManager) SetServices(addr *wire.NetAddress, services wire.ServiceFlag) {
a.mtx.Lock()
defer a.mtx.Unlock()
@ -1020,63 +1013,19 @@ func (a *AddrManager) AddLocalAddress(na *wire.NetAddress, priority AddressPrior
return nil
}
// HasLocalAddress asserts if the manager has the provided local address.
func (a *AddrManager) HasLocalAddress(na *wire.NetAddress) bool {
key := NetAddressKey(na)
a.lamtx.Lock()
_, ok := a.localAddresses[key]
a.lamtx.Unlock()
return ok
}
// FetchLocalAddresses fetches a summary of local addresses information for
// the getnetworkinfo rpc.
func (a *AddrManager) FetchLocalAddresses() []LocalAddr {
a.lamtx.Lock()
defer a.lamtx.Unlock()
addrs := make([]LocalAddr, 0, len(a.localAddresses))
for _, addr := range a.localAddresses {
la := LocalAddr{
Address: addr.na.IP.String(),
Port: addr.na.Port,
}
addrs = append(addrs, la)
}
return addrs
}
const (
// Unreachable represents a publicly unreachable connection state
// between two addresses.
Unreachable = 0
// Default represents the default connection state between
// two addresses.
Default = iota
// Teredo represents a connection state between two RFC4380 addresses.
Teredo
// Ipv6Weak represents a weak IPV6 connection state between two
// addresses.
Ipv6Weak
// Ipv4 represents an IPV4 connection state between two addresses.
Ipv4
// Ipv6Strong represents a connection state between two IPV6 addresses.
Ipv6Strong
// Private represents a connection state connect between two Tor addresses.
Private
)
// getReachabilityFrom returns the relative reachability of the provided local
// address to the provided remote address.
func getReachabilityFrom(localAddr, remoteAddr *wire.NetAddress) int {
const (
Unreachable = 0
Default = iota
Teredo
Ipv6Weak
Ipv4
Ipv6Strong
Private
)
if !IsRoutable(remoteAddr) {
return Unreachable
}
@ -1181,15 +1130,6 @@ func (a *AddrManager) GetBestLocalAddress(remoteAddr *wire.NetAddress) *wire.Net
return bestAddress
}
// IsPeerNaValid asserts if the provided local address is routable
// and reachable from the peer that suggested it.
func (a *AddrManager) IsPeerNaValid(localAddr, remoteAddr *wire.NetAddress) bool {
net := getNetwork(localAddr)
reach := getReachabilityFrom(localAddr, remoteAddr)
return (net == IPv4Address && reach == Ipv4) || (net == IPv6Address &&
(reach == Ipv6Weak || reach == Ipv6Strong || reach == Teredo))
}
// New returns a new Decred address manager.
// Use Start to begin processing asynchronous address updates.
// The address manager uses lookupFunc for necessary DNS lookups.

View File

@ -21,7 +21,7 @@ var (
ipNet("192.168.0.0", 16, 32),
}
// rfc2544Net specifies the IPv4 block as defined by RFC2544
// rfc2544Net specifies the the IPv4 block as defined by RFC2544
// (198.18.0.0/15)
rfc2544Net = ipNet("198.18.0.0", 15, 32)
@ -78,7 +78,7 @@ var (
// byte number. It then stores the first 6 bytes of the address as
// 0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43.
//
// This is the same range used by OnionCat, which is part of the
// This is the same range used by OnionCat, which is part part of the
// RFC4193 unique local IPv6 range.
//
// In summary the format is:
@ -118,33 +118,6 @@ func isOnionCatTor(na *wire.NetAddress) bool {
return onionCatNet.Contains(na.IP)
}
// NetworkAddress type is used to classify a network address.
type NetworkAddress int
const (
LocalAddress NetworkAddress = iota
IPv4Address
IPv6Address
OnionAddress
)
// getNetwork returns the network address type of the provided network address.
func getNetwork(na *wire.NetAddress) NetworkAddress {
switch {
case isLocal(na):
return LocalAddress
case isIPv4(na):
return IPv4Address
case isOnionCatTor(na):
return OnionAddress
default:
return IPv6Address
}
}
// isRFC1918 returns whether or not the passed address is part of the IPv4
// private network address space as defined by RFC1918 (10.0.0.0/8,
// 172.16.0.0/12, or 192.168.0.0/16).

View File

@ -1,7 +1,7 @@
bech32
==========
[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
[![Build Status](https://img.shields.io/travis/decred/dcrd.svg)](https://travis-ci.org/decred/dcrd/bech32)
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/decred/dcrd/bech32)

View File

@ -18,7 +18,7 @@ const charset = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
var gen = []int{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3}
// toBytes converts each character in the string 'chars' to the value of the
// index of the corresponding character in 'charset'.
// index of the correspoding character in 'charset'.
func toBytes(chars string) ([]byte, error) {
decoded := make([]byte, 0, len(chars))
for i := 0; i < len(chars); i++ {
@ -163,7 +163,7 @@ func DecodeNoLimit(bech string) (string, []byte, error) {
return "", nil, ErrInvalidLength(len(bech))
}
// Only ASCII characters between 33 and 126 are allowed.
// Only ASCII characters between 33 and 126 are allowed.
var hasLower, hasUpper bool
for i := 0; i < len(bech); i++ {
if bech[i] < 33 || bech[i] > 126 {

View File

@ -76,7 +76,7 @@ func TestBech32(t *testing.T) {
str, encoded)
}
// Flip a bit in the string and make sure it is caught.
// Flip a bit in the string an make sure it is caught.
pos := strings.LastIndexAny(str, "1")
flipped := str[:pos+1] + string((str[pos+1] ^ 1)) + str[pos+2:]
_, _, err = Decode(flipped)
@ -115,7 +115,7 @@ func TestCanDecodeUnlimtedBech32(t *testing.T) {
}
// BenchmarkEncodeDecodeCycle performs a benchmark for a full encode/decode
// cycle of a bech32 string. It also reports the allocation count, which we
// cycle of a bech32 string. It also reports the allocation count, which we
// expect to be 2 for a fully optimized cycle.
func BenchmarkEncodeDecodeCycle(b *testing.B) {

View File

@ -1,7 +1,7 @@
blockchain
==========
[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
[![Build Status](https://img.shields.io/travis/decred/dcrd.svg)](https://travis-ci.org/decred/dcrd)
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/decred/dcrd/blockchain)
@ -67,7 +67,17 @@ is by no means exhaustive:
attempts to insert a duplicate genesis block to illustrate how an invalid
block is handled.
* [CompactToBig Example](https://godoc.org/github.com/decred/dcrd/blockchain#example-CompactToBig)
Demonstrates how to convert the compact "bits" in a block header which
represent the target difficulty to a big integer and display it using the
typical hex notation.
* [BigToCompact Example](https://godoc.org/github.com/decred/dcrd/blockchain#example-BigToCompact)
Demonstrates how to convert how to convert a target difficulty into the
compact "bits" in a block header which represent that target difficulty.
## License
Package blockchain is licensed under the [copyfree](http://copyfree.org) ISC
License.

View File

@ -584,7 +584,7 @@ func (b *BlockChain) fetchBlockByNode(node *blockNode) (*dcrutil.Block, error) {
// pruneStakeNodes removes references to old stake nodes which should no
// longer be held in memory so as to keep the maximum memory usage down.
// It proceeds from the bestNode back to the determined minimum height node,
// finds all the relevant children, and then drops the stake nodes from
// finds all the relevant children, and then drops the the stake nodes from
// them by assigning nil and allowing the memory to be recovered by GC.
//
// This function MUST be called with the chain state lock held (for writes).
@ -914,7 +914,7 @@ func (b *BlockChain) disconnectBlock(node *blockNode, block, parent *dcrutil.Blo
}
// Update the transaction spend journal by removing the record
// that contains all txos spent by the block.
// that contains all txos spent by the block .
err = dbRemoveSpendJournalEntry(dbTx, block.Hash())
if err != nil {
return err
@ -1118,7 +1118,7 @@ func (b *BlockChain) reorganizeChainInternal(targetTip *blockNode) error {
tip = n.parent
}
// Load the fork block if there are blocks to attach and it's not already
// Load the fork block if there are blocks to attach and its not already
// loaded which will be the case if no nodes were detached. The fork block
// is used as the parent to the first node to be attached below.
forkBlock := nextBlockToDetach
@ -1413,7 +1413,7 @@ func (b *BlockChain) connectBestChain(node *blockNode, block, parent *dcrutil.Bl
// and flush the status changes to the database. It is safe to
// ignore any errors when flushing here as the changes will be
// flushed when a valid block is connected, and the worst case
// scenario if a block is invalid is it would need to be
// scenario if a block a invalid is it would need to be
// revalidated after a restart.
view := NewUtxoViewpoint()
view.SetBestHash(parentHash)
@ -1437,7 +1437,7 @@ func (b *BlockChain) connectBestChain(node *blockNode, block, parent *dcrutil.Bl
// In the fast add case the code to check the block connection
// was skipped, so the utxo view needs to load the referenced
// utxos, spend them, and add the new utxos being created by
// this block. Also, in the case the block votes against
// this block. Also, in the case the the block votes against
// the parent, its regular transaction tree must be
// disconnected.
if fastAdd {
@ -2064,13 +2064,6 @@ func New(config *Config) (*BlockChain, error) {
return nil, err
}
// Either use the subsidy cache provided by the caller or create a new
// one when one was not provided.
subsidyCache := config.SubsidyCache
if subsidyCache == nil {
subsidyCache = standalone.NewSubsidyCache(params)
}
b := BlockChain{
checkpointsByHeight: checkpointsByHeight,
deploymentVers: deploymentVers,
@ -2081,7 +2074,6 @@ func New(config *Config) (*BlockChain, error) {
sigCache: config.SigCache,
indexManager: config.IndexManager,
interrupt: config.Interrupt,
subsidyCache: subsidyCache,
index: newBlockIndex(config.DB),
bestChain: newChainView(nil),
orphans: make(map[chainhash.Hash]*orphanBlock),
@ -2095,7 +2087,6 @@ func New(config *Config) (*BlockChain, error) {
calcVoterVersionIntervalCache: make(map[[chainhash.HashSize]byte]uint32),
calcStakeVersionCache: make(map[[chainhash.HashSize]byte]uint32),
}
b.pruner = newChainPruner(&b)
// Initialize the chain state from the passed database. When the db
// does not yet contain any chain state, both it and the chain state
@ -2113,6 +2104,15 @@ func New(config *Config) (*BlockChain, error) {
}
}
// Either use the subsidy cache provided by the caller or create a new
// one when one was not provided.
subsidyCache := config.SubsidyCache
if subsidyCache == nil {
subsidyCache = standalone.NewSubsidyCache(b.chainParams)
}
b.subsidyCache = subsidyCache
b.pruner = newChainPruner(&b)
// The version 5 database upgrade requires a full reindex. Perform, or
// resume, the reindex as needed.
if err := b.maybeFinishV5Upgrade(); err != nil {

View File

@ -1,7 +1,7 @@
chaingen
========
[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
[![Build Status](https://travis-ci.org/decred/dcrd.png?branch=master)](https://travis-ci.org/decred/dcrd)
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/decred/dcrd/blockchain/chaingen)

View File

@ -51,7 +51,7 @@ func Example_basicUsage() {
g.AssertTipHeight(uint32(coinbaseMaturity) + 1)
// Output:
// bfb
// bp
// bm0
// bm1
// bm2

View File

@ -1152,7 +1152,7 @@ func (hp *hash256prng) Hash256Rand() uint32 {
}
// Roll over the entire PRNG by re-hashing the seed when the hash
// iterator index overflows a uint32.
// iterator index overlows a uint32.
if hp.idx > math.MaxUint32 {
hp.seed = chainhash.HashH(hp.seed[:])
hp.cachedHash = hp.seed
@ -1568,7 +1568,7 @@ func (g *Generator) ReplaceVoteBitsN(voteNum int, voteBits uint16) func(*wire.Ms
stx := b.STransactions[voteNum]
if !isVoteTx(stx) {
panic(fmt.Sprintf("attempt to replace non-vote "+
"transaction #%d for block %s", voteNum,
"transaction #%d for for block %s", voteNum,
b.BlockHash()))
}
@ -2458,7 +2458,7 @@ func (g *Generator) AssertTipBlockSigOpsCount(expected int) {
}
}
// AssertTipBlockSize panics if the current tip block associated with the
// AssertTipBlockSize panics if the if the current tip block associated with the
// generator does not have the specified size when serialized.
func (g *Generator) AssertTipBlockSize(expected int) {
serializeSize := g.tip.SerializeSize()

View File

@ -130,48 +130,27 @@ func deserializeToMinimalOutputs(serialized []byte) ([]*stake.MinimalOutput, int
}
// readDeserializeSizeOfMinimalOutputs reads the size of the stored set of
// minimal outputs without allocating memory for the structs themselves.
func readDeserializeSizeOfMinimalOutputs(serialized []byte) (int, error) {
// minimal outputs without allocating memory for the structs themselves. It
// will panic if the function reads outside of memory bounds.
func readDeserializeSizeOfMinimalOutputs(serialized []byte) int {
numOutputs, offset := deserializeVLQ(serialized)
if offset == 0 {
return offset, errDeserialize("unexpected end of " +
"data during decoding (num outputs)")
}
for i := 0; i < int(numOutputs); i++ {
// Amount
_, bytesRead := deserializeVLQ(serialized[offset:])
if bytesRead == 0 {
return offset, errDeserialize("unexpected end of " +
"data during decoding (output amount)")
}
offset += bytesRead
// Script version
_, bytesRead = deserializeVLQ(serialized[offset:])
if bytesRead == 0 {
return offset, errDeserialize("unexpected end of " +
"data during decoding (output script version)")
}
offset += bytesRead
// Script
var scriptSize uint64
scriptSize, bytesRead = deserializeVLQ(serialized[offset:])
if bytesRead == 0 {
return offset, errDeserialize("unexpected end of " +
"data during decoding (output script size)")
}
offset += bytesRead
if uint64(len(serialized[offset:])) < scriptSize {
return offset, errDeserialize("unexpected end of " +
"data during decoding (output script)")
}
offset += int(scriptSize)
}
return offset, nil
return offset
}
// ConvertUtxosToMinimalOutputs converts the contents of a UTX to a series of
@ -489,7 +468,7 @@ func dbMaybeStoreBlock(dbTx database.Tx, block *dcrutil.Block) error {
// NOTE: The transaction version and flags are only encoded when the spent
// txout was the final unspent output of the containing transaction.
// Otherwise, the header code will be 0 and the version is not serialized at
// all. This is done because that information is only needed when the utxo
// all. This is done because that information is only needed when the utxo
// set no longer has it.
//
// Example:
@ -511,7 +490,7 @@ type spentTxOut struct {
amount int64 // The amount of the output.
txType stake.TxType // The stake type of the transaction.
height uint32 // Height of the block containing the tx.
height uint32 // Height of the the block containing the tx.
index uint32 // Index in the block of the transaction.
scriptVersion uint16 // The version of the scripting language.
txVersion uint16 // The version of creating tx.
@ -586,11 +565,27 @@ func putSpentTxOut(target []byte, stxo *spentTxOut) int {
// An error will be returned if the version is not serialized as a part of the
// stxo and is also not provided to the function.
func decodeSpentTxOut(serialized []byte, stxo *spentTxOut, amount int64, height uint32, index uint32) (int, error) {
// Deserialize the flags.
// Ensure there are bytes to decode.
if len(serialized) == 0 {
return 0, errDeserialize("no serialized bytes")
}
// Deserialize the header code.
flags, offset := deserializeVLQ(serialized)
if offset == 0 {
return 0, errDeserialize("unexpected end of data during " +
"decoding (flags)")
if offset >= len(serialized) {
return offset, errDeserialize("unexpected end of data after " +
"spent tx out flags")
}
// Decode the flags. If the flags are non-zero, it means that the
// transaction was fully spent at this spend.
if decodeFlagsFullySpent(byte(flags)) {
isCoinBase, hasExpiry, txType, _ := decodeFlags(byte(flags))
stxo.isCoinBase = isCoinBase
stxo.hasExpiry = hasExpiry
stxo.txType = txType
stxo.txFullySpent = true
}
// Decode the compressed txout. We pass false for the amount flag,
@ -614,28 +609,22 @@ func decodeSpentTxOut(serialized []byte, stxo *spentTxOut, amount int64, height
// Deserialize the containing transaction if the flags indicate that
// the transaction has been fully spent.
if decodeFlagsFullySpent(byte(flags)) {
isCoinBase, hasExpiry, txType, _ := decodeFlags(byte(flags))
stxo.isCoinBase = isCoinBase
stxo.hasExpiry = hasExpiry
stxo.txType = txType
stxo.txFullySpent = true
txVersion, bytesRead := deserializeVLQ(serialized[offset:])
if bytesRead == 0 {
return offset, errDeserialize("unexpected end of " +
"data during decoding (tx version)")
}
offset += bytesRead
if offset == 0 || offset > len(serialized) {
return offset, errDeserialize("unexpected end of data " +
"after version")
}
stxo.txVersion = uint16(txVersion)
if stxo.txType == stake.TxTypeSStx {
sz, err := readDeserializeSizeOfMinimalOutputs(serialized[offset:])
if err != nil {
return offset + sz, errDeserialize(fmt.Sprintf("unable to decode "+
"ticket outputs: %v", err))
sz := readDeserializeSizeOfMinimalOutputs(serialized[offset:])
if sz == 0 || sz > len(serialized[offset:]) {
return offset, errDeserialize("corrupt data for ticket " +
"fully spent stxo stakeextra")
}
stakeExtra := make([]byte, sz)
copy(stakeExtra, serialized[offset:offset+sz])
stxo.stakeExtra = stakeExtra
@ -652,7 +641,7 @@ func decodeSpentTxOut(serialized []byte, stxo *spentTxOut, amount int64, height
// Since the serialization format is not self describing, as noted in the
// format comments, this function also requires the transactions that spend the
// txouts and a utxo view that contains any remaining existing utxos in the
// transactions referenced by the inputs to the passed transactions.
// transactions referenced by the inputs to the passed transasctions.
func deserializeSpendJournalEntry(serialized []byte, txns []*wire.MsgTx) ([]spentTxOut, error) {
// Calculate the total number of stxos.
var numStxos int
@ -1450,16 +1439,6 @@ func dbPutBestState(dbTx database.Tx, snapshot *BestState, workSum *big.Int) err
return dbTx.Metadata().Put(dbnamespace.ChainStateKeyName, serializedData)
}
// dbFetchBestState uses an existing database transaction to fetch the best
// chain state.
func dbFetchBestState(dbTx database.Tx) (bestChainState, error) {
// Fetch the stored chain state from the database metadata.
meta := dbTx.Metadata()
serializedData := meta.Get(dbnamespace.ChainStateKeyName)
log.Tracef("Serialized chain state: %x", serializedData)
return deserializeBestChainState(serializedData)
}
// createChainState initializes both the database and the chain state to the
// genesis block. This includes creating the necessary buckets and inserting
// the genesis block, so it must only be called on an uninitialized database.
@ -1547,76 +1526,6 @@ func (b *BlockChain) createChainState() error {
return err
}
// loadBlockIndex loads all of the block index entries from the database and
// constructs the block index into the provided index parameter. It is not safe
// for concurrent access as it is only intended to be used during initialization
// and database migration.
func loadBlockIndex(dbTx database.Tx, genesisHash *chainhash.Hash, index *blockIndex) error {
// Determine how many blocks will be loaded into the index in order to
// allocate the right amount as a single alloc versus a whole bunch of
// little ones to reduce pressure on the GC.
meta := dbTx.Metadata()
blockIndexBucket := meta.Bucket(dbnamespace.BlockIndexBucketName)
var blockCount int32
cursor := blockIndexBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
blockCount++
}
blockNodes := make([]blockNode, blockCount)
// Load all of the block index entries and construct the block index
// accordingly.
//
// NOTE: No locks are used on the block index here since this is
// initialization code.
var i int32
var lastNode *blockNode
cursor = blockIndexBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
entry, err := deserializeBlockIndexEntry(cursor.Value())
if err != nil {
return err
}
header := &entry.header
// Determine the parent block node. Since the block headers are
// iterated in order of height, there is a very good chance the
// previous header processed is the parent.
var parent *blockNode
if lastNode == nil {
blockHash := header.BlockHash()
if blockHash != *genesisHash {
return AssertError(fmt.Sprintf("loadBlockIndex: expected "+
"first entry in block index to be genesis block, "+
"found %s", blockHash))
}
} else if header.PrevBlock == lastNode.hash {
parent = lastNode
} else {
parent = index.lookupNode(&header.PrevBlock)
if parent == nil {
return AssertError(fmt.Sprintf("loadBlockIndex: could not "+
"find parent for block %s", header.BlockHash()))
}
}
// Initialize the block node, connect it, and add it to the block
// index.
node := &blockNodes[i]
initBlockNode(node, header, parent)
node.status = entry.status
node.ticketsVoted = entry.ticketsVoted
node.ticketsRevoked = entry.ticketsRevoked
node.votes = entry.voteInfo
index.addNode(node)
lastNode = node
i++
}
return nil
}
// initChainState attempts to load and initialize the chain state from the
// database. When the db does not yet contain any chain state, both it and the
// chain state are initialized to the genesis block.
@ -1719,8 +1628,17 @@ func (b *BlockChain) initChainState() error {
// Attempt to load the chain state from the database.
err = b.db.View(func(dbTx database.Tx) error {
// Fetch the stored best chain state from the database.
state, err := dbFetchBestState(dbTx)
// Fetch the stored chain state from the database metadata.
// When it doesn't exist, it means the database hasn't been
// initialized for use with chain yet, so break out now to allow
// that to happen under a writable database transaction.
meta := dbTx.Metadata()
serializedData := meta.Get(dbnamespace.ChainStateKeyName)
if serializedData == nil {
return nil
}
log.Tracef("Serialized chain state: %x", serializedData)
state, err := deserializeBestChainState(serializedData)
if err != nil {
return err
}
@ -1728,11 +1646,65 @@ func (b *BlockChain) initChainState() error {
log.Infof("Loading block index...")
bidxStart := time.Now()
// Load all of the block index entries from the database and
// construct the block index.
err = loadBlockIndex(dbTx, &b.chainParams.GenesisHash, b.index)
if err != nil {
return err
// Determine how many blocks will be loaded into the index in order to
// allocate the right amount as a single alloc versus a whole bunch of
// littles ones to reduce pressure on the GC.
blockIndexBucket := meta.Bucket(dbnamespace.BlockIndexBucketName)
var blockCount int32
cursor := blockIndexBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
blockCount++
}
blockNodes := make([]blockNode, blockCount)
// Load all of the block index entries and construct the block index
// accordingly.
//
// NOTE: No locks are used on the block index here since this is
// initialization code.
var i int32
var lastNode *blockNode
cursor = blockIndexBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
entry, err := deserializeBlockIndexEntry(cursor.Value())
if err != nil {
return err
}
header := &entry.header
// Determine the parent block node. Since the block headers are
// iterated in order of height, there is a very good chance the
// previous header processed is the parent.
var parent *blockNode
if lastNode == nil {
blockHash := header.BlockHash()
if blockHash != b.chainParams.GenesisHash {
return AssertError(fmt.Sprintf("initChainState: expected "+
"first entry in block index to be genesis block, "+
"found %s", blockHash))
}
} else if header.PrevBlock == lastNode.hash {
parent = lastNode
} else {
parent = b.index.lookupNode(&header.PrevBlock)
if parent == nil {
return AssertError(fmt.Sprintf("initChainState: could "+
"not find parent for block %s", header.BlockHash()))
}
}
// Initialize the block node, connect it, and add it to the block
// index.
node := &blockNodes[i]
initBlockNode(node, header, parent)
node.status = entry.status
node.ticketsVoted = entry.ticketsVoted
node.ticketsRevoked = entry.ticketsRevoked
node.votes = entry.voteInfo
b.index.addNode(node)
lastNode = node
i++
}
// Set the best chain to the stored best state.

View File

@ -81,7 +81,7 @@ func TestErrNotInMainChain(t *testing.T) {
// Ensure the stringized output for the error is as expected.
if err.Error() != errStr {
t.Fatalf("errNotInMainChain returned unexpected error string - "+
t.Fatalf("errNotInMainChain retuned unexpected error string - "+
"got %q, want %q", err.Error(), errStr)
}
@ -493,81 +493,53 @@ func TestStxoDecodeErrors(t *testing.T) {
tests := []struct {
name string
stxo spentTxOut
txVersion int32 // When the txout is not fully spent.
serialized []byte
errType error
bytesRead int // Expected number of bytes read.
errType error
}{
{
// [EOF]
name: "nothing serialized (no flags)",
name: "nothing serialized",
stxo: spentTxOut{},
serialized: hexToBytes(""),
errType: errDeserialize(""),
bytesRead: 0,
},
{
// [<flags 00> EOF]
name: "no compressed txout script version",
name: "no data after flags w/o version",
stxo: spentTxOut{},
serialized: hexToBytes("00"),
errType: errDeserialize(""),
bytesRead: 1,
},
{
// [<flags 10> <script version 00> EOF]
name: "no tx version data after empty script for a fully spent regular stxo",
name: "no data after flags code",
stxo: spentTxOut{},
serialized: hexToBytes("1000"),
serialized: hexToBytes("14"),
errType: errDeserialize(""),
bytesRead: 2,
bytesRead: 1,
},
{
// [<flags 10> <script version 00> <compressed pk script 01 6e ...> EOF]
name: "no tx version data after a pay-to-script-hash script for a fully spent regular stxo",
name: "no tx version data after script",
stxo: spentTxOut{},
serialized: hexToBytes("1000016edbc6c4d31bae9f1ccc38538a114bf42de65e86"),
serialized: hexToBytes("1400016edbc6c4d31bae9f1ccc38538a114bf42de65e86"),
errType: errDeserialize(""),
bytesRead: 23,
},
{
// [<flags 14> <script version 00> <compressed pk script 01 6e ...> <tx version 01> EOF]
name: "no stakeextra data after script for a fully spent ticket stxo",
name: "no stakeextra data after script for ticket",
stxo: spentTxOut{},
serialized: hexToBytes("1400016edbc6c4d31bae9f1ccc38538a114bf42de65e8601"),
errType: errDeserialize(""),
bytesRead: 24,
},
{
// [<flags 14> <script version 00> <compressed pk script 01 6e ...> <tx version 01> <stakeextra {num outputs 01}> EOF]
name: "truncated stakeextra data after script for a fully spent ticket stxo (num outputs only)",
name: "incomplete compressed txout",
stxo: spentTxOut{},
serialized: hexToBytes("1400016edbc6c4d31bae9f1ccc38538a114bf42de65e860101"),
txVersion: 1,
serialized: hexToBytes("1432"),
errType: errDeserialize(""),
bytesRead: 25,
},
{
// [<flags 14> <script version 00> <compressed pk script 01 6e ...> <tx version 01> <stakeextra {num outputs 01} {amount 0f}> EOF]
name: "truncated stakeextra data after script for a fully spent ticket stxo (num outputs and amount only)",
stxo: spentTxOut{},
serialized: hexToBytes("1400016edbc6c4d31bae9f1ccc38538a114bf42de65e8601010f"),
errType: errDeserialize(""),
bytesRead: 26,
},
{
// [<flags 14> <script version 00> <compressed pk script 01 6e ...> <tx version 01> <stakeextra {num outputs 01} {amount 0f} {script version 00}> EOF]
name: "truncated stakeextra data after script for a fully spent ticket stxo (num outputs, amount, and script version only)",
stxo: spentTxOut{},
serialized: hexToBytes("1400016edbc6c4d31bae9f1ccc38538a114bf42de65e8601010f00"),
errType: errDeserialize(""),
bytesRead: 27,
},
{
// [<flags 14> <script version 00> <compressed pk script 01 6e ...> <tx version 01> <stakeextra {num outputs 01} {amount 0f} {script version 00} {script size 1a} {25 bytes of script instead of 26}> EOF]
name: "truncated stakeextra data after script for a fully spent ticket stxo (script size specified as 0x1a, but only 0x19 bytes provided)",
stxo: spentTxOut{},
serialized: hexToBytes("1400016edbc6c4d31bae9f1ccc38538a114bf42de65e8601010f001aba76a9140cdf9941c0c221243cb8672cd1ad2c4c0933850588"),
errType: errDeserialize(""),
bytesRead: 28,
bytesRead: 2,
},
}
@ -931,7 +903,7 @@ func TestSpendJournalErrors(t *testing.T) {
}
// TestUtxoSerialization ensures serializing and deserializing unspent
// transaction output entries works as expected.
// transaction output entries works as expected.
func TestUtxoSerialization(t *testing.T) {
t.Parallel()

View File

@ -259,7 +259,7 @@ func (c *chainView) next(node *blockNode) *blockNode {
}
// Next returns the successor to the provided node for the chain view. It will
// return nil if there is no successor or the provided node is not part of the
// return nil if there is no successor or the provided node is not part of the
// view.
//
// For example, assume a block chain with a side chain as depicted below:

View File

@ -375,7 +375,7 @@ testLoop:
// TestChainViewNil ensures that creating and accessing a nil chain view behaves
// as expected.
func TestChainViewNil(t *testing.T) {
// Ensure two uninitialized views are considered equal.
// Ensure two uninitialized views are considered equal.
view := newChainView(nil)
if !view.Equals(newChainView(nil)) {
t.Fatal("uninitialized nil views unequal")

View File

@ -116,7 +116,7 @@ func chainSetup(dbName string, params *chaincfg.Params) (*BlockChain, func(), er
return chain, teardown, nil
}
// newFakeChain returns a chain that is usable for synthetic tests. It is
// newFakeChain returns a chain that is usable for synthetic tests. It is
// important to note that this chain has no database associated with it, so
// it is not usable with all functions and the tests must take care when making
// use of it.
@ -651,7 +651,7 @@ func (g *chaingenHarness) AdvanceToStakeValidationHeight() {
func (g *chaingenHarness) AdvanceFromSVHToActiveAgenda(voteID string) {
g.t.Helper()
// Find the correct deployment for the provided ID along with the yes
// Find the correct deployment for the provided ID along with the yes
// vote choice within it.
params := g.Params()
deploymentVer, deployment, err := findDeployment(params, voteID)

View File

@ -9,7 +9,7 @@ import (
"fmt"
"github.com/decred/dcrd/blockchain/stake/v2"
"github.com/decred/dcrd/dcrec/secp256k1/v2"
"github.com/decred/dcrd/dcrec/secp256k1"
"github.com/decred/dcrd/txscript/v2"
)
@ -653,9 +653,9 @@ func decodeCompressedTxOut(serialized []byte, compressionVersion uint32,
// remaining for the compressed script.
var compressedAmount uint64
compressedAmount, bytesRead = deserializeVLQ(serialized)
if bytesRead == 0 {
if bytesRead >= len(serialized) {
return 0, 0, nil, bytesRead, errDeserialize("unexpected end of " +
"data during decoding (compressed amount)")
"data after compressed amount")
}
amount = int64(decompressTxOutAmount(compressedAmount))
offset += bytesRead
@ -664,17 +664,12 @@ func decodeCompressedTxOut(serialized []byte, compressionVersion uint32,
// Decode the script version.
var scriptVersion uint64
scriptVersion, bytesRead = deserializeVLQ(serialized[offset:])
if bytesRead == 0 {
return 0, 0, nil, offset, errDeserialize("unexpected end of " +
"data during decoding (script version)")
}
offset += bytesRead
// Decode the compressed script size and ensure there are enough bytes
// left in the slice for it.
scriptSize := decodeCompressedScriptSize(serialized[offset:],
compressionVersion)
// Note: scriptSize == 0 is OK (an empty compressed script is valid)
if scriptSize < 0 {
return 0, 0, nil, offset, errDeserialize("negative script size")
}
@ -723,7 +718,7 @@ const (
// from the flags byte.
txTypeBitmask = 0x0c
// txTypeShift is the number of bits to shift flags to the right to yield the
// txTypeShift is the number of bits to shift flags to the right to yield the
// correct integer value after applying the bitmask with AND.
txTypeShift = 2
)

View File

@ -20,6 +20,14 @@ var (
// bigZero is 0 represented as a big.Int. It is defined here to avoid
// the overhead of creating it multiple times.
bigZero = big.NewInt(0)
// bigOne is 1 represented as a big.Int. It is defined here to avoid
// the overhead of creating it multiple times.
bigOne = big.NewInt(1)
// oneLsh256 is 1 shifted left 256 bits. It is defined here to avoid
// the overhead of creating it multiple times.
oneLsh256 = new(big.Int).Lsh(bigOne, 256)
)
// calcEasiestDifficulty calculates the easiest possible difficulty that a block
@ -606,7 +614,7 @@ func calcNextStakeDiffV2(params *chaincfg.Params, nextHeight, curDiff, prevPoolS
// Calculate the difficulty by multiplying the old stake difficulty
// with two ratios that represent a force to counteract the relative
// change in the pool size (Fc) and a restorative force to push the pool
// size towards the target value (Fr).
// size towards the target value (Fr).
//
// Per DCP0001, the generalized equation is:
//
@ -632,7 +640,7 @@ func calcNextStakeDiffV2(params *chaincfg.Params, nextHeight, curDiff, prevPoolS
// nextDiff = -----------------------------------
// prevPoolSizeAll * targetPoolSizeAll
//
// Further, the Sub parameter must calculate the denominator first using
// Further, the Sub parameter must calculate the denominator first using
// integer math.
targetPoolSizeAll := votesPerBlock * (ticketPoolSize + ticketMaturity)
curPoolSizeAllBig := big.NewInt(curPoolSizeAll)

View File

@ -110,8 +110,8 @@ const (
// ErrUnexpectedDifficulty indicates specified bits do not align with
// the expected value either because it doesn't match the calculated
// value based on difficulty regarding the rules or it is out of the
// valid range.
// value based on difficulty regarding the rules or it is out of the valid
// range.
ErrUnexpectedDifficulty
// ErrHighHash indicates the block does not hash to a value which is
@ -390,7 +390,7 @@ const (
ErrRegTxCreateStakeOut
// ErrInvalidFinalState indicates that the final state of the PRNG included
// in the block differed from the calculated final state.
// in the block differed from the calculated final state.
ErrInvalidFinalState
// ErrPoolSize indicates an error in the ticket pool size for this block.
@ -615,7 +615,7 @@ func (e RuleError) Error() string {
return e.Description
}
// ruleError creates a RuleError given a set of arguments.
// ruleError creates a RuleError given a set of arguments.
func ruleError(c ErrorCode, desc string) RuleError {
return RuleError{ErrorCode: c, Description: desc}
}

View File

@ -7,6 +7,7 @@ package blockchain_test
import (
"fmt"
"math/big"
"os"
"path/filepath"
@ -18,7 +19,7 @@ import (
)
// This example demonstrates how to create a new chain instance and use
// ProcessBlock to attempt to add a block to the chain. As the package
// ProcessBlock to attempt to add a block to the chain. As the package
// overview documentation describes, this includes all of the Decred consensus
// rules. This example intentionally attempts to insert a duplicate genesis
// block to illustrate how an invalid block is handled.
@ -75,3 +76,37 @@ func ExampleBlockChain_ProcessBlock() {
// Output:
// Failed to process block: already have block 267a53b5ee86c24a48ec37aee4f4e7c0c4004892b7259e695e9f5b321f1ab9d2
}
// This example demonstrates how to convert the compact "bits" in a block header
// which represent the target difficulty to a big integer and display it using
// the typical hex notation.
func ExampleCompactToBig() {
// Convert the bits from block 300000 in the main Decred block chain.
bits := uint32(419465580)
targetDifficulty := blockchain.CompactToBig(bits)
// Display it in hex.
fmt.Printf("%064x\n", targetDifficulty.Bytes())
// Output:
// 0000000000000000896c00000000000000000000000000000000000000000000
}
// This example demonstrates how to convert a target difficulty into the compact
// "bits" in a block header which represent that target difficulty .
func ExampleBigToCompact() {
// Convert the target difficulty from block 300000 in the main block
// chain to compact form.
t := "0000000000000000896c00000000000000000000000000000000000000000000"
targetDifficulty, success := new(big.Int).SetString(t, 16)
if !success {
fmt.Println("invalid target difficulty")
return
}
bits := blockchain.BigToCompact(targetDifficulty)
fmt.Println(bits)
// Output:
// 419465580
}

View File

@ -1,7 +1,7 @@
fullblocktests
==============
[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
[![Build Status](https://img.shields.io/travis/decred/dcrd.svg)](https://travis-ci.org/decred/dcrd)
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/decred/dcrd/blockchain/fullblocktests)

View File

@ -17,7 +17,7 @@ import (
"github.com/decred/dcrd/blockchain/v2/chaingen"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/dcrec"
"github.com/decred/dcrd/dcrec/secp256k1/v2"
"github.com/decred/dcrd/dcrec/secp256k1"
"github.com/decred/dcrd/dcrutil/v2"
"github.com/decred/dcrd/txscript/v2"
"github.com/decred/dcrd/wire"
@ -272,7 +272,7 @@ func replaceStakeSigScript(sigScript []byte) func(*wire.MsgBlock) {
}
// additionalPoWTx returns a function that itself takes a block and modifies it
// by adding the provided transaction to the regular transaction tree.
// by adding the provided transaction to the regular transaction tree.
func additionalPoWTx(tx *wire.MsgTx) func(*wire.MsgBlock) {
return func(b *wire.MsgBlock) {
b.AddTransaction(tx)
@ -307,8 +307,8 @@ func encodeNonCanonicalBlock(b *wire.MsgBlock) []byte {
return buf.Bytes()
}
// assertTipsNonCanonicalBlockSize panics if the current tip block associated
// with the generator does not have the specified non-canonical size
// assertTipsNonCanonicalBlockSize panics if the current tip block
// associated with the generator does not have the specified non-canonical size
// when serialized.
func assertTipNonCanonicalBlockSize(g *chaingen.Generator, expected int) {
tip := g.Tip()
@ -726,7 +726,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// ---------------------------------------------------------------------
// The comments below identify the structure of the chain being built.
//
// The values in parenthesis represent which outputs are being spent.
// The values in parenthesis represent which outputs are being spent.
//
// For example, b1(0) indicates the first collected spendable output
// which, due to the code above to create the correct number of blocks,
@ -1879,8 +1879,8 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// Create block with duplicate transactions in the regular transaction
// tree.
//
// This test relies on the shape of the merkle tree to test the
// intended condition. That is the reason for the assertion.
// This test relies on the shape of the merkle tree to test
// the intended condition. That is the reason for the assertion.
//
// ... -> brs3(14)
// \-> bmf14(15)

View File

@ -3,22 +3,16 @@ module github.com/decred/dcrd/blockchain/v2
go 1.11
require (
github.com/dchest/blake256 v1.1.0 // indirect
github.com/decred/dcrd/blockchain/stake/v2 v2.0.1
github.com/decred/dcrd/blockchain/standalone v1.0.0
github.com/decred/dcrd/chaincfg/chainhash v1.0.2
github.com/decred/dcrd/chaincfg/v2 v2.2.0
github.com/decred/dcrd/database/v2 v2.0.0
github.com/decred/dcrd/dcrec v1.0.0
github.com/decred/dcrd/dcrec/secp256k1/v2 v2.0.0
github.com/decred/dcrd/dcrec/secp256k1 v1.0.2
github.com/decred/dcrd/dcrutil/v2 v2.0.0
github.com/decred/dcrd/gcs/v2 v2.0.0-00010101000000-000000000000
github.com/decred/dcrd/gcs v1.1.0
github.com/decred/dcrd/txscript/v2 v2.0.0
github.com/decred/dcrd/wire v1.2.0
github.com/decred/slog v1.0.0
)
replace (
github.com/decred/dcrd/chaincfg/v2 => ../chaincfg
github.com/decred/dcrd/gcs/v2 => ../gcs
)

View File

@ -10,8 +10,6 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dchest/blake256 v1.0.0 h1:6gUgI5MHdz9g0TdrgKqXsoDX+Zjxmm1Sc6OsoGru50I=
github.com/dchest/blake256 v1.0.0/go.mod h1:xXNWCE1jsAP8DAjP+rKw2MbeqLczjI3TRx2VK+9OEYY=
github.com/dchest/blake256 v1.1.0 h1:4AuEhGPT/3TTKFhTfBpZ8hgZE7wJpawcYaEawwsbtqM=
github.com/dchest/blake256 v1.1.0/go.mod h1:xXNWCE1jsAP8DAjP+rKw2MbeqLczjI3TRx2VK+9OEYY=
github.com/dchest/siphash v1.2.1 h1:4cLinnzVJDKxTCl9B01807Yiy+W7ZzVHj/KIroQRvT4=
github.com/dchest/siphash v1.2.1/go.mod h1:q+IRvb2gOSrUnYoPqHiyHXS0FOBBOdl6tONBlVnOnt4=
github.com/decred/base58 v1.0.0 h1:BVi1FQCThIjZ0ehG+I99NJ51o0xcc9A/fDKhmJxY6+w=
@ -43,10 +41,10 @@ github.com/decred/dcrd/dcrec/secp256k1 v1.0.1 h1:EFWVd1p0t0Y5tnsm/dJujgV0ORogRJ6
github.com/decred/dcrd/dcrec/secp256k1 v1.0.1/go.mod h1:lhu4eZFSfTJWUnR3CFRcpD+Vta0KUAqnhTsTksHXgy0=
github.com/decred/dcrd/dcrec/secp256k1 v1.0.2 h1:awk7sYJ4pGWmtkiGHFfctztJjHMKGLV8jctGQhAbKe0=
github.com/decred/dcrd/dcrec/secp256k1 v1.0.2/go.mod h1:CHTUIVfmDDd0KFVFpNX1pFVCBUegxW387nN0IGwNKR0=
github.com/decred/dcrd/dcrec/secp256k1/v2 v2.0.0 h1:3GIJYXQDAKpLEFriGFN8SbSffak10UXHGdIcFaMPykY=
github.com/decred/dcrd/dcrec/secp256k1/v2 v2.0.0/go.mod h1:3s92l0paYkZoIHuj4X93Teg/HB7eGM9x/zokGw+u4mY=
github.com/decred/dcrd/dcrutil/v2 v2.0.0 h1:HTqn2tZ8eqBF4y3hJwjyKBmJt16y7/HjzpE82E/crhY=
github.com/decred/dcrd/dcrutil/v2 v2.0.0/go.mod h1:gUshVAXpd51DlcEhr51QfWL2HJGkMDM1U8chY+9VvQg=
github.com/decred/dcrd/gcs v1.1.0 h1:djuYzaFUzUTJR+6ulMSRZOQ+P9rxtIyuxQeViAEfB8s=
github.com/decred/dcrd/gcs v1.1.0/go.mod h1:yBjhj217Vw5lw3aKnCdHip7fYb9zwMos8bCy5s79M9w=
github.com/decred/dcrd/txscript/v2 v2.0.0 h1:So+NcQY58mDHDN2N2edED5syGZp2ed8Ltxj8mDE5CAs=
github.com/decred/dcrd/txscript/v2 v2.0.0/go.mod h1:WStcyYYJa+PHJB4XjrLDRzV96/Z4thtsu8mZoVrU6C0=
github.com/decred/dcrd/wire v1.2.0 h1:HqJVB7vcklIguzFWgRXw/WYCQ9cD3bUC5TKj53i1Hng=

View File

@ -1,7 +1,7 @@
indexers
========
[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
[![Build Status](https://travis-ci.org/decred/dcrd.png?branch=master)](https://travis-ci.org/decred/dcrd)
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://godoc.org/github.com/decred/dcrd/blockchain/indexers?status.png)](https://godoc.org/github.com/decred/dcrd/blockchain/indexers)

View File

@ -42,8 +42,7 @@ const (
// consumes. It consists of the address key + 1 byte for the level.
levelKeySize = addrKeySize + 1
// levelOffset is the offset in the level key which identifies the
// level.
// levelOffset is the offset in the level key which identifies the level.
levelOffset = levelKeySize - 1
// addrKeyTypePubKeyHash is the address type in an address key which
@ -160,7 +159,7 @@ func serializeAddrIndexEntry(blockID uint32, txLoc wire.TxLoc, blockIndex uint32
// deserializeAddrIndexEntry decodes the passed serialized byte slice into the
// provided region struct according to the format described in detail above and
// uses the passed block hash fetching function in order to convert the block ID
// uses the passed block hash fetching function in order to convert the block ID
// to the associated block hash.
func deserializeAddrIndexEntry(serialized []byte, entry *TxIndexEntry, fetchBlockHash fetchBlockHashFunc) error {
// Ensure there are enough bytes to decode.
@ -362,7 +361,7 @@ func maxEntriesForLevel(level uint8) int {
return numEntries
}
// dbRemoveAddrIndexEntries removes the specified number of entries from
// dbRemoveAddrIndexEntries removes the specified number of entries from
// the address index for the provided key. An assertion error will be returned
// if the count exceeds the total number of entries in the index.
func dbRemoveAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, count int) error {
@ -504,7 +503,7 @@ func dbRemoveAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte,
// be half full. When that is the case, move it up a level to
// simplify the code below which backfills all lower levels that
// are still empty. This also means the current level will be
// empty, so the loop will perform another iteration to
// empty, so the loop will perform another iteration to
// potentially backfill this level with data from the next one.
curLevelMaxEntries := maxEntriesForLevel(level)
if len(levelData)/txEntrySize != curLevelMaxEntries {

View File

@ -118,7 +118,7 @@ func (b *addrIndexBucket) sanityCheck(addrKey [addrKeySize]byte, expectedTotal i
var totalEntries int
maxEntries := level0MaxEntries
for level := uint8(0); level <= highestLevel; level++ {
// Level 0 can't have more entries than the max allowed if the
// Level 0 can't have more entries than the max allowed if the
// levels after it have data and it can't be empty. All other
// levels must either be half full or full.
data := b.levels[keyForLevel(addrKey, level)]

View File

@ -14,8 +14,8 @@ import (
"github.com/decred/dcrd/chaincfg/v2"
"github.com/decred/dcrd/database/v2"
"github.com/decred/dcrd/dcrutil/v2"
"github.com/decred/dcrd/gcs/v2"
"github.com/decred/dcrd/gcs/v2/blockcf"
"github.com/decred/dcrd/gcs"
"github.com/decred/dcrd/gcs/blockcf"
"github.com/decred/dcrd/wire"
)
@ -174,7 +174,7 @@ func (idx *CFIndex) Create(dbTx database.Tx) error {
// storeFilter stores a given filter, and performs the steps needed to
// generate the filter's header.
func storeFilter(dbTx database.Tx, block *dcrutil.Block, f *gcs.FilterV1, filterType wire.FilterType) error {
func storeFilter(dbTx database.Tx, block *dcrutil.Block, f *gcs.Filter, filterType wire.FilterType) error {
if uint8(filterType) > maxFilterType {
return errors.New("unsupported filter type")
}
@ -187,7 +187,7 @@ func storeFilter(dbTx database.Tx, block *dcrutil.Block, f *gcs.FilterV1, filter
h := block.Hash()
var basicFilterBytes []byte
if f != nil {
basicFilterBytes = f.Bytes()
basicFilterBytes = f.NBytes()
}
err := dbStoreFilter(dbTx, fkey, h, basicFilterBytes)
if err != nil {
@ -215,7 +215,7 @@ func storeFilter(dbTx database.Tx, block *dcrutil.Block, f *gcs.FilterV1, filter
// every passed block. This is part of the Indexer interface.
func (idx *CFIndex) ConnectBlock(dbTx database.Tx, block, parent *dcrutil.Block, view *blockchain.UtxoViewpoint) error {
f, err := blockcf.Regular(block.MsgBlock())
if err != nil {
if err != nil && err != gcs.ErrNoData {
return err
}
@ -225,7 +225,7 @@ func (idx *CFIndex) ConnectBlock(dbTx database.Tx, block, parent *dcrutil.Block,
}
f, err = blockcf.Extended(block.MsgBlock())
if err != nil {
if err != nil && err != gcs.ErrNoData {
return err
}

View File

@ -19,7 +19,7 @@ const (
maxAllowedOffsetSecs = 70 * 60 // 1 hour 10 minutes
// similarTimeSecs is the number of seconds in either direction from the
// local clock that is used to determine that it is likely wrong and
// local clock that is used to determine that it is likely wrong and
// hence to show a warning.
similarTimeSecs = 5 * 60 // 5 minutes
)

View File

@ -29,10 +29,10 @@ const (
// It should be noted that the block might still ultimately fail to
// become the new main chain tip if it contains invalid scripts, double
// spends, etc. However, this is quite rare in practice because a lot
// of work was expended to create a block which satisfies the proof of
// of work was expended to create a block which satisfies the proof of
// work requirement.
//
// Finally, this notification is only sent if the chain is believed
// Finally, this notification is only sent if the chain is believed
// to be current and the chain lock is NOT released, so consumers must
// take care to avoid calling blockchain functions to avoid potential
// deadlock.

View File

@ -98,11 +98,11 @@ func (b *BlockChain) processOrphans(hash *chainhash.Hash, flags BehaviorFlags) e
// the block chain along with best chain selection and reorganization.
//
// When no errors occurred during processing, the first return value indicates
// the length of the fork the block extended. In the case it either extended
// the length of the fork the block extended. In the case it either extended
// the best chain or is now the tip of the best chain due to causing a
// reorganize, the fork length will be 0. The second return value indicates
// whether or not the block is an orphan, in which case the fork length will
// also be zero as expected, because it, by definition, does not connect to the
// also be zero as expected, because it, by definition, does not connect to the
// best chain.
//
// This function is safe for concurrent access.

View File

@ -223,7 +223,7 @@ func TestCalcSequenceLock(t *testing.T) {
{
// A transaction with a single input. The input's
// sequence number encodes a relative locktime in blocks
// (3 blocks). The sequence lock should have a value
// (3 blocks). The sequence lock should have a value
// of -1 for seconds, but a height of 2 meaning it can
// be included at height 3.
name: "3 blocks",
@ -381,7 +381,7 @@ func TestCalcSequenceLock(t *testing.T) {
// Ensure both the returned sequence lock seconds and block
// height match the expected values.
if seqLock.MinTime != test.want.MinTime {
t.Errorf("%s: mismatched seconds - got %v, want %v",
t.Errorf("%s: mistmached seconds - got %v, want %v",
test.name, seqLock.MinTime, test.want.MinTime)
continue
}

View File

@ -38,7 +38,7 @@ const (
// OP_RETURNs were missing or contained invalid addresses.
ErrSStxInvalidOutputs
// ErrSStxInOutProportions indicates the number of inputs in an SStx
// ErrSStxInOutProportions indicates the number of inputs in an SStx
// was not equal to the number of output minus one.
ErrSStxInOutProportions
@ -249,7 +249,7 @@ func (e RuleError) GetCode() ErrorCode {
return e.ErrorCode
}
// stakeRuleError creates a RuleError given a set of arguments.
// stakeRuleError creates a RuleError given a set of arguments.
func stakeRuleError(c ErrorCode, desc string) RuleError {
return RuleError{ErrorCode: c, Description: desc}
}

View File

@ -59,7 +59,7 @@ const (
// v: height
//
// 4. BlockUndo
// Block removal data, for reverting the first 3 database buckets to
// Block removal data, for reverting the first 3 database buckets to
// a previous state.
//
// k: height

View File

@ -70,7 +70,7 @@ func (e ErrorCode) String() string {
return fmt.Sprintf("Unknown ErrorCode (%d)", int(e))
}
// DBError identifies an error in the stake database for tickets.
// DBError identifies an error in the stake database for tickets.
// The caller can use type assertions to determine if a failure was
// specifically due to a rule violation and access the ErrorCode field to
// ascertain the specific reason for the rule violation.

View File

@ -1,7 +1,7 @@
tickettreap
===========
[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
[![Build Status](https://img.shields.io/travis/decred/dcrd.svg)](https://travis-ci.org/decred/dcrd)
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/decred/dcrd/blockchain/stake/internal/tickettreap)

View File

@ -15,7 +15,7 @@ const numTicketKeys = 42500
var (
// generatedTicketKeys is used to store ticket keys generated for use
// in the benchmarks so that they only need to be generated once for all
// in the benchmarks so that they only need to be generated once for all
// benchmarks that use them.
genTicketKeysLock sync.Mutex
generatedTicketKeys []Key

View File

@ -179,7 +179,7 @@ func (s *parentStack) Push(node *treapNode) {
// This approach is used over append because reslicing the slice to pop
// the item causes the compiler to make unneeded allocations. Also,
// since the max number of items is related to the tree depth which
// requires exponentially more items to increase, only increase the cap
// requires exponentially more items to increase, only increase the cap
// one item at a time. This is more intelligent than the generic append
// expansion algorithm which often doubles the cap.
index := s.index - staticDepth

View File

@ -57,7 +57,7 @@ type Immutable struct {
root *treapNode
count int
// totalSize is the best estimate of the total size of all data in
// totalSize is the best estimate of the total size of all data in
// the treap including the keys, values, and node sizes.
totalSize uint64
}

View File

@ -373,7 +373,7 @@ func TestImmutableReverseSequential(t *testing.T) {
}
// TestImmutableUnordered ensures that putting keys into an immutable treap in
// no particular order works as expected.
// no particular order works as expected.
func TestImmutableUnordered(t *testing.T) {
t.Parallel()
@ -463,7 +463,7 @@ func TestImmutableDuplicatePut(t *testing.T) {
testTreap = testTreap.Put(key, value)
expectedSize += nodeFieldsSize + uint64(len(key)) + nodeValueSize
// Put a duplicate key with the expected final value.
// Put a duplicate key with the expected final value.
testTreap = testTreap.Put(key, expectedVal)
// Ensure the key still exists and is the new value.

View File

@ -65,12 +65,12 @@ const (
// hash of the block in which voting was missed.
MaxOutputsPerSSRtx = MaxInputsPerSStx
// SStxPKHMinOutSize is the minimum size of an OP_RETURN commitment output
// SStxPKHMinOutSize is the minimum size of an OP_RETURN commitment output
// for an SStx tx.
// 20 bytes P2SH/P2PKH + 8 byte amount + 4 byte fee range limits
SStxPKHMinOutSize = 32
// SStxPKHMaxOutSize is the maximum size of an OP_RETURN commitment output
// SStxPKHMaxOutSize is the maximum size of an OP_RETURN commitment output
// for an SStx tx.
SStxPKHMaxOutSize = 77
@ -842,7 +842,7 @@ func CheckSSGen(tx *wire.MsgTx) error {
}
// IsSSGen returns whether or not a transaction is a stake submission generation
// transaction. These are also known as votes.
// transaction. These are also known as votes.
func IsSSGen(tx *wire.MsgTx) bool {
return CheckSSGen(tx) == nil
}
@ -937,7 +937,7 @@ func CheckSSRtx(tx *wire.MsgTx) error {
}
// IsSSRtx returns whether or not a transaction is a stake submission revocation
// transaction. These are also known as revocations.
// transaction. These are also known as revocations.
func IsSSRtx(tx *wire.MsgTx) bool {
return CheckSSRtx(tx) == nil
}

View File

@ -256,7 +256,7 @@ func TestTicketDBLongChain(t *testing.T) {
filename := filepath.Join("testdata", "testexpiry.bz2")
fi, err := os.Open(filename)
if err != nil {
t.Fatalf("failed to open test data: %v", err)
t.Fatalf("failed ot open test data: %v", err)
}
bcStream := bzip2.NewReader(fi)
defer fi.Close()

View File

@ -1,7 +1,7 @@
standalone
==========
[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
[![Build Status](https://img.shields.io/travis/decred/dcrd.svg)](https://travis-ci.org/decred/dcrd)
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/decred/dcrd/blockchain/standalone)

View File

@ -190,7 +190,7 @@ func (c *SubsidyCache) CalcBlockSubsidy(height int64) int64 {
// subsidy for the requested interval.
if reqInterval > lastCachedInterval {
// Return zero for all intervals after the subsidy reaches zero. This
// enforces an upper bound on the number of entries in the cache.
// enforces an upper bound on the number of entries in the cache.
if lastCachedSubsidy == 0 {
return 0
}

View File

@ -188,7 +188,7 @@ func (c *thresholdStateCache) Update(hash chainhash.Hash, state ThresholdStateTu
c.entries[hash] = state
}
// MarkFlushed marks all of the current updates as flushed to the database.
// MarkFlushed marks all of the current updates as flushed to the database.
// This is useful so the caller can ensure the needed database updates are not
// lost until they have successfully been written to the database.
func (c *thresholdStateCache) MarkFlushed() {
@ -531,7 +531,7 @@ func (b *BlockChain) StateLastChangedHeight(hash *chainhash.Hash, version uint32
return 0, HashError(hash.String())
}
// Fetch the threshold state cache for the provided deployment id as well as
// Fetch the threshold state cache for the provided deployment id as well as
// the condition checker.
var cache *thresholdStateCache
var checker thresholdConditionChecker
@ -666,9 +666,9 @@ func (b *BlockChain) isFixSeqLocksAgendaActive(prevNode *blockNode) (bool, error
return state.State == ThresholdActive, nil
}
// IsFixSeqLocksAgendaActive returns whether or not the fix sequence locks
// agenda vote, as defined in DCP0004 has passed and is now active for the
// block AFTER the current best chain block.
// IsFixSeqLocksAgendaActive returns whether or not the fix
// sequence locks agenda vote, as defined in DCP0004 has passed and is now
// active for the block AFTER the current best chain block.
//
// This function is safe for concurrent access.
func (b *BlockChain) IsFixSeqLocksAgendaActive() (bool, error) {

View File

@ -250,7 +250,7 @@ func TestThresholdState(t *testing.T) {
// version 3.
//
// This will result in triggering enforcement of the stake version and
// that the stake version is 3. The threshold state for the test dummy
// that the stake version is 3. The threshold state for the test dummy
// deployments must still be defined since a v4 majority proof-of-work
// and proof-of-stake upgrade are required before moving to started.
// ---------------------------------------------------------------------
@ -308,7 +308,7 @@ func TestThresholdState(t *testing.T) {
//
// This will result in achieving stake version 4 enforcement.
//
// The threshold state for the dummy deployments must still be defined
// The threshold state for the dummy deployments must still be defined
// since it can only change on a rule change boundary and it still
// requires a v4 majority proof-of-work upgrade before moving to
// started.
@ -338,7 +338,7 @@ func TestThresholdState(t *testing.T) {
// the final two blocks to block version 4 so that majority version 4
// is not achieved, but the final block in the interval is version 4.
//
// The threshold state for the dummy deployments must still be defined
// The treshold state for the dummy deployments must still be defined
// since it still requires a v4 majority proof-of-work upgrade before
// moving to started.
// ---------------------------------------------------------------------
@ -375,7 +375,7 @@ func TestThresholdState(t *testing.T) {
// achieved and this will achieve v4 majority proof-of-work upgrade,
// voting can begin at the next rule change interval.
//
// The threshold state for the dummy deployments must still be defined
// The treshold state for the dummy deployments must still be defined
// since even though all required upgrade conditions are met, the state
// change must not happen until the start of the next rule change
// interval.
@ -405,7 +405,7 @@ func TestThresholdState(t *testing.T) {
// vote bits to include yes votes for the first test dummy agenda and
// no for the second test dummy agenda to ensure they aren't counted.
//
// The threshold state for the dummy deployments must move to started.
// The treshold state for the dummy deployments must move to started.
// Even though the majority of the votes have already been voting yes
// for the first test dummy agenda, and no for the second one, they must
// not count, otherwise it would move straight to lockedin or failed,
@ -437,7 +437,7 @@ func TestThresholdState(t *testing.T) {
// vote bits to include yes votes for the first test dummy agenda and
// no for the second test dummy agenda to ensure they aren't counted.
//
// The threshold state for the dummy deployments must remain in started
// The treshold state for the dummy deployments must remain in started
// because the votes are an old version and thus have a different
// definition and don't apply to version 4.
// ---------------------------------------------------------------------
@ -468,7 +468,7 @@ func TestThresholdState(t *testing.T) {
// votes for the first test dummy agenda and a majority no for the
// second test dummy agenda.
//
// The threshold state for the dummy deployments must remain in started
// The treshold state for the dummy deployments must remain in started
// because quorum was not reached.
// ---------------------------------------------------------------------
@ -504,7 +504,7 @@ func TestThresholdState(t *testing.T) {
// majority yes for the first test dummy agenda and a few votes shy of a
// majority no for the second test dummy agenda.
//
// The threshold state for the dummy deployments must remain in started
// The treshold state for the dummy deployments must remain in started
// because even though quorum was reached, a required majority was not.
// ---------------------------------------------------------------------
@ -547,7 +547,7 @@ func TestThresholdState(t *testing.T) {
// vote bits to yes for the first test dummy agenda and no to the second
// one.
//
// The threshold state for the first dummy deployment must move to
// The treshold state for the first dummy deployment must move to
// lockedin since a majority yes vote was achieved while the second
// dummy deployment must move to failed since a majority no vote was
// achieved.
@ -578,12 +578,12 @@ func TestThresholdState(t *testing.T) {
// vote bits to include no votes for the first test dummy agenda and
// yes votes for the second one.
//
// The threshold state for the first dummy deployment must move to
// active since even though the interval had a majority no votes,
// lockedin status has already been achieved and can't be undone without
// a new agenda. Similarly, the second one must remain in failed even
// though the interval had a majority yes votes since a failed state
// can't be undone.
// The treshold state for the first dummy deployment must move to active
// since even though the interval had a majority no votes, lockedin
// status has already been achieved and can't be undone without a new
// agenda. Similarly, the second one must remain in failed even though
// the interval had a majority yes votes since a failed state can't be
// undone.
// ---------------------------------------------------------------------
blocksNeeded = stakeValidationHeight + ruleChangeInterval*8 - 1 -

View File

@ -21,7 +21,7 @@ func (s timeSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Less returns whether the timestamp with index i should sort before the
// Less returns whether the timstamp with index i should sort before the
// timestamp with index j. It is part of the sort.Interface implementation.
func (s timeSorter) Less(i, j int) bool {
return s[i] < s[j]

View File

@ -242,7 +242,7 @@ func upgradeToVersion2(db database.DB, chainParams *chaincfg.Params, dbInfo *dat
}
// migrateBlockIndex migrates all block entries from the v1 block index bucket
// managed by ffldb to the v2 bucket managed by this package. The v1 bucket
// manged by ffldb to the v2 bucket managed by this package. The v1 bucket
// stored all block entries keyed by block hash, whereas the v2 bucket stores
// them keyed by block height + hash. Also, the old block index only stored the
// header, while the new one stores all info needed to recreate block nodes.

View File

@ -725,7 +725,7 @@ func checkBlockSanity(block *dcrutil.Block, timeSource MedianTimeSource, flags B
return ruleError(ErrTooManyRevocations, errStr)
}
// A block must only contain stake transactions of the allowed
// A block must only contain stake transactions of the the allowed
// types.
//
// NOTE: This is not possible to hit at the time this comment was
@ -752,7 +752,7 @@ func checkBlockSanity(block *dcrutil.Block, timeSource MedianTimeSource, flags B
return ruleError(ErrFreshStakeMismatch, errStr)
}
// A block header must commit to the actual number of votes that are
// A block header must commit to the the actual number of votes that are
// in the block.
if int64(header.Voters) != totalVotes {
errStr := fmt.Sprintf("block header commitment to %d votes "+
@ -1027,7 +1027,7 @@ func (b *BlockChain) checkBlockHeaderPositional(header *wire.BlockHeader, prevNo
//
// The flags modify the behavior of this function as follows:
// - BFFastAdd: The transactions are not checked to see if they are expired and
// the coinbase height check is not performed.
// the coinbae height check is not performed.
//
// The flags are also passed to checkBlockHeaderPositional. See its
// documentation for how the flags modify its behavior.
@ -1496,7 +1496,7 @@ func isStakeScriptHash(script []byte, stakeOpcode byte) bool {
}
// isAllowedTicketInputScriptForm returns whether or not the passed public key
// script is one of the allowed forms for a ticket input.
// script is a one of the allowed forms for a ticket input.
func isAllowedTicketInputScriptForm(script []byte) bool {
return isPubKeyHash(script) || isScriptHash(script) ||
isStakePubKeyHash(script, txscript.OP_SSGEN) ||
@ -1726,7 +1726,7 @@ func checkTicketRedeemerCommitments(ticketHash *chainhash.Hash, ticketOuts []*st
}
contributionSumBig := big.NewInt(contributionSum)
// The outputs that satisfy the commitments of the ticket start at offset
// The outputs that satisify the commitments of the ticket start at offset
// 2 for votes while they start at 0 for revocations. Also, the payments
// must be tagged with the appropriate stake opcode depending on whether it
// is a vote or a revocation. Finally, the fee limits in the original
@ -1794,7 +1794,7 @@ func checkTicketRedeemerCommitments(ticketHash *chainhash.Hash, ticketOuts []*st
// revocations).
//
// It should be noted that, due to the scaling, the sum of the generated
// amounts for multi-participant votes might be a few atoms less than
// amounts for mult-participant votes might be a few atoms less than
// the full amount and the difference is treated as a standard
// transaction fee.
commitmentAmt := extractTicketCommitAmount(commitmentScript)
@ -1803,7 +1803,7 @@ func checkTicketRedeemerCommitments(ticketHash *chainhash.Hash, ticketOuts []*st
// Ensure the amount paid adheres to the commitment while taking into
// account any fee limits that might be imposed. The output amount must
// exactly match the calculated amount when not encumbered with a
// exactly match the calculated amount when when not encumbered with a
// fee limit. On the other hand, when it is encumbered, it must be
// between the minimum amount imposed by the fee limit and the
// calculated amount.
@ -1908,7 +1908,7 @@ func checkVoteInputs(subsidyCache *standalone.SubsidyCache, tx *dcrutil.Tx, txHe
ticketHash := &ticketIn.PreviousOutPoint.Hash
ticketUtxo := view.LookupEntry(ticketHash)
if ticketUtxo == nil || ticketUtxo.IsFullySpent() {
str := fmt.Sprintf("ticket output %v referenced by vote %s:%d either "+
str := fmt.Sprintf("ticket output %v referenced by vote %s:%d either "+
"does not exist or has already been spent",
ticketIn.PreviousOutPoint, voteHash, ticketInIdx)
return ruleError(ErrMissingTxOut, str)
@ -2096,7 +2096,7 @@ func CheckTransactionInputs(subsidyCache *standalone.SubsidyCache, tx *dcrutil.T
}
}
// Perform additional checks on vote transactions such as verifying that the
// Perform additional checks on vote transactions such as verying that the
// referenced ticket exists, the stakebase input commits to correct subsidy,
// the output amounts adhere to the commitments of the referenced ticket,
// and the ticket maturity requirements are met.

View File

@ -215,7 +215,7 @@ func TestSequenceLocksActive(t *testing.T) {
got := SequenceLockActive(&seqLock, test.blockHeight,
time.Unix(test.medianTime, 0))
if got != test.want {
t.Errorf("%s: mismatched sequence lock status - got %v, "+
t.Errorf("%s: mismatched seqence lock status - got %v, "+
"want %v", test.name, got, test.want)
continue
}

View File

@ -66,7 +66,7 @@ var (
},
{
Id: "Vote against",
Description: "Vote against all multiple",
Description: "Vote against all multiple ",
Bits: 0x20, // 0b0010 0000
IsAbstain: false,
IsNo: true,

View File

@ -1,4 +1,4 @@
// Copyright (c) 2015-2019 The Decred developers
// Copyright (c) 2015-2018 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
@ -8,7 +8,7 @@ import (
"sync"
"time"
"github.com/decred/dcrd/dcrutil/v2"
"github.com/decred/dcrd/dcrutil"
"github.com/decred/slog"
)

View File

@ -9,20 +9,21 @@ import (
"container/list"
"encoding/binary"
"fmt"
"math/rand"
"os"
"path/filepath"
"sync"
"sync/atomic"
"time"
"github.com/decred/dcrd/blockchain/standalone"
"github.com/decred/dcrd/blockchain/v2"
"github.com/decred/dcrd/blockchain"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/chaincfg/v2"
"github.com/decred/dcrd/database/v2"
"github.com/decred/dcrd/dcrutil/v2"
"github.com/decred/dcrd/fees/v2"
"github.com/decred/dcrd/mempool/v3"
"github.com/decred/dcrd/database"
"github.com/decred/dcrd/dcrutil"
"github.com/decred/dcrd/fees"
"github.com/decred/dcrd/mempool/v2"
"github.com/decred/dcrd/wire"
)
@ -238,6 +239,53 @@ type isCurrentMsg struct {
reply chan bool
}
// getCurrentTemplateMsg handles a request for the current mining block template.
type getCurrentTemplateMsg struct {
reply chan getCurrentTemplateResponse
}
// getCurrentTemplateResponse is a response sent to the reply channel of a
// getCurrentTemplateMsg.
type getCurrentTemplateResponse struct {
Template *BlockTemplate
}
// setCurrentTemplateMsg handles a request to change the current mining block
// template.
type setCurrentTemplateMsg struct {
Template *BlockTemplate
reply chan setCurrentTemplateResponse
}
// setCurrentTemplateResponse is a response sent to the reply channel of a
// setCurrentTemplateMsg.
type setCurrentTemplateResponse struct {
}
// getParentTemplateMsg handles a request for the current parent mining block
// template.
type getParentTemplateMsg struct {
reply chan getParentTemplateResponse
}
// getParentTemplateResponse is a response sent to the reply channel of a
// getParentTemplateMsg.
type getParentTemplateResponse struct {
Template *BlockTemplate
}
// setParentTemplateMsg handles a request to change the parent mining block
// template.
type setParentTemplateMsg struct {
Template *BlockTemplate
reply chan setParentTemplateResponse
}
// setParentTemplateResponse is a response sent to the reply channel of a
// setParentTemplateMsg.
type setParentTemplateResponse struct {
}
// headerNode is used as a node in a list of headers that are linked together
// between checkpoints.
type headerNode struct {
@ -248,10 +296,11 @@ type headerNode struct {
// PeerNotifier provides an interface for server peer notifications.
type PeerNotifier interface {
// AnnounceNewTransactions generates and relays inventory vectors and
// notifies websocket clients of the passed transactions.
// notifies both websocket and getblocktemplate long poll clients of
// the passed transactions.
AnnounceNewTransactions(txns []*dcrutil.Tx)
// UpdatePeerHeights updates the heights of all peers who have
// UpdatePeerHeights updates the heights of all peers who have have
// announced the latest connected main chain block, or a recognized orphan.
UpdatePeerHeights(latestBlkHash *chainhash.Hash, latestHeight int64, updateSource *serverPeer)
@ -270,9 +319,8 @@ type blockManagerConfig struct {
TimeSource blockchain.MedianTimeSource
// The following fields are for accessing the chain and its configuration.
Chain *blockchain.BlockChain
ChainParams *chaincfg.Params
SubsidyCache *standalone.SubsidyCache
Chain *blockchain.BlockChain
ChainParams *chaincfg.Params
// The following fields provide access to the fee estimator, mempool and
// the background block template generator.
@ -280,7 +328,7 @@ type blockManagerConfig struct {
TxMemPool *mempool.TxPool
BgBlkTmplGenerator *BgBlkTmplGenerator
// The following fields are blockManager callbacks.
// The following fields are blockManger callbacks.
NotifyWinningTickets func(*WinningTicketsNtfnData)
PruneRebroadcastInventory func()
RpcServer func() *rpcServer
@ -292,6 +340,7 @@ type blockManager struct {
cfg *blockManagerConfig
started int32
shutdown int32
chain *blockchain.BlockChain
rejectedTxns map[chainhash.Hash]struct{}
requestedTxns map[chainhash.Hash]struct{}
requestedBlocks map[chainhash.Hash]struct{}
@ -314,7 +363,9 @@ type blockManager struct {
lotteryDataBroadcast map[chainhash.Hash]struct{}
lotteryDataBroadcastMutex sync.RWMutex
AggressiveMining bool
cachedCurrentTemplate *BlockTemplate
cachedParentTemplate *BlockTemplate
AggressiveMining bool
// The following fields are used to filter duplicate block announcements.
announcedBlockMtx sync.Mutex
@ -359,7 +410,7 @@ func (b *blockManager) findNextHeaderCheckpoint(height int64) *chaincfg.Checkpoi
if cfg.DisableCheckpoints {
return nil
}
checkpoints := b.cfg.Chain.Checkpoints()
checkpoints := b.chain.Checkpoints()
if len(checkpoints) == 0 {
return nil
}
@ -382,20 +433,6 @@ func (b *blockManager) findNextHeaderCheckpoint(height int64) *chaincfg.Checkpoi
return nextCheckpoint
}
// chainBlockLocatorToHashes converts a block locator from chain to a slice
// of hashes.
func chainBlockLocatorToHashes(locator blockchain.BlockLocator) []chainhash.Hash {
if len(locator) == 0 {
return nil
}
result := make([]chainhash.Hash, 0, len(locator))
for _, hash := range locator {
result = append(result, *hash)
}
return result
}
// startSync will choose the best peer among the available candidate peers to
// download/sync the blockchain from. When syncing is already running, it
// simply returns. It also examines the candidates for any which are no longer
@ -406,7 +443,7 @@ func (b *blockManager) startSync(peers *list.List) {
return
}
best := b.cfg.Chain.BestSnapshot()
best := b.chain.BestSnapshot()
var bestPeer *serverPeer
var enext *list.Element
for e := peers.Front(); e != nil; e = enext {
@ -415,7 +452,7 @@ func (b *blockManager) startSync(peers *list.List) {
// Remove sync candidate peers that are no longer candidates due
// to passing their latest known block. NOTE: The < is
// intentional as opposed to <=. While technically the peer
// intentional as opposed to <=. While techcnically the peer
// doesn't have a later block when it's equal, it will likely
// have one soon so it is a reasonable choice. It also allows
// the case where both are at 0 such as during regression test.
@ -440,13 +477,12 @@ func (b *blockManager) startSync(peers *list.List) {
// to send.
b.requestedBlocks = make(map[chainhash.Hash]struct{})
blkLocator, err := b.cfg.Chain.LatestBlockLocator()
locator, err := b.chain.LatestBlockLocator()
if err != nil {
bmgrLog.Errorf("Failed to get block locator for the "+
"latest block: %v", err)
return
}
locator := chainBlockLocatorToHashes(blkLocator)
bmgrLog.Infof("Syncing to block height %d from peer %v",
bestPeer.LastBlock(), bestPeer.Addr())
@ -506,8 +542,8 @@ func (b *blockManager) isSyncCandidate(sp *serverPeer) bool {
return sp.Services()&wire.SFNodeNetwork == wire.SFNodeNetwork
}
// syncMiningStateAfterSync polls the blockManager for the current sync
// state; if the manager is synced, it executes a call to the peer to
// syncMiningStateAfterSync polls the blockMananger for the current sync
// state; if the mananger is synced, it executes a call to the peer to
// sync the mining state to the network.
func (b *blockManager) syncMiningStateAfterSync(sp *serverPeer) {
go func() {
@ -588,90 +624,13 @@ func (b *blockManager) handleDonePeerMsg(peers *list.List, sp *serverPeer) {
if b.syncPeer != nil && b.syncPeer == sp {
b.syncPeer = nil
if b.headersFirstMode {
best := b.cfg.Chain.BestSnapshot()
best := b.chain.BestSnapshot()
b.resetHeaderState(&best.Hash, best.Height)
}
b.startSync(peers)
}
}
// errToWireRejectCode determines the wire rejection code and description for a
// given error. This function can convert some select blockchain and mempool
// error types to the historical rejection codes used on the p2p wire protocol.
func errToWireRejectCode(err error) (wire.RejectCode, string) {
// Unwrap mempool errors.
if rerr, ok := err.(mempool.RuleError); ok {
err = rerr.Err
}
// The default reason to reject a transaction/block is due to it being
// invalid somehow.
code := wire.RejectInvalid
var reason string
switch err := err.(type) {
case blockchain.RuleError:
// Convert the chain error to a reject code.
switch err.ErrorCode {
// Rejected due to duplicate.
case blockchain.ErrDuplicateBlock:
code = wire.RejectDuplicate
// Rejected due to obsolete version.
case blockchain.ErrBlockVersionTooOld:
code = wire.RejectObsolete
// Rejected due to checkpoint.
case blockchain.ErrCheckpointTimeTooOld,
blockchain.ErrDifficultyTooLow,
blockchain.ErrBadCheckpoint,
blockchain.ErrForkTooOld:
code = wire.RejectCheckpoint
}
reason = err.Error()
case mempool.TxRuleError:
switch err.ErrorCode {
// Error codes which map to a duplicate transaction already
// mined or in the mempool.
case mempool.ErrMempoolDoubleSpend,
mempool.ErrAlreadyVoted,
mempool.ErrDuplicate,
mempool.ErrTooManyVotes,
mempool.ErrDuplicateRevocation,
mempool.ErrAlreadyExists,
mempool.ErrOrphan:
code = wire.RejectDuplicate
// Error codes which map to a non-standard transaction being
// relayed.
case mempool.ErrOrphanPolicyViolation,
mempool.ErrOldVote,
mempool.ErrSeqLockUnmet,
mempool.ErrNonStandard:
code = wire.RejectNonstandard
// Error codes which map to an insufficient fee being paid.
case mempool.ErrInsufficientFee,
mempool.ErrInsufficientPriority:
code = wire.RejectInsufficientFee
// Error codes which map to an attempt to create dust outputs.
case mempool.ErrDustOutput:
code = wire.RejectDust
}
reason = err.Error()
default:
reason = fmt.Sprintf("rejected: %v", err)
}
return code, reason
}
// handleTxMsg handles transaction messages from all peers.
func (b *blockManager) handleTxMsg(tmsg *txMsg) {
// NOTE: BitcoinJ, and possibly other wallets, don't follow the spec of
@ -726,7 +685,7 @@ func (b *blockManager) handleTxMsg(tmsg *txMsg) {
// Convert the error into an appropriate reject message and
// send it.
code, reason := errToWireRejectCode(err)
code, reason := mempool.ErrToRejectErr(err)
tmsg.peer.PushRejectMsg(wire.CmdTx, code, reason, txHash,
false)
return
@ -738,7 +697,7 @@ func (b *blockManager) handleTxMsg(tmsg *txMsg) {
// current returns true if we believe we are synced with our peers, false if we
// still have blocks to check
func (b *blockManager) current() bool {
if !b.cfg.Chain.IsCurrent() {
if !b.chain.IsCurrent() {
return false
}
@ -750,31 +709,183 @@ func (b *blockManager) current() bool {
// No matter what chain thinks, if we are below the block we are syncing
// to we are not current.
if b.cfg.Chain.BestSnapshot().Height < b.syncPeer.LastBlock() {
if b.chain.BestSnapshot().Height < b.syncPeer.LastBlock() {
return false
}
return true
}
// calcTxTreeMerkleRoot calculates and returns the merkle root for the provided
// transactions. The full (including witness data) hashes for the transactions
// are used as required for merkle roots.
func calcTxTreeMerkleRoot(transactions []*dcrutil.Tx) chainhash.Hash {
if len(transactions) == 0 {
// All zero.
return chainhash.Hash{}
// checkBlockForHiddenVotes checks to see if a newly added block contains
// any votes that were previously unknown to our daemon. If it does, it
// adds these votes to the cached parent block template.
//
// This is UNSAFE for concurrent access. It must be called in single threaded
// access through the block mananger. All template access must also be routed
// through the block manager.
func (b *blockManager) checkBlockForHiddenVotes(block *dcrutil.Block) {
// Identify the cached parent template; it's possible that
// the parent template hasn't yet been updated, so we may
// need to use the current template.
var template *BlockTemplate
if b.cachedCurrentTemplate != nil {
if b.cachedCurrentTemplate.Height ==
block.Height() {
template = b.cachedCurrentTemplate
}
}
if template == nil &&
b.cachedParentTemplate != nil {
if b.cachedParentTemplate.Height ==
block.Height() {
template = b.cachedParentTemplate
}
}
// Note that the backing array is provided with space for one additional
// item when the number of leaves is odd as an optimization for the in-place
// calculation to avoid the need grow the backing array.
allocLen := len(transactions) + len(transactions)&1
leaves := make([]chainhash.Hash, 0, allocLen)
for _, tx := range transactions {
leaves = append(leaves, tx.MsgTx().TxHashFull())
// No template to alter.
if template == nil {
return
}
return standalone.CalcMerkleRootInPlace(leaves)
// Make sure that the template has the same parent
// as the new block.
if template.Block.Header.PrevBlock !=
block.MsgBlock().Header.PrevBlock {
bmgrLog.Warnf("error found while trying to check incoming " +
"block for hidden votes: template did not have the " +
"same parent as the incoming block")
return
}
votesFromBlock := make([]*dcrutil.Tx, 0,
activeNetParams.TicketsPerBlock)
for _, stx := range block.STransactions() {
if stake.IsSSGen(stx.MsgTx()) {
votesFromBlock = append(votesFromBlock, stx)
}
}
// Now that we have the template, grab the votes and compare
// them with those found in the newly added block. If we don't
// the votes, they will need to be added to our block template.
// Here we map the vote by their ticket hashes, since the vote
// hash itself varies with the settings of voteBits.
var newVotes []*dcrutil.Tx
var oldTickets []*dcrutil.Tx
var oldRevocations []*dcrutil.Tx
oldVoteMap := make(map[chainhash.Hash]struct{},
int(b.cfg.ChainParams.TicketsPerBlock))
templateBlock := dcrutil.NewBlock(template.Block)
// Add all the votes found in our template. Keep their
// hashes in a map for easy lookup in the next loop.
for _, stx := range templateBlock.STransactions() {
mstx := stx.MsgTx()
txType := stake.DetermineTxType(mstx)
if txType == stake.TxTypeSSGen {
ticketH := mstx.TxIn[1].PreviousOutPoint.Hash
oldVoteMap[ticketH] = struct{}{}
newVotes = append(newVotes, stx)
}
// Create a list of old tickets and revocations
// while we're in this loop.
if txType == stake.TxTypeSStx {
oldTickets = append(oldTickets, stx)
}
if txType == stake.TxTypeSSRtx {
oldRevocations = append(oldRevocations, stx)
}
}
// Check the votes seen in the block. If the votes
// are new, append them.
for _, vote := range votesFromBlock {
ticketH := vote.MsgTx().TxIn[1].PreviousOutPoint.Hash
if _, exists := oldVoteMap[ticketH]; !exists {
newVotes = append(newVotes, vote)
}
}
// Check the length of the reconstructed voter list for
// integrity.
votesTotal := len(newVotes)
if votesTotal > int(b.cfg.ChainParams.TicketsPerBlock) {
bmgrLog.Warnf("error found while adding hidden votes "+
"from block %v to the old block template: %v max "+
"votes expected but %v votes found", block.Hash(),
int(b.cfg.ChainParams.TicketsPerBlock), votesTotal)
return
}
// Clear the old stake transactions and begin inserting the
// new vote list along with all the old transactions. Do this
// for both the underlying template msgBlock and a new slice
// of transaction pointers so that a new merkle root can be
// calculated.
template.Block.ClearSTransactions()
updatedTxTreeStake := make([]*dcrutil.Tx, 0,
len(newVotes)+len(oldTickets)+len(oldRevocations))
for _, vote := range newVotes {
updatedTxTreeStake = append(updatedTxTreeStake, vote)
template.Block.AddSTransaction(vote.MsgTx())
}
for _, ticket := range oldTickets {
updatedTxTreeStake = append(updatedTxTreeStake, ticket)
template.Block.AddSTransaction(ticket.MsgTx())
}
for _, revocation := range oldRevocations {
updatedTxTreeStake = append(updatedTxTreeStake, revocation)
template.Block.AddSTransaction(revocation.MsgTx())
}
// Create a new coinbase and update the coinbase pointer
// in the underlying template msgBlock.
random, err := wire.RandomUint64()
if err != nil {
return
}
height := block.MsgBlock().Header.Height
opReturnPkScript, err := standardCoinbaseOpReturn(height, random)
if err != nil {
// Stopping at this step will lead to a corrupted block template
// because the stake tree has already been manipulated, so throw
// an error.
bmgrLog.Errorf("failed to create coinbase OP_RETURN while generating " +
"block with extra found voters")
return
}
coinbase, err := createCoinbaseTx(b.chain.FetchSubsidyCache(),
template.Block.Transactions[0].TxIn[0].SignatureScript,
opReturnPkScript, int64(template.Block.Header.Height),
cfg.miningAddrs[rand.Intn(len(cfg.miningAddrs))],
uint16(votesTotal), b.cfg.ChainParams)
if err != nil {
bmgrLog.Errorf("failed to create coinbase while generating " +
"block with extra found voters")
return
}
template.Block.Transactions[0] = coinbase.MsgTx()
// Patch the header. First, reconstruct the merkle trees, then
// correct the number of voters, and finally recalculate the size.
updatedTxTreeRegular := make([]*dcrutil.Tx, 0,
len(template.Block.Transactions))
updatedTxTreeRegular = append(updatedTxTreeRegular, coinbase)
for i, mtx := range template.Block.Transactions {
// Coinbase
if i == 0 {
continue
}
tx := dcrutil.NewTx(mtx)
updatedTxTreeRegular = append(updatedTxTreeRegular, tx)
}
merkles := blockchain.BuildMerkleTreeStore(updatedTxTreeRegular)
template.Block.Header.StakeRoot = *merkles[len(merkles)-1]
smerkles := blockchain.BuildMerkleTreeStore(updatedTxTreeStake)
template.Block.Header.Voters = uint16(votesTotal)
template.Block.Header.StakeRoot = *smerkles[len(smerkles)-1]
template.Block.Header.Size = uint32(template.Block.SerializeSize())
}
// handleBlockMsg handles block messages from all peers.
@ -820,7 +931,7 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
// Process the block to include validation, best chain selection, orphan
// handling, etc.
forkLen, isOrphan, err := b.cfg.Chain.ProcessBlock(bmsg.block,
forkLen, isOrphan, err := b.chain.ProcessBlock(bmsg.block,
behaviorFlags)
if err != nil {
// When the error is a rule error, it means the block was simply
@ -841,21 +952,21 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
// Convert the error into an appropriate reject message and
// send it.
code, reason := errToWireRejectCode(err)
code, reason := mempool.ErrToRejectErr(err)
bmsg.peer.PushRejectMsg(wire.CmdBlock, code, reason,
blockHash, false)
return
}
// Meta-data about the new block this peer is reporting. We use this
// below to update this peer's latest block height and the heights of
// below to update this peer's lastest block height and the heights of
// other peers based on their last announced block hash. This allows us
// to dynamically update the block heights of peers, avoiding stale
// heights when looking for a new sync peer. Upon acceptance of a block
// or recognition of an orphan, we also use this information to update
// the block heights over other peers who's invs may have been ignored
// if we are actively syncing while the chain is not yet current or
// who may have lost the lock announcement race.
// who may have lost the lock announcment race.
var heightUpdate int64
var blkHashUpdate *chainhash.Hash
@ -871,13 +982,12 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
heightUpdate = int64(cbHeight)
blkHashUpdate = blockHash
orphanRoot := b.cfg.Chain.GetOrphanRoot(blockHash)
blkLocator, err := b.cfg.Chain.LatestBlockLocator()
orphanRoot := b.chain.GetOrphanRoot(blockHash)
locator, err := b.chain.LatestBlockLocator()
if err != nil {
bmgrLog.Warnf("Failed to get block locator for the "+
"latest block: %v", err)
} else {
locator := chainBlockLocatorToHashes(blkLocator)
err = bmsg.peer.PushGetBlocksMsg(locator, orphanRoot)
if err != nil {
bmgrLog.Warnf("Failed to push getblocksmsg for the "+
@ -891,9 +1001,18 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
onMainChain := !isOrphan && forkLen == 0
if onMainChain {
// A new block is connected, however, this new block may have
// votes in it that were hidden from the network and which
// validate our parent block. We should bolt these new votes
// into the tx tree stake of the old block template on parent.
svl := b.cfg.ChainParams.StakeValidationHeight
if b.AggressiveMining && bmsg.block.Height() >= svl {
b.checkBlockForHiddenVotes(bmsg.block)
}
// Notify stake difficulty subscribers and prune invalidated
// transactions.
best := b.cfg.Chain.BestSnapshot()
best := b.chain.BestSnapshot()
r := b.cfg.RpcServer()
if r != nil {
// Update registered websocket clients on the
@ -909,12 +1028,19 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
b.cfg.TxMemPool.PruneExpiredTx()
// Update this peer's latest block height, for future
// potential sync node candidacy.
// potential sync node candidancy.
heightUpdate = best.Height
blkHashUpdate = &best.Hash
// Clear the rejected transactions.
b.rejectedTxns = make(map[chainhash.Hash]struct{})
// Allow any clients performing long polling via the
// getblocktemplate RPC to be notified when the new block causes
// their old block template to become stale.
if r := b.cfg.RpcServer(); r != nil {
r.gbtWorkState.NotifyBlockConnected(blockHash)
}
}
}
@ -954,7 +1080,7 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
prevHash := b.nextCheckpoint.Hash
b.nextCheckpoint = b.findNextHeaderCheckpoint(prevHeight)
if b.nextCheckpoint != nil {
locator := []chainhash.Hash{*prevHash}
locator := blockchain.BlockLocator([]*chainhash.Hash{prevHash})
err := bmsg.peer.PushGetHeadersMsg(locator, b.nextCheckpoint.Hash)
if err != nil {
bmgrLog.Warnf("Failed to send getheaders message to "+
@ -973,7 +1099,7 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
b.headersFirstMode = false
b.headerList.Init()
bmgrLog.Infof("Reached the final checkpoint -- switching to normal mode")
locator := []chainhash.Hash{*blockHash}
locator := blockchain.BlockLocator([]*chainhash.Hash{blockHash})
err = bmsg.peer.PushGetBlocksMsg(locator, &zeroHash)
if err != nil {
bmgrLog.Warnf("Failed to send getblocks message to peer %s: %v",
@ -1060,7 +1186,7 @@ func (b *blockManager) handleHeadersMsg(hmsg *headersMsg) {
prevNodeEl := b.headerList.Back()
if prevNodeEl == nil {
bmgrLog.Warnf("Header list does not contain a previous" +
" element as expected -- disconnecting peer")
"element as expected -- disconnecting peer")
hmsg.peer.Disconnect()
return
}
@ -1122,7 +1248,7 @@ func (b *blockManager) handleHeadersMsg(hmsg *headersMsg) {
// This header is not a checkpoint, so request the next batch of
// headers starting from the latest known header and ending with the
// next checkpoint.
locator := []chainhash.Hash{*finalHash}
locator := blockchain.BlockLocator([]*chainhash.Hash{finalHash})
err := hmsg.peer.PushGetHeadersMsg(locator, b.nextCheckpoint.Hash)
if err != nil {
bmgrLog.Warnf("Failed to send getheaders message to "+
@ -1141,7 +1267,7 @@ func (b *blockManager) haveInventory(invVect *wire.InvVect) (bool, error) {
case wire.InvTypeBlock:
// Ask chain if the block is known to it in any form (main
// chain, side chain, or orphan).
return b.cfg.Chain.HaveBlock(&invVect.Hash)
return b.chain.HaveBlock(&invVect.Hash)
case wire.InvTypeTx:
// Ask the transaction memory pool if the transaction is known
@ -1152,14 +1278,14 @@ func (b *blockManager) haveInventory(invVect *wire.InvVect) (bool, error) {
// Check if the transaction exists from the point of view of the
// end of the main chain.
entry, err := b.cfg.Chain.FetchUtxoEntry(&invVect.Hash)
entry, err := b.chain.FetchUtxoEntry(&invVect.Hash)
if err != nil {
return false, err
}
return entry != nil && !entry.IsFullySpent(), nil
}
// The requested inventory is an unsupported type, so just claim
// The requested inventory is is an unsupported type, so just claim
// it is known to avoid requesting it.
return true, nil
}
@ -1199,7 +1325,7 @@ func (b *blockManager) handleInvMsg(imsg *invMsg) {
// If our chain is current and a peer announces a block we already
// know of, then update their current block height.
if lastBlock != -1 && isCurrent {
blkHeight, err := b.cfg.Chain.BlockHeightByHash(&invVects[lastBlock].Hash)
blkHeight, err := b.chain.BlockHeightByHash(&invVects[lastBlock].Hash)
if err == nil {
imsg.peer.UpdateLastBlockHeight(blkHeight)
}
@ -1258,19 +1384,18 @@ func (b *blockManager) handleInvMsg(imsg *invMsg) {
// resending the orphan block as an available block
// to signal there are more missing blocks that need to
// be requested.
if b.cfg.Chain.IsKnownOrphan(&iv.Hash) {
if b.chain.IsKnownOrphan(&iv.Hash) {
// Request blocks starting at the latest known
// up to the root of the orphan that just came
// in.
orphanRoot := b.cfg.Chain.GetOrphanRoot(&iv.Hash)
blkLocator, err := b.cfg.Chain.LatestBlockLocator()
orphanRoot := b.chain.GetOrphanRoot(&iv.Hash)
locator, err := b.chain.LatestBlockLocator()
if err != nil {
bmgrLog.Errorf("PEER: Failed to get block "+
"locator for the latest block: "+
"%v", err)
continue
}
locator := chainBlockLocatorToHashes(blkLocator)
err = imsg.peer.PushGetBlocksMsg(locator, orphanRoot)
if err != nil {
bmgrLog.Errorf("PEER: Failed to push getblocksmsg "+
@ -1287,8 +1412,7 @@ func (b *blockManager) handleInvMsg(imsg *invMsg) {
// Request blocks after this one up to the
// final one the remote peer knows about (zero
// stop hash).
blkLocator := b.cfg.Chain.BlockLocatorFromHash(&iv.Hash)
locator := chainBlockLocatorToHashes(blkLocator)
locator := b.chain.BlockLocatorFromHash(&iv.Hash)
err = imsg.peer.PushGetBlocksMsg(locator, &zeroHash)
if err != nil {
bmgrLog.Errorf("PEER: Failed to push getblocksmsg: "+
@ -1405,7 +1529,7 @@ out:
case calcNextReqDiffNodeMsg:
difficulty, err :=
b.cfg.Chain.CalcNextRequiredDiffFromNode(msg.hash,
b.chain.CalcNextRequiredDiffFromNode(msg.hash,
msg.timestamp)
msg.reply <- calcNextReqDifficultyResponse{
difficulty: difficulty,
@ -1413,20 +1537,20 @@ out:
}
case calcNextReqStakeDifficultyMsg:
stakeDiff, err := b.cfg.Chain.CalcNextRequiredStakeDifficulty()
stakeDiff, err := b.chain.CalcNextRequiredStakeDifficulty()
msg.reply <- calcNextReqStakeDifficultyResponse{
stakeDifficulty: stakeDiff,
err: err,
}
case forceReorganizationMsg:
err := b.cfg.Chain.ForceHeadReorganization(
err := b.chain.ForceHeadReorganization(
msg.formerBest, msg.newBest)
if err == nil {
// Notify stake difficulty subscribers and prune
// invalidated transactions.
best := b.cfg.Chain.BestSnapshot()
best := b.chain.BestSnapshot()
r := b.cfg.RpcServer()
if r != nil {
r.ntfnMgr.NotifyStakeDifficulty(
@ -1446,14 +1570,14 @@ out:
}
case tipGenerationMsg:
g, err := b.cfg.Chain.TipGeneration()
g, err := b.chain.TipGeneration()
msg.reply <- tipGenerationResponse{
hashes: g,
err: err,
}
case processBlockMsg:
forkLen, isOrphan, err := b.cfg.Chain.ProcessBlock(
forkLen, isOrphan, err := b.chain.ProcessBlock(
msg.block, msg.flags)
if err != nil {
msg.reply <- processBlockResponse{
@ -1469,7 +1593,7 @@ out:
if onMainChain {
// Notify stake difficulty subscribers and prune
// invalidated transactions.
best := b.cfg.Chain.BestSnapshot()
best := b.chain.BestSnapshot()
if r != nil {
r.ntfnMgr.NotifyStakeDifficulty(
&StakeDifficultyNtfnData{
@ -1483,6 +1607,13 @@ out:
b.cfg.TxMemPool.PruneExpiredTx()
}
// Allow any clients performing long polling via the
// getblocktemplate RPC to be notified when the new block causes
// their old block template to become stale.
if r != nil {
r.gbtWorkState.NotifyBlockConnected(msg.block.Hash())
}
msg.reply <- processBlockResponse{
isOrphan: isOrphan,
err: nil,
@ -1499,6 +1630,26 @@ out:
case isCurrentMsg:
msg.reply <- b.current()
case getCurrentTemplateMsg:
cur := deepCopyBlockTemplate(b.cachedCurrentTemplate)
msg.reply <- getCurrentTemplateResponse{
Template: cur,
}
case setCurrentTemplateMsg:
b.cachedCurrentTemplate = deepCopyBlockTemplate(msg.Template)
msg.reply <- setCurrentTemplateResponse{}
case getParentTemplateMsg:
par := deepCopyBlockTemplate(b.cachedParentTemplate)
msg.reply <- getParentTemplateResponse{
Template: par,
}
case setParentTemplateMsg:
b.cachedParentTemplate = deepCopyBlockTemplate(msg.Template)
msg.reply <- setParentTemplateResponse{}
default:
bmgrLog.Warnf("Invalid message type in block handler: %T", msg)
}
@ -1538,15 +1689,8 @@ func isDoubleSpendOrDuplicateError(err error) bool {
}
rerr, ok := merr.Err.(mempool.TxRuleError)
if ok {
switch rerr.ErrorCode {
case mempool.ErrDuplicate:
return true
case mempool.ErrAlreadyExists:
return true
default:
return false
}
if ok && rerr.RejectCode == wire.RejectDuplicate {
return true
}
cerr, ok := merr.Err.(blockchain.RuleError)
@ -1571,7 +1715,7 @@ func (b *blockManager) handleBlockchainNotification(notification *blockchain.Not
// which could result in a deadlock.
block, ok := notification.Data.(*dcrutil.Block)
if !ok {
bmgrLog.Warnf("New tip block checked notification is not a block.")
bmgrLog.Warnf("New tip block checkedd notification is not a block.")
break
}
@ -1623,7 +1767,7 @@ func (b *blockManager) handleBlockchainNotification(notification *blockchain.Not
// other words, it is extending the shorter side chain. The reorg depth
// would be 106 - (103 - 3) = 6. This should intuitively make sense,
// because if the side chain were to be extended enough to become the
// best chain, it would result in a reorg that would remove 6 blocks,
// best chain, it would result in a a reorg that would remove 6 blocks,
// namely blocks 101, 102, 103, 104, 105, and 106.
blockHash := block.Hash()
bestHeight := band.BestHeight
@ -1638,7 +1782,7 @@ func (b *blockManager) handleBlockchainNotification(notification *blockchain.Not
// Obtain the winning tickets for this block. handleNotifyMsg
// should be safe for concurrent access of things contained
// within blockchain.
wt, _, _, err := b.cfg.Chain.LotteryDataForBlock(blockHash)
wt, _, _, err := b.chain.LotteryDataForBlock(blockHash)
if err != nil {
bmgrLog.Errorf("Couldn't calculate winning tickets for "+
"accepted block %v: %v", blockHash, err.Error())
@ -1706,7 +1850,7 @@ func (b *blockManager) handleBlockchainNotification(notification *blockchain.Not
// TODO: In the case the new tip disapproves the previous block, any
// transactions the previous block contains in its regular tree which
// double spend the same inputs as transactions in either tree of the
// current tip should ideally be tracked in the pool as eligible for
// current tip should ideally be tracked in the pool as eligibile for
// inclusion in an alternative tip (side chain block) in case the
// current tip block does not get enough votes. However, the
// transaction pool currently does not provide any way to distinguish
@ -1914,6 +2058,10 @@ func (b *blockManager) handleBlockchainNotification(notification *blockchain.Not
if r := b.cfg.RpcServer(); r != nil {
r.ntfnMgr.NotifyReorganization(rd)
}
// Drop the associated mining template from the old chain, since it
// will be no longer valid.
b.cachedCurrentTemplate = nil
}
}
@ -2043,7 +2191,7 @@ func (b *blockManager) requestFromPeer(p *serverPeer, blocks, txs []*chainhash.H
// Check to see if we already have this block, too.
// If so, skip.
exists, err := b.cfg.Chain.HaveBlock(bh)
exists, err := b.chain.HaveBlock(bh)
if err != nil {
return err
}
@ -2080,7 +2228,7 @@ func (b *blockManager) requestFromPeer(p *serverPeer, blocks, txs []*chainhash.H
// Check if the transaction exists from the point of view of the
// end of the main chain.
entry, err := b.cfg.Chain.FetchUtxoEntry(vh)
entry, err := b.chain.FetchUtxoEntry(vh)
if err != nil {
return err
}
@ -2202,7 +2350,37 @@ func (b *blockManager) IsCurrent() bool {
// TicketPoolValue returns the current value of the total stake in the ticket
// pool.
func (b *blockManager) TicketPoolValue() (dcrutil.Amount, error) {
return b.cfg.Chain.TicketPoolValue()
return b.chain.TicketPoolValue()
}
// GetCurrentTemplate gets the current block template for mining.
//
// The request is forwarded to the block handler goroutine via msgChan so the
// cached template is only ever touched from a single goroutine.
func (b *blockManager) GetCurrentTemplate() *BlockTemplate {
	respChan := make(chan getCurrentTemplateResponse)
	b.msgChan <- getCurrentTemplateMsg{reply: respChan}
	return (<-respChan).Template
}
// SetCurrentTemplate sets the current block template for mining.
//
// The template is handed off to the block handler goroutine via msgChan; the
// receive on the reply channel blocks until the handler has stored it.
func (b *blockManager) SetCurrentTemplate(bt *BlockTemplate) {
	done := make(chan setCurrentTemplateResponse)
	b.msgChan <- setCurrentTemplateMsg{Template: bt, reply: done}
	<-done
}
// GetParentTemplate gets the current parent block template for mining.
//
// The request is forwarded to the block handler goroutine via msgChan so the
// cached parent template is only ever touched from a single goroutine.
func (b *blockManager) GetParentTemplate() *BlockTemplate {
	respChan := make(chan getParentTemplateResponse)
	b.msgChan <- getParentTemplateMsg{reply: respChan}
	return (<-respChan).Template
}
// SetParentTemplate sets the current parent block template for mining.
//
// The template is handed off to the block handler goroutine via msgChan; the
// receive on the reply channel blocks until the handler has stored it.
func (b *blockManager) SetParentTemplate(bt *BlockTemplate) {
	done := make(chan setParentTemplateResponse)
	b.msgChan <- setParentTemplateMsg{Template: bt, reply: done}
	<-done
}
// newBlockManager returns a new Decred block manager.
@ -2210,6 +2388,7 @@ func (b *blockManager) TicketPoolValue() (dcrutil.Amount, error) {
func newBlockManager(config *blockManagerConfig) (*blockManager, error) {
bm := blockManager{
cfg: config,
chain: config.Chain,
rejectedTxns: make(map[chainhash.Hash]struct{}),
requestedTxns: make(map[chainhash.Hash]struct{}),
requestedBlocks: make(map[chainhash.Hash]struct{}),
@ -2220,8 +2399,8 @@ func newBlockManager(config *blockManagerConfig) (*blockManager, error) {
quit: make(chan struct{}),
}
best := bm.cfg.Chain.BestSnapshot()
bm.cfg.Chain.DisableCheckpoints(cfg.DisableCheckpoints)
best := bm.chain.BestSnapshot()
bm.chain.DisableCheckpoints(cfg.DisableCheckpoints)
if !cfg.DisableCheckpoints {
// Initialize the next checkpoint based on the current height.
bm.nextCheckpoint = bm.findNextHeaderCheckpoint(best.Height)
@ -2234,7 +2413,7 @@ func newBlockManager(config *blockManagerConfig) (*blockManager, error) {
// Dump the blockchain here if asked for it, and quit.
if cfg.DumpBlockchain != "" {
err := dumpBlockChain(bm.cfg.Chain, best.Height)
err := dumpBlockChain(bm.chain, best.Height)
if err != nil {
return nil, err
}

View File

@ -1,7 +1,7 @@
Certgen
======
[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
[![Build Status](https://img.shields.io/travis/decred/dcrd.svg)](https://travis-ci.org/decred/dcrd)
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/decred/dcrd/certgen)

View File

@ -1,7 +1,7 @@
chaincfg
========
[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
[![Build Status](https://img.shields.io/travis/decred/dcrd.svg)](https://travis-ci.org/decred/dcrd)
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/decred/dcrd/chaincfg)
@ -23,8 +23,8 @@ import (
"fmt"
"log"
"github.com/decred/dcrd/dcrutil/v2"
"github.com/decred/dcrd/chaincfg/v2"
"github.com/decred/dcrd/dcrutil"
"github.com/decred/dcrd/chaincfg"
)
var testnet = flag.Bool("testnet", false, "operate on the testnet Decred network")

View File

@ -1,4 +1,4 @@
// Copyright (c) 2015-2019 The Decred developers
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
@ -9,7 +9,7 @@ import (
"io"
"math/big"
"github.com/decred/dcrd/dcrec/edwards/v2"
"github.com/decred/dcrd/dcrec/edwards"
)
type edwardsDSA struct {
@ -153,7 +153,7 @@ func (e edwardsDSA) Decrypt(privkey []byte, in []byte) ([]byte,
return e.decrypt(privkey, in)
}
// newEdwardsDSA instantiates a function DSA subsystem over the edwards 25519
// newEdwardsDSA instatiates a function DSA subsystem over the edwards 25519
// curve. A caveat for the functions below is that they're all routed through
// interfaces, and nil returns from the library itself for interfaces must
// ALWAYS be checked by checking the return value by attempted dereference
@ -184,14 +184,14 @@ func newEdwardsDSA() DSA {
// Private keys
newPrivateKey: func(d *big.Int) PrivateKey {
pk := edwards.NewPrivateKey(d)
pk := edwards.NewPrivateKey(edwardsCurve, d)
if pk != nil {
return PrivateKey(*pk)
}
return nil
},
privKeyFromBytes: func(pk []byte) (PrivateKey, PublicKey) {
priv, pub := edwards.PrivKeyFromBytes(pk)
priv, pub := edwards.PrivKeyFromBytes(edwardsCurve, pk)
if priv == nil {
return nil, nil
}
@ -203,7 +203,7 @@ func newEdwardsDSA() DSA {
return tpriv, tpub
},
privKeyFromScalar: func(pk []byte) (PrivateKey, PublicKey) {
priv, pub, err := edwards.PrivKeyFromScalar(pk)
priv, pub, err := edwards.PrivKeyFromScalar(edwardsCurve, pk)
if err != nil {
return nil, nil
}
@ -223,12 +223,12 @@ func newEdwardsDSA() DSA {
// Public keys
newPublicKey: func(x *big.Int, y *big.Int) PublicKey {
pk := edwards.NewPublicKey(x, y)
pk := edwards.NewPublicKey(edwardsCurve, x, y)
tpk := PublicKey(*pk)
return tpk
},
parsePubKey: func(pubKeyStr []byte) (PublicKey, error) {
pk, err := edwards.ParsePubKey(pubKeyStr)
pk, err := edwards.ParsePubKey(edwardsCurve, pubKeyStr)
if err != nil {
return nil, err
}
@ -252,7 +252,7 @@ func newEdwardsDSA() DSA {
return ts
},
parseDERSignature: func(sigStr []byte) (Signature, error) {
sig, err := edwards.ParseDERSignature(sigStr)
sig, err := edwards.ParseDERSignature(edwardsCurve, sigStr)
if err != nil {
return nil, err
}
@ -260,7 +260,7 @@ func newEdwardsDSA() DSA {
return ts, err
},
parseSignature: func(sigStr []byte) (Signature, error) {
sig, err := edwards.ParseSignature(sigStr)
sig, err := edwards.ParseSignature(edwardsCurve, sigStr)
if err != nil {
return nil, err
}
@ -285,7 +285,7 @@ func newEdwardsDSA() DSA {
if !ok {
return nil, nil, errors.New("wrong type")
}
r, s, err = edwards.Sign(&epriv, hash)
r, s, err = edwards.Sign(edwardsCurve, &epriv, hash)
return
},
verify: func(pub PublicKey, hash []byte, r, s *big.Int) bool {
@ -301,23 +301,25 @@ func newEdwardsDSA() DSA {
// Symmetric cipher encryption
generateSharedSecret: func(privkey []byte, x, y *big.Int) []byte {
privKeyLocal, _, err := edwards.PrivKeyFromScalar(privkey)
privKeyLocal, _, err := edwards.PrivKeyFromScalar(edwardsCurve,
privkey)
if err != nil {
return nil
}
pubkey := edwards.NewPublicKey(x, y)
pubkey := edwards.NewPublicKey(edwardsCurve, x, y)
return edwards.GenerateSharedSecret(privKeyLocal, pubkey)
},
encrypt: func(x, y *big.Int, in []byte) ([]byte, error) {
pubkey := edwards.NewPublicKey(x, y)
return edwards.Encrypt(pubkey, in)
pubkey := edwards.NewPublicKey(edwardsCurve, x, y)
return edwards.Encrypt(edwardsCurve, pubkey, in)
},
decrypt: func(privkey []byte, in []byte) ([]byte, error) {
privKeyLocal, _, err := edwards.PrivKeyFromScalar(privkey)
privKeyLocal, _, err := edwards.PrivKeyFromScalar(edwardsCurve,
privkey)
if err != nil {
return nil, err
}
return edwards.Decrypt(privKeyLocal, in)
return edwards.Decrypt(edwardsCurve, privKeyLocal, in)
},
}

View File

@ -1,4 +1,4 @@
// Copyright (c) 2015-2019 The Decred developers
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
@ -10,7 +10,7 @@ import (
"io"
"math/big"
"github.com/decred/dcrd/dcrec/secp256k1/v2"
"github.com/decred/dcrd/dcrec/secp256k1"
)
type secp256k1DSA struct {
@ -154,7 +154,7 @@ func (sp secp256k1DSA) Decrypt(privkey []byte, in []byte) ([]byte,
return sp.decrypt(privkey, in)
}
// newSecp256k1DSA instantiates a function DSA subsystem over the secp256k1
// newSecp256k1DSA instatiates a function DSA subsystem over the secp256k1
// curve. A caveat for the functions below is that they're all routed through
// interfaces, and nil returns from the library itself for interfaces must
// ALWAYS be checked by checking the return value by attempted dereference

View File

@ -9,8 +9,8 @@ import (
"io"
"math/big"
"github.com/decred/dcrd/dcrec/secp256k1/v2"
"github.com/decred/dcrd/dcrec/secp256k1/v2/schnorr"
"github.com/decred/dcrd/dcrec/secp256k1"
"github.com/decred/dcrd/dcrec/secp256k1/schnorr"
)
type secSchnorrDSA struct {
@ -150,7 +150,7 @@ func (sp secSchnorrDSA) Decrypt(privkey []byte, in []byte) ([]byte,
return sp.decrypt(privkey, in)
}
// newSecSchnorrDSA instantiates a function DSA subsystem over the secp256k1
// newSecSchnorrDSA instatiates a function DSA subsystem over the secp256k1
// curve. A caveat for the functions below is that they're all routed through
// interfaces, and nil returns from the library itself for interfaces must
// ALWAYS be checked by checking the return value by attempted dereference
@ -225,7 +225,7 @@ func newSecSchnorrDSA() DSA {
return tpk
},
parsePubKey: func(pubKeyStr []byte) (PublicKey, error) {
pk, err := schnorr.ParsePubKey(pubKeyStr)
pk, err := schnorr.ParsePubKey(secp256k1Curve, pubKeyStr)
if err != nil {
return nil, err
}

View File

@ -1,7 +1,7 @@
chainhash
=========
[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
[![Build Status](https://img.shields.io/travis/decred/dcrd.svg)](https://travis-ci.org/decred/dcrd)
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/decred/dcrd/chaincfg/chainhash)

View File

@ -25,8 +25,8 @@
// "fmt"
// "log"
//
// "github.com/decred/dcrd/dcrutil/v2"
// "github.com/decred/dcrd/chaincfg/v2"
// "github.com/decred/dcrd/dcrutil"
// "github.com/decred/dcrd/chaincfg"
// )
//
// var testnet = flag.Bool("testnet", false, "operate on the testnet Decred network")

View File

@ -4,8 +4,8 @@ go 1.11
require (
github.com/davecgh/go-spew v1.1.1
github.com/decred/dcrd/chaincfg/chainhash v1.0.2
github.com/decred/dcrd/dcrec/edwards/v2 v2.0.0
github.com/decred/dcrd/dcrec/secp256k1/v2 v2.0.0
github.com/decred/dcrd/chaincfg/chainhash v1.0.1
github.com/decred/dcrd/dcrec/edwards v1.0.0
github.com/decred/dcrd/dcrec/secp256k1 v1.0.1
github.com/decred/dcrd/wire v1.2.0
)

View File

@ -8,13 +8,9 @@ github.com/dchest/blake256 v1.0.0 h1:6gUgI5MHdz9g0TdrgKqXsoDX+Zjxmm1Sc6OsoGru50I
github.com/dchest/blake256 v1.0.0/go.mod h1:xXNWCE1jsAP8DAjP+rKw2MbeqLczjI3TRx2VK+9OEYY=
github.com/decred/dcrd/chaincfg/chainhash v1.0.1 h1:0vG7U9+dSjSCaHQKdoSKURK2pOb47+b+8FK5q4+Je7M=
github.com/decred/dcrd/chaincfg/chainhash v1.0.1/go.mod h1:OVfvaOsNLS/A1y4Eod0Ip/Lf8qga7VXCQjUQLbkY0Go=
github.com/decred/dcrd/chaincfg/chainhash v1.0.2 h1:rt5Vlq/jM3ZawwiacWjPa+smINyLRN07EO0cNBV6DGU=
github.com/decred/dcrd/chaincfg/chainhash v1.0.2/go.mod h1:BpbrGgrPTr3YJYRN3Bm+D9NuaFd+zGyNeIKgrhCXK60=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec/edwards/v2 v2.0.0 h1:E5KszxGgpjpmW8vN811G6rBAZg0/S/DftdGqN4FW5x4=
github.com/decred/dcrd/dcrec/edwards/v2 v2.0.0/go.mod h1:d0H8xGMWbiIQP7gN3v2rByWUcuZPm9YsgmnfoxgbINc=
github.com/decred/dcrd/dcrec/secp256k1/v2 v2.0.0 h1:3GIJYXQDAKpLEFriGFN8SbSffak10UXHGdIcFaMPykY=
github.com/decred/dcrd/dcrec/secp256k1/v2 v2.0.0/go.mod h1:3s92l0paYkZoIHuj4X93Teg/HB7eGM9x/zokGw+u4mY=
github.com/decred/dcrd/dcrec/edwards v1.0.0 h1:UDcPNzclKiJlWqV3x1Fl8xMCJrolo4PB4X9t8LwKDWU=
github.com/decred/dcrd/dcrec/edwards v1.0.0/go.mod h1:HblVh1OfMt7xSxUL1ufjToaEvpbjpWvvTAUx4yem8BI=
github.com/decred/dcrd/dcrec/secp256k1 v1.0.1 h1:EFWVd1p0t0Y5tnsm/dJujgV0ORogRJ6vo7CMAjLseAc=
github.com/decred/dcrd/dcrec/secp256k1 v1.0.1/go.mod h1:lhu4eZFSfTJWUnR3CFRcpD+Vta0KUAqnhTsTksHXgy0=
github.com/decred/dcrd/wire v1.2.0 h1:HqJVB7vcklIguzFWgRXw/WYCQ9cD3bUC5TKj53i1Hng=
github.com/decred/dcrd/wire v1.2.0/go.mod h1:/JKOsLInOJu6InN+/zH5AyCq3YDIOW/EqcffvU8fJHM=

View File

@ -95,7 +95,7 @@ type Choice struct {
// (abstain) and exist only once in the Vote.Choices array.
IsAbstain bool
// This choice indicates a hard No Vote. By convention this must exist
// This coince indicates a hard No Vote. By convention this must exist
// only once in the Vote.Choices array.
IsNo bool
}
@ -114,7 +114,7 @@ func (v *Vote) VoteIndex(vote uint16) int {
}
const (
// VoteIDMaxBlockSize is the vote ID for the maximum block size
// VoteIDMaxBlockSize is the vote ID for the the maximum block size
// increase agenda used for the hard fork demo.
VoteIDMaxBlockSize = "maxblocksize"
@ -364,7 +364,7 @@ type Params struct {
// SLIP-0044 registered coin type used for BIP44, used in the hierarchical
// deterministic path for address generation.
// All SLIP-0044 registered coin types are defined here:
// All SLIP-0044 registered coin types are are defined here:
// https://github.com/satoshilabs/slips/blob/master/slip-0044.md
SLIP0044CoinType uint32

View File

@ -1,5 +1,5 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2015-2019 The Decred developers
// Copyright (c) 2015-2018 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
@ -10,9 +10,9 @@ import (
"path/filepath"
"runtime"
"github.com/decred/dcrd/blockchain/v2"
"github.com/decred/dcrd/blockchain/v2/indexers"
"github.com/decred/dcrd/database/v2"
"github.com/decred/dcrd/blockchain"
"github.com/decred/dcrd/blockchain/indexers"
"github.com/decred/dcrd/database"
"github.com/decred/dcrd/internal/limits"
"github.com/decred/slog"
)

View File

@ -1,5 +1,5 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2015-2019 The Decred developers
// Copyright (c) 2015-2018 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
@ -10,10 +10,10 @@ import (
"os"
"path/filepath"
"github.com/decred/dcrd/chaincfg/v2"
"github.com/decred/dcrd/database/v2"
_ "github.com/decred/dcrd/database/v2/ffldb"
"github.com/decred/dcrd/dcrutil/v2"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/database"
_ "github.com/decred/dcrd/database/ffldb"
"github.com/decred/dcrd/dcrutil"
flags "github.com/jessevdk/go-flags"
)
@ -27,7 +27,7 @@ var (
dcrdHomeDir = dcrutil.AppDataDir("dcrd", false)
defaultDataDir = filepath.Join(dcrdHomeDir, "data")
knownDbTypes = database.SupportedDrivers()
activeNetParams = chaincfg.MainNetParams()
activeNetParams = &chaincfg.MainNetParams
)
// config defines the configuration options for findcheckpoint.
@ -45,7 +45,7 @@ type config struct {
Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
}
// fileExists reports whether the named file or directory exists.
// filesExists reports whether the named file or directory exists.
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
@ -93,11 +93,11 @@ func loadConfig() (*config, []string, error) {
// while we're at it
if cfg.TestNet {
numNets++
activeNetParams = chaincfg.TestNet3Params()
activeNetParams = &chaincfg.TestNet3Params
}
if cfg.SimNet {
numNets++
activeNetParams = chaincfg.SimNetParams()
activeNetParams = &chaincfg.SimNetParams
}
if numNets > 1 {
str := "%s: the testnet, regtest, and simnet params can't be " +

View File

@ -1,5 +1,5 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2015-2019 The Decred developers
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
@ -12,11 +12,11 @@ import (
"sync"
"time"
"github.com/decred/dcrd/blockchain/v2"
"github.com/decred/dcrd/blockchain/v2/indexers"
"github.com/decred/dcrd/blockchain"
"github.com/decred/dcrd/blockchain/indexers"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/database/v2"
"github.com/decred/dcrd/dcrutil/v2"
"github.com/decred/dcrd/database"
"github.com/decred/dcrd/dcrutil"
"github.com/decred/dcrd/wire"
)
@ -139,7 +139,7 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
}
isMainChain := !isOrphan && forkLen == 0
if !isMainChain {
return false, fmt.Errorf("import file contains a block that "+
return false, fmt.Errorf("import file contains an block that "+
"does not extend the main chain: %v", blockHash)
}
if isOrphan {

View File

@ -17,10 +17,10 @@ import (
"strings"
"github.com/decred/dcrd/dcrjson/v3"
"github.com/decred/dcrd/dcrutil/v2"
"github.com/decred/dcrd/dcrutil"
"github.com/decred/dcrd/internal/version"
dcrdtypes "github.com/decred/dcrd/rpc/jsonrpc/types/v2"
dcrdtypes "github.com/decred/dcrd/rpc/jsonrpc/types"
wallettypes "github.com/decred/dcrwallet/rpc/jsonrpc/types"
flags "github.com/jessevdk/go-flags"
@ -212,7 +212,7 @@ func cleanAndExpandPath(path string) string {
return filepath.Join(homeDir, path)
}
// fileExists reports whether the named file or directory exists.
// filesExists reports whether the named file or directory exists.
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {

View File

@ -16,7 +16,7 @@ import (
"strings"
"github.com/decred/dcrd/dcrjson/v3"
dcrdtypes "github.com/decred/dcrd/rpc/jsonrpc/types/v2"
dcrdtypes "github.com/decred/dcrd/rpc/jsonrpc/types"
wallettypes "github.com/decred/dcrwallet/rpc/jsonrpc/types"
)
@ -120,7 +120,7 @@ func main() {
cmd, err := dcrjson.NewCmd(method, params...)
if err != nil {
// Show the error along with its error code when it's a
// dcrjson.Error as it realistically will always be since the
// dcrjson.Error as it reallistcally will always be since the
// NewCmd function is only supposed to return errors of that
// type.
if jerr, ok := err.(dcrjson.Error); ok {

View File

@ -15,7 +15,7 @@ import (
"net"
"net/http"
"github.com/decred/dcrd/dcrjson/v3"
"github.com/decred/dcrd/dcrjson/v2"
"github.com/decred/go-socks/socks"
)

View File

@ -1,5 +1,5 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2015-2019 The Decred developers
// Copyright (c) 2015-2018 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
@ -10,10 +10,10 @@ import (
"os"
"path/filepath"
"github.com/decred/dcrd/chaincfg/v2"
"github.com/decred/dcrd/database/v2"
_ "github.com/decred/dcrd/database/v2/ffldb"
"github.com/decred/dcrd/dcrutil/v2"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/database"
_ "github.com/decred/dcrd/database/ffldb"
"github.com/decred/dcrd/dcrutil"
flags "github.com/jessevdk/go-flags"
)
@ -28,7 +28,7 @@ var (
dcrdHomeDir = dcrutil.AppDataDir("dcrd", false)
defaultDataDir = filepath.Join(dcrdHomeDir, "data")
knownDbTypes = database.SupportedDrivers()
activeNetParams = chaincfg.MainNetParams()
activeNetParams = &chaincfg.MainNetParams
)
// config defines the configuration options for findcheckpoint.
@ -80,11 +80,11 @@ func loadConfig() (*config, []string, error) {
// while we're at it
if cfg.TestNet {
numNets++
activeNetParams = chaincfg.TestNet3Params()
activeNetParams = &chaincfg.TestNet3Params
}
if cfg.SimNet {
numNets++
activeNetParams = chaincfg.SimNetParams()
activeNetParams = &chaincfg.SimNetParams
}
if numNets > 1 {
str := "%s: the testnet, regtest, and simnet params can't be " +

View File

@ -1,5 +1,5 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2015-2019 The Decred developers
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
@ -10,10 +10,10 @@ import (
"os"
"path/filepath"
"github.com/decred/dcrd/blockchain/v2"
"github.com/decred/dcrd/blockchain"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/chaincfg/v2"
"github.com/decred/dcrd/database/v2"
"github.com/decred/dcrd/database"
)
const blockDbNamePrefix = "blocks"
@ -53,7 +53,7 @@ func findCandidates(chain *blockchain.BlockChain, latestHash *chainhash.Hash) ([
// Set the latest checkpoint to the genesis block if there isn't
// already one.
latestCheckpoint = &chaincfg.Checkpoint{
Hash: &activeNetParams.GenesisHash,
Hash: activeNetParams.GenesisHash,
Height: 0,
}
}

View File

@ -134,7 +134,7 @@ func cleanAndExpandPath(path string) string {
return filepath.Join(homeDir, path)
}
// fileExists reports whether the named file or directory exists.
// filesExists reports whether the named file or directory exists.
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {

View File

@ -1,5 +1,5 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2015-2019 The Decred developers
// Copyright (c) 2015-2018 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
@ -21,13 +21,13 @@ import (
"strings"
"time"
"github.com/decred/dcrd/connmgr/v2"
"github.com/decred/dcrd/database/v2"
_ "github.com/decred/dcrd/database/v2/ffldb"
"github.com/decred/dcrd/dcrutil/v2"
"github.com/decred/dcrd/connmgr"
"github.com/decred/dcrd/database"
_ "github.com/decred/dcrd/database/ffldb"
"github.com/decred/dcrd/dcrutil"
"github.com/decred/dcrd/internal/version"
"github.com/decred/dcrd/mempool/v3"
"github.com/decred/dcrd/rpc/jsonrpc/types/v2"
"github.com/decred/dcrd/mempool/v2"
"github.com/decred/dcrd/rpc/jsonrpc/types"
"github.com/decred/dcrd/sampleconfig"
"github.com/decred/go-socks/socks"
"github.com/decred/slog"
@ -129,7 +129,6 @@ type config struct {
OnionProxyUser string `long:"onionuser" description:"Username for onion proxy server"`
OnionProxyPass string `long:"onionpass" default-mask:"-" description:"Password for onion proxy server"`
NoOnion bool `long:"noonion" description:"Disable connecting to tor hidden services"`
NoDiscoverIP bool `long:"nodiscoverip" description:"Disable automatic network address discovery"`
TorIsolation bool `long:"torisolation" description:"Enable Tor stream isolation by randomizing user credentials for each connection."`
TestNet bool `long:"testnet" description:"Use the test network"`
SimNet bool `long:"simnet" description:"Use the simulation test network"`
@ -149,7 +148,7 @@ type config struct {
MaxOrphanTxs int `long:"maxorphantx" description:"Max number of orphan transactions to keep in memory"`
Generate bool `long:"generate" description:"Generate (mine) coins using the CPU"`
MiningAddrs []string `long:"miningaddr" description:"Add the specified payment address to the list of addresses to use for generated blocks -- At least one address is required if the generate option is set"`
BlockMinSize uint32 `long:"blockminsize" description:"Minimum block size in bytes to be used when creating a block"`
BlockMinSize uint32 `long:"blockminsize" description:"Mininum block size in bytes to be used when creating a block"`
BlockMaxSize uint32 `long:"blockmaxsize" description:"Maximum block size in bytes to be used when creating a block"`
BlockPrioritySize uint32 `long:"blockprioritysize" description:"Size in bytes for high-priority/low-fee transactions when creating a block"`
SigCacheMaxSize uint `long:"sigcachemaxsize" description:"The maximum number of entries in the signature verification cache"`
@ -267,7 +266,7 @@ func supportedSubsystems() []string {
// the levels accordingly. An appropriate error is returned if anything is
// invalid.
func parseAndSetDebugLevels(debugLevel string) error {
// When the specified string doesn't have any delimiters, treat it as
// When the specified string doesn't have any delimters, treat it as
// the log level for all subsystems.
if !strings.Contains(debugLevel, ",") && !strings.Contains(debugLevel, "=") {
// Validate debug log level.
@ -298,7 +297,7 @@ func parseAndSetDebugLevels(debugLevel string) error {
// Validate subsystem.
if _, exists := subsystemLoggers[subsysID]; !exists {
str := "the specified subsystem [%v] is invalid -- " +
"supported subsystems %v"
"supported subsytems %v"
return fmt.Errorf(str, subsysID, supportedSubsystems())
}
@ -359,7 +358,7 @@ func normalizeAddresses(addrs []string, defaultPort string) []string {
return removeDuplicateAddresses(addrs)
}
// fileExists reports whether the named file or directory exists.
// filesExists reports whether the named file or directory exists.
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
@ -899,7 +898,7 @@ func loadConfig() (*config, []string, error) {
return nil, nil, err
}
// Validate the minrelaytxfee.
// Validate the the minrelaytxfee.
cfg.minRelayTxFee, err = dcrutil.NewAmount(cfg.MinRelayTxFee)
if err != nil {
str := "%s: invalid minrelaytxfee: %v"
@ -924,7 +923,7 @@ func loadConfig() (*config, []string, error) {
return nil, nil, err
}
// Limit the max orphan count to a sane value.
// Limit the max orphan count to a sane vlue.
if cfg.MaxOrphanTxs < 0 {
str := "%s: the maxorphantx option may not be less than 0 " +
"-- parsed [%d]"
@ -981,7 +980,7 @@ func loadConfig() (*config, []string, error) {
// !--nocfilters and --dropcfindex do not mix.
if !cfg.NoCFilters && cfg.DropCFIndex {
err := errors.New("dropcfindex cannot be activated without nocfilters")
err := errors.New("dropcfindex cannot be actived without nocfilters")
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
@ -990,7 +989,7 @@ func loadConfig() (*config, []string, error) {
// Check mining addresses are valid and saved parsed versions.
cfg.miningAddrs = make([]dcrutil.Address, 0, len(cfg.MiningAddrs))
for _, strAddr := range cfg.MiningAddrs {
addr, err := dcrutil.DecodeAddress(strAddr, activeNetParams.Params)
addr, err := dcrutil.DecodeAddress(strAddr)
if err != nil {
str := "%s: mining address '%s' failed to decode: %v"
err := fmt.Errorf(str, funcName, strAddr, err)
@ -998,6 +997,13 @@ func loadConfig() (*config, []string, error) {
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
if !addr.IsForNet(activeNetParams.Params) {
str := "%s: mining address '%s' is on the wrong network"
err := fmt.Errorf(str, funcName, strAddr)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
cfg.miningAddrs = append(cfg.miningAddrs, addr)
}

View File

@ -5,6 +5,7 @@
package main
import (
"flag"
"os"
"strings"
"testing"
@ -75,5 +76,6 @@ func TestAltDNSNamesWithArg(t *testing.T) {
// init parses the -test.* flags from the command line arguments list and then
// removes them to allow go-flags tests to succeed.
func init() {
	// Consume the -test.* flags supplied by the test runner so they are
	// not left in os.Args.
	flag.Parse()
	// Trim os.Args to just the program name so go-flags does not see the
	// test framework's arguments when parsing the config.
	os.Args = os.Args[:1]
}

View File

@ -1,7 +1,7 @@
connmgr
=======
[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
[![Build Status](https://img.shields.io/travis/decred/dcrd.svg)](https://travis-ci.org/decred/dcrd)
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/decred/dcrd/connmgr)

View File

@ -459,7 +459,7 @@ func TestNetworkFailure(t *testing.T) {
// TestStopFailed tests that failed connections are ignored after connmgr is
// stopped.
//
// We have a dialer which sets the stop flag on the conn manager and returns an
// We have a dailer which sets the stop flag on the conn manager and returns an
// err so that the handler assumes that the conn manager is stopped and ignores
// the failure.
func TestStopFailed(t *testing.T) {

View File

@ -14,7 +14,7 @@ import (
const (
// Halflife defines the time (in seconds) by which the transient part
// of the ban score decays to one half of its original value.
// of the ban score decays to one half of it's original value.
Halflife = 60
// lambda is the decaying constant.

View File

@ -1,12 +1,11 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2015-2019 The Decred developers
// Copyright (c) 2015-2017 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package connmgr
import (
"context"
"encoding/binary"
"errors"
"net"
@ -22,12 +21,6 @@ const (
torTTLExpired = 0x06
torCmdNotSupported = 0x07
torAddrNotSupported = 0x08
torATypeIPv4 = 1
torATypeDomainName = 3
torATypeIPv6 = 4
torCmdResolve = 240
)
var (
@ -56,23 +49,17 @@ var (
}
)
// TorLookupIP uses Tor to resolve DNS via the passed SOCKS proxy.
//
// Deprecated: use TorLookupIPContext instead.
// TorLookupIP uses Tor to resolve DNS via the SOCKS extension they provide for
// resolution over the Tor network. Tor itself doesn't support ipv6 so this
// doesn't either.
func TorLookupIP(host, proxy string) ([]net.IP, error) {
return TorLookupIPContext(context.Background(), host, proxy)
}
// TorLookupIPContext uses Tor to resolve DNS via the passed SOCKS proxy.
func TorLookupIPContext(ctx context.Context, host, proxy string) ([]net.IP, error) {
var dialer net.Dialer
conn, err := dialer.DialContext(ctx, "tcp", proxy)
conn, err := net.Dial("tcp", proxy)
if err != nil {
return nil, err
}
defer conn.Close()
buf := []byte{0x05, 0x01, 0x00}
buf := []byte{'\x05', '\x01', '\x00'}
_, err = conn.Write(buf)
if err != nil {
return nil, err
@ -83,18 +70,18 @@ func TorLookupIPContext(ctx context.Context, host, proxy string) ([]net.IP, erro
if err != nil {
return nil, err
}
if buf[0] != 0x05 {
if buf[0] != '\x05' {
return nil, ErrTorInvalidProxyResponse
}
if buf[1] != 0x00 {
if buf[1] != '\x00' {
return nil, ErrTorUnrecognizedAuthMethod
}
buf = make([]byte, 7+len(host))
buf[0] = 5 // socks protocol version
buf[1] = torCmdResolve
buf[2] = 0 // reserved
buf[3] = torATypeDomainName
buf[0] = 5 // protocol version
buf[1] = '\xF0' // Tor Resolve
buf[2] = 0 // reserved
buf[3] = 3 // Tor Resolve
buf[4] = byte(len(host))
copy(buf[5:], host)
buf[5+len(host)] = 0 // Port 0
@ -113,39 +100,34 @@ func TorLookupIPContext(ctx context.Context, host, proxy string) ([]net.IP, erro
return nil, ErrTorInvalidProxyResponse
}
if buf[1] != 0 {
err, exists := torStatusErrors[buf[1]]
if !exists {
if int(buf[1]) > len(torStatusErrors) {
err = ErrTorInvalidProxyResponse
} else {
err = torStatusErrors[buf[1]]
if err == nil {
err = ErrTorInvalidProxyResponse
}
}
return nil, err
}
if buf[3] != torATypeIPv4 && buf[3] != torATypeIPv6 {
return nil, ErrTorInvalidAddressResponse
if buf[3] != 1 {
err := torStatusErrors[torGeneralError]
return nil, err
}
var reply [32 + 2]byte
replyLen, err := conn.Read(reply[:])
buf = make([]byte, 4)
bytes, err := conn.Read(buf)
if err != nil {
return nil, err
}
var addr net.IP
switch buf[3] {
case torATypeIPv4:
if replyLen != 4+2 {
return nil, ErrTorInvalidAddressResponse
}
r := binary.BigEndian.Uint32(reply[0:4])
addr = net.IPv4(byte(r>>24), byte(r>>16),
byte(r>>8), byte(r))
case torATypeIPv6:
if replyLen <= 4+2 {
return nil, ErrTorInvalidAddressResponse
}
addr = net.IP(reply[0 : replyLen-2])
default:
if bytes != 4 {
return nil, ErrTorInvalidAddressResponse
}
return []net.IP{addr}, nil
r := binary.BigEndian.Uint32(buf)
addr := make([]net.IP, 1)
addr[0] = net.IPv4(byte(r>>24), byte(r>>16), byte(r>>8), byte(r))
return addr, nil
}

View File

@ -1,5 +1,5 @@
// Copyright (c) 2014-2016 The btcsuite developers
// Copyright (c) 2015-2019 The Decred developers
// Copyright (c) 2015-2018 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
@ -11,14 +11,12 @@ import (
"fmt"
"math/rand"
"sync"
"sync/atomic"
"time"
"github.com/decred/dcrd/blockchain/standalone"
"github.com/decred/dcrd/blockchain/v2"
"github.com/decred/dcrd/blockchain"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/chaincfg/v2"
"github.com/decred/dcrd/dcrutil/v2"
"github.com/decred/dcrd/dcrutil"
"github.com/decred/dcrd/wire"
)
@ -26,6 +24,10 @@ const (
// maxNonce is the maximum value a nonce can be in a block header.
maxNonce = ^uint32(0) // 2^32 - 1
// maxExtraNonce is the maximum value an extra nonce used in a coinbase
// transaction can be.
maxExtraNonce = ^uint64(0) // 2^64 - 1
// hpsUpdateSecs is the number of seconds to wait in between each
// update to the hashes per second monitor.
hpsUpdateSecs = 10
@ -47,7 +49,7 @@ var (
// defaultNumWorkers is the default number of workers to use for mining
// and is based on the number of processor cores. This helps ensure the
// system stays reasonably responsive under heavy load.
defaultNumWorkers = uint32(1)
defaultNumWorkers = uint32(chaincfg.CPUMinerThreads)
// littleEndian is a convenience variable since binary.LittleEndian is
// quite long.
@ -88,7 +90,7 @@ type cpuminerConfig struct {
// block chain is current. This is used by the automatic persistent
// mining routine to determine whether or it should attempt mining.
// This is useful because there is no point in mining if the chain is
// not current since any solved blocks would be on a side chain and
// not current since any solved blocks would be on a side chain and and
// up orphaned anyways.
IsCurrent func() bool
}
@ -100,11 +102,10 @@ type cpuminerConfig struct {
// function, but the default is based on the number of processor cores in the
// system which is typically sufficient.
type CPUMiner struct {
numWorkers uint32 // update atomically
sync.Mutex
g *BlkTmplGenerator
cfg *cpuminerConfig
numWorkers uint32
started bool
discreteMining bool
submitBlockLock sync.Mutex
@ -118,7 +119,8 @@ type CPUMiner struct {
// This is a map that keeps track of how many blocks have
// been mined on each parent by the CPUMiner. It is only
// for use in simulation networks, to diminish memory
// exhaustion.
// exhaustion. It should not race because it's only
// accessed in a single threaded loop below.
minedOnParents map[chainhash.Hash]uint8
}
@ -238,7 +240,7 @@ func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, ticker *time.Ticker, quit
// Create a couple of convenience variables.
header := &msgBlock.Header
targetDifficulty := standalone.CompactToBig(header.Bits)
targetDifficulty := blockchain.CompactToBig(header.Bits)
// Initial state.
lastGenerated := time.Now()
@ -247,10 +249,8 @@ func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, ticker *time.Ticker, quit
// Note that the entire extra nonce range is iterated and the offset is
// added relying on the fact that overflow will wrap around 0 as
// provided by the Go spec. Furthermore, the break condition has been
// intentionally omitted such that the loop will continue forever until
// a solution is found.
for extraNonce := uint64(0); ; extraNonce++ {
// provided by the Go spec.
for extraNonce := uint64(0); extraNonce < maxExtraNonce; extraNonce++ {
// Update the extra nonce in the block template header with the
// new value.
littleEndian.PutUint64(header.ExtraData[:], extraNonce+enOffset)
@ -258,15 +258,7 @@ func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, ticker *time.Ticker, quit
// Search through the entire nonce range for a solution while
// periodically checking for early quit and stale block
// conditions along with updates to the speed monitor.
//
// This loop differs from the outer one in that it does not run
// forever, thus allowing the extraNonce field to be updated
// between each successive iteration of the regular nonce
// space. Note that this is achieved by placing the break
// condition at the end of the code block, as this prevents the
// infinite loop that would otherwise occur if we let the for
// statement overflow the nonce value back to 0.
for nonce := uint32(0); ; nonce++ {
for i := uint32(0); i <= maxNonce; i++ {
select {
case <-quit:
return false
@ -301,25 +293,23 @@ func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, ticker *time.Ticker, quit
}
// Update the nonce and hash the block header.
header.Nonce = nonce
header.Nonce = i
hash := header.BlockHash()
hashesCompleted++
// The block is solved when the new block hash is less
// than the target difficulty. Yay!
if standalone.HashToBig(&hash).Cmp(targetDifficulty) <= 0 {
if blockchain.HashToBig(&hash).Cmp(targetDifficulty) <= 0 {
select {
case m.updateHashes <- hashesCompleted:
default:
}
return true
}
if nonce == maxNonce {
break
}
}
}
return false
}
// generateBlocks is a worker that is controlled by the miningWorkerController.
@ -393,11 +383,8 @@ out:
// This prevents you from causing memory exhaustion issues
// when mining aggressively in a simulation network.
if m.cfg.PermitConnectionlessMining {
prevBlock := template.Block.Header.PrevBlock
m.Lock()
maxBlocksOnParent := m.minedOnParents[prevBlock] >= maxSimnetToMine
m.Unlock()
if maxBlocksOnParent {
if m.minedOnParents[template.Block.Header.PrevBlock] >=
maxSimnetToMine {
minrLog.Tracef("too many blocks mined on parent, stopping " +
"until there are enough votes on these to make a new " +
"block")
@ -412,10 +399,7 @@ out:
if m.solveBlock(template.Block, ticker, quit) {
block := dcrutil.NewBlock(template.Block)
m.submitBlock(block)
m.Lock()
m.minedOnParents[template.Block.Header.PrevBlock]++
m.Unlock()
}
}
@ -443,31 +427,28 @@ func (m *CPUMiner) miningWorkerController() {
}
// Launch the current number of workers by default.
numWorkers := atomic.LoadUint32(&m.numWorkers)
runningWorkers = make([]chan struct{}, 0, numWorkers)
launchWorkers(numWorkers)
runningWorkers = make([]chan struct{}, 0, m.numWorkers)
launchWorkers(m.numWorkers)
out:
for {
select {
// Update the number of running workers.
case <-m.updateNumWorkers:
numRunning := uint32(len(runningWorkers))
numWorkers := atomic.LoadUint32(&m.numWorkers)
// No change.
if numWorkers == numRunning {
numRunning := uint32(len(runningWorkers))
if m.numWorkers == numRunning {
continue
}
// Add new workers.
if numWorkers > numRunning {
launchWorkers(numWorkers - numRunning)
if m.numWorkers > numRunning {
launchWorkers(m.numWorkers - numRunning)
continue
}
// Signal the most recently created goroutines to exit.
for i := numRunning - 1; i >= numWorkers; i-- {
for i := numRunning - 1; i >= m.numWorkers; i-- {
close(runningWorkers[i])
runningWorkers[i] = nil
runningWorkers = runningWorkers[:i]
@ -569,11 +550,16 @@ func (m *CPUMiner) SetNumWorkers(numWorkers int32) {
m.Stop()
}
// Don't lock until after the first check since Stop does its own
// locking.
m.Lock()
defer m.Unlock()
// Use default if provided value is negative.
if numWorkers < 0 {
atomic.StoreUint32(&m.numWorkers, defaultNumWorkers)
m.numWorkers = defaultNumWorkers
} else {
atomic.StoreUint32(&m.numWorkers, uint32(numWorkers))
m.numWorkers = uint32(numWorkers)
}
// When the miner is already running, notify the controller about the
@ -587,7 +573,10 @@ func (m *CPUMiner) SetNumWorkers(numWorkers int32) {
//
// This function is safe for concurrent access.
func (m *CPUMiner) NumWorkers() int32 {
return int32(atomic.LoadUint32(&m.numWorkers))
m.Lock()
defer m.Unlock()
return int32(m.numWorkers)
}
// GenerateNBlocks generates the requested number of blocks. It is self
@ -608,7 +597,7 @@ func (m *CPUMiner) GenerateNBlocks(n uint32) ([]*chainhash.Hash, error) {
if m.started || m.discreteMining {
m.Unlock()
return nil, errors.New("server is already CPU mining. Please call " +
"`setgenerate 0` before calling discrete `generate` commands")
"`setgenerate 0` before calling discrete `generate` commands.")
}
m.started = true

View File

@ -1,3 +0,0 @@
module github.com/decred/dcrd/crypto/ripemd160
go 1.11

View File

@ -1,120 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ripemd160 implements the RIPEMD-160 hash algorithm.
package ripemd160
// RIPEMD-160 is designed by Hans Dobbertin, Antoon Bosselaers, and Bart
// Preneel with specifications available at:
// http://homes.esat.kuleuven.be/~cosicart/pdf/AB-9601/AB-9601.pdf.
import (
"crypto"
"hash"
)
func init() {
	// Register this implementation so crypto.RIPEMD160.New resolves to
	// the New function in this package.
	crypto.RegisterHash(crypto.RIPEMD160, New)
}
// The size of the checksum in bytes.
const Size = 20
// The block size of the hash algorithm in bytes.
const BlockSize = 64
const (
_s0 = 0x67452301
_s1 = 0xefcdab89
_s2 = 0x98badcfe
_s3 = 0x10325476
_s4 = 0xc3d2e1f0
)
// digest represents the partial evaluation of a checksum. Instances are
// initialized via New, which calls Reset to load the initial state words.
type digest struct {
	s  [5]uint32       // running context: the five 32-bit chaining words
	x  [BlockSize]byte // temporary buffer holding a partially filled block
	nx int             // number of buffered bytes currently in x
	tc uint64          // total count of bytes processed
}
// Reset restores the digest to its initial state, discarding any data
// previously written.
func (d *digest) Reset() {
	d.s = [5]uint32{_s0, _s1, _s2, _s3, _s4}
	d.nx = 0
	d.tc = 0
}
// New returns a new hash.Hash computing the RIPEMD-160 checksum.
func New() hash.Hash {
	d := &digest{}
	d.Reset()
	return d
}
// Size returns the length of a RIPEMD-160 checksum in bytes (20).
func (d *digest) Size() int { return Size }
// BlockSize returns the hash's underlying block size in bytes (64).
func (d *digest) BlockSize() int { return BlockSize }
// Write absorbs p into the hash state, implementing io.Writer. It always
// consumes all of p and never returns an error.
func (d *digest) Write(p []byte) (nn int, err error) {
	nn = len(p)
	d.tc += uint64(nn)

	// Top off any partially buffered block first. copy takes at most
	// BlockSize-d.nx bytes, matching the buffer space that remains.
	if d.nx > 0 {
		n := copy(d.x[d.nx:], p)
		d.nx += n
		if d.nx == BlockSize {
			_Block(d, d.x[0:])
			d.nx = 0
		}
		p = p[n:]
	}

	// Process as many whole blocks as possible directly from p.
	n := _Block(d, p)
	p = p[n:]

	// Buffer any remaining partial block for a later Write or Sum.
	if len(p) > 0 {
		d.nx = copy(d.x[:], p)
	}
	return
}
// Sum appends the current RIPEMD-160 checksum to in and returns the
// resulting slice. It operates on a copy of the state, so the caller can
// keep writing and summing.
func (d0 *digest) Sum(in []byte) []byte {
	// Work on a copy so d0 is left untouched.
	d := *d0

	// Padding: a single 0x80 byte, then zeros until the length is
	// congruent to 56 mod 64.
	tc := d.tc
	var tmp [64]byte
	tmp[0] = 0x80
	if rem := tc % 64; rem < 56 {
		d.Write(tmp[:56-rem])
	} else {
		d.Write(tmp[:64+56-rem])
	}

	// Append the original message length in bits, little-endian.
	tc <<= 3
	for i := uint(0); i < 8; i++ {
		tmp[i] = byte(tc >> (8 * i))
	}
	d.Write(tmp[:8])

	if d.nx != 0 {
		panic("d.nx != 0")
	}

	// Serialize the five state words little-endian into the checksum.
	var sum [Size]byte
	for i, s := range d.s {
		sum[i*4] = byte(s)
		sum[i*4+1] = byte(s >> 8)
		sum[i*4+2] = byte(s >> 16)
		sum[i*4+3] = byte(s >> 24)
	}
	return append(in, sum[:]...)
}

View File

@ -1,72 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ripemd160
// Test vectors are from:
// http://homes.esat.kuleuven.be/~bosselae/ripemd160.html
import (
"fmt"
"io"
"testing"
)
// mdTest pairs an input message with its expected RIPEMD-160 digest.
type mdTest struct {
	out string // expected digest as a lowercase hex string
	in  string // message to hash
}
// vectors holds the RIPEMD-160 test vectors from the reference page cited
// at the top of this file.
var vectors = [...]mdTest{
	{"9c1185a5c5e9fc54612808977ee8f548b2258d31", ""},
	{"0bdc9d2d256b3ee9daae347be6f4dc835a467ffe", "a"},
	{"8eb208f7e05d987a9b044a8e98c6b087f15a0bfc", "abc"},
	{"5d0689ef49d2fae572b881b123a85ffa21595f36", "message digest"},
	{"f71c27109c692c1b56bbdceb5b9d2865b3708dbc", "abcdefghijklmnopqrstuvwxyz"},
	{"12a053384a9c0c88e405a06c27dcf49ada62eb2b", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"},
	{"b0e20b6e3116640286ed3a87a5713079b21f5189", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"},
	{"9b752e45573d4b39f4dbd3323cab82bf63326bfb", "12345678901234567890123456789012345678901234567890123456789012345678901234567890"},
}
// TestVectors hashes each published test vector three times: twice as a
// single write, and once split in half with an intermediate Sum, which
// verifies that Sum does not disturb the running state.
func TestVectors(t *testing.T) {
	for _, tv := range vectors {
		md := New()
		for pass := 0; pass < 3; pass++ {
			switch {
			case pass < 2:
				io.WriteString(md, tv.in)
			default:
				half := len(tv.in) / 2
				io.WriteString(md, tv.in[:half])
				md.Sum(nil)
				io.WriteString(md, tv.in[half:])
			}
			got := fmt.Sprintf("%x", md.Sum(nil))
			if got != tv.out {
				t.Fatalf("RIPEMD-160[%d](%s) = %s, expected %s", pass, tv.in, got, tv.out)
			}
			md.Reset()
		}
	}
}
// millionA returns the hex encoded RIPEMD-160 digest of one million 'a'
// bytes, written as 100000 chunks of ten bytes each.
func millionA() string {
	md := New()
	for i := 0; i < 100000; i++ {
		io.WriteString(md, "aaaaaaaaaa")
	}
	return fmt.Sprintf("%x", md.Sum(nil))
}
// TestMillionA checks the digest of one million 'a' bytes against the
// published expected value.
func TestMillionA(t *testing.T) {
	const out = "52783243c1697bdbe16d37f97f68f08325dc1528"
	if s := millionA(); s != out {
		t.Fatalf("RIPEMD-160 (1 million 'a') = %s, expected %s", s, out)
	}
}
// BenchmarkMillionA measures the cost of hashing one million bytes.
func BenchmarkMillionA(b *testing.B) {
	for i := 0; i < b.N; i++ {
		millionA()
	}
}

View File

@ -1,165 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// RIPEMD-160 block step.
// In its own file so that a faster assembly or C version
// can be substituted easily.
package ripemd160
import (
"math/bits"
)
// work buffer indices and roll amounts for one line
//
// _n selects which of the 16 message words is mixed in at each of the 80
// steps of the first computation line in _Block.
var _n = [80]uint{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
	7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8,
	3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12,
	1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2,
	4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13,
}
// _r gives the left-rotation amount used at each of the 80 steps of the
// first computation line in _Block.
var _r = [80]uint{
	11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8,
	7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12,
	11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5,
	11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12,
	9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6,
}
// same for the other parallel one
//
// n_ selects the message word mixed in at each of the 80 steps of the
// second (parallel) computation line in _Block.
var n_ = [80]uint{
	5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12,
	6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2,
	15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13,
	8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14,
	12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11,
}
// r_ gives the left-rotation amount used at each of the 80 steps of the
// second (parallel) computation line in _Block.
var r_ = [80]uint{
	8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6,
	9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11,
	9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5,
	15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8,
	8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11,
}
// _Block absorbs as many whole 64-byte blocks from p as possible into the
// digest state md and returns the number of bytes consumed. Any trailing
// partial block is left for the caller to buffer.
func _Block(md *digest, p []byte) int {
	n := 0
	var x [16]uint32
	var alpha, beta uint32
	for len(p) >= BlockSize {
		// Load the working state and a copy for the parallel line.
		a, b, c, d, e := md.s[0], md.s[1], md.s[2], md.s[3], md.s[4]
		aa, bb, cc, dd, ee := a, b, c, d, e

		// Decode the block into sixteen little-endian 32-bit words.
		j := 0
		for i := 0; i < 16; i++ {
			x[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24
			j += 4
		}

		// round 1: mixing function b ^ c ^ d
		i := 0
		for i < 16 {
			alpha = a + (b ^ c ^ d) + x[_n[i]]
			s := int(_r[i])
			alpha = bits.RotateLeft32(alpha, s) + e
			beta = bits.RotateLeft32(c, 10)
			a, b, c, d, e = e, alpha, b, beta, d

			// parallel line
			alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6
			s = int(r_[i])
			alpha = bits.RotateLeft32(alpha, s) + ee
			beta = bits.RotateLeft32(cc, 10)
			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd

			i++
		}

		// round 2: mixing function (b & c) | (^b & d)
		for i < 32 {
			alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999
			s := int(_r[i])
			alpha = bits.RotateLeft32(alpha, s) + e
			beta = bits.RotateLeft32(c, 10)
			a, b, c, d, e = e, alpha, b, beta, d

			// parallel line
			alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124
			s = int(r_[i])
			alpha = bits.RotateLeft32(alpha, s) + ee
			beta = bits.RotateLeft32(cc, 10)
			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd

			i++
		}

		// round 3: mixing function (b | ^c) ^ d
		for i < 48 {
			alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1
			s := int(_r[i])
			alpha = bits.RotateLeft32(alpha, s) + e
			beta = bits.RotateLeft32(c, 10)
			a, b, c, d, e = e, alpha, b, beta, d

			// parallel line
			alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3
			s = int(r_[i])
			alpha = bits.RotateLeft32(alpha, s) + ee
			beta = bits.RotateLeft32(cc, 10)
			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd

			i++
		}

		// round 4: mixing function (b & d) | (c & ^d)
		for i < 64 {
			alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc
			s := int(_r[i])
			alpha = bits.RotateLeft32(alpha, s) + e
			beta = bits.RotateLeft32(c, 10)
			a, b, c, d, e = e, alpha, b, beta, d

			// parallel line
			alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9
			s = int(r_[i])
			alpha = bits.RotateLeft32(alpha, s) + ee
			beta = bits.RotateLeft32(cc, 10)
			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd

			i++
		}

		// round 5: mixing function b ^ (c | ^d)
		for i < 80 {
			alpha = a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e
			s := int(_r[i])
			alpha = bits.RotateLeft32(alpha, s) + e
			beta = bits.RotateLeft32(c, 10)
			a, b, c, d, e = e, alpha, b, beta, d

			// parallel line
			alpha = aa + (bb ^ cc ^ dd) + x[n_[i]]
			s = int(r_[i])
			alpha = bits.RotateLeft32(alpha, s) + ee
			beta = bits.RotateLeft32(cc, 10)
			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd

			i++
		}

		// combine results: fold both lines back into the chaining state.
		dd += c + md.s[1]
		md.s[1] = md.s[2] + d + ee
		md.s[2] = md.s[3] + e + aa
		md.s[3] = md.s[4] + a + bb
		md.s[4] = md.s[0] + b + cc
		md.s[0] = dd

		p = p[BlockSize:]
		n += BlockSize
	}
	return n
}

View File

@ -1,7 +1,7 @@
database
========
[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
[![Build Status](https://img.shields.io/travis/decred/dcrd.svg)](https://travis-ci.org/decred/dcrd)
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/decred/dcrd/database)

View File

@ -36,7 +36,7 @@ type Driver struct {
var drivers = make(map[string]*Driver)
// RegisterDriver adds a backend database driver to available interfaces.
// ErrDbTypeRegistered will be returned if the database type for the driver has
// ErrDbTypeRegistered will be retruned if the database type for the driver has
// already been registered.
func RegisterDriver(driver Driver) error {
if _, exists := drivers[driver.DbType]; exists {
@ -63,7 +63,7 @@ func SupportedDrivers() []string {
// arguments are specific to the database type driver. See the documentation
// for the database driver for further details.
//
// ErrDbUnknownType will be returned if the database type is not registered.
// ErrDbUnknownType will be returned if the the database type is not registered.
func Create(dbType string, args ...interface{}) (DB, error) {
drv, exists := drivers[dbType]
if !exists {
@ -78,7 +78,7 @@ func Create(dbType string, args ...interface{}) (DB, error) {
// specific to the database type driver. See the documentation for the database
// driver for further details.
//
// ErrDbUnknownType will be returned if the database type is not registered.
// ErrDbUnknownType will be returned if the the database type is not registered.
func Open(dbType string, args ...interface{}) (DB, error) {
drv, exists := drivers[dbType]
if !exists {

View File

@ -22,7 +22,7 @@ var (
)
// checkDbError ensures the passed error is a database.Error with an error code
// that matches the passed error code.
// that matches the passed error code.
func checkDbError(t *testing.T, testName string, gotErr error, wantErrCode database.ErrorCode) bool {
dbErr, ok := gotErr.(database.Error)
if !ok {

View File

@ -82,14 +82,14 @@ const (
// ErrKeyRequired indicates at attempt to insert a zero-length key.
ErrKeyRequired
// ErrKeyTooLarge indicates an attempt to insert a key that is larger
// ErrKeyTooLarge indicates an attmempt to insert a key that is larger
// than the max allowed key size. The max key size depends on the
// specific backend driver being used. As a general rule, key sizes
// should be relatively, so this should rarely be an issue.
ErrKeyTooLarge
// ErrValueTooLarge indicates an attempt to insert a value that is
// larger than max allowed value size. The max key size depends on the
// ErrValueTooLarge indicates an attmpt to insert a value that is larger
// than max allowed value size. The max key size depends on the
// specific backend driver being used.
ErrValueTooLarge

View File

@ -24,7 +24,7 @@ func ExampleCreate() {
//
// import (
// "github.com/decred/dcrd/database2"
// _ "github.com/decred/dcrd/database/v2/ffldb"
// _ "github.com/decred/dcrd/database/ffldb"
// )
// Create a database and schedule it to be closed and removed on exit.

View File

@ -4,7 +4,7 @@
// license that can be found in the LICENSE file.
/*
This test file is part of the database package rather than the
This test file is part of the database package rather than than the
database_test package so it can bridge access to the internals to properly test
cases which are either not possible or can't reliably be tested via the public
interface. The functions, constants, and variables are only exported while the

View File

@ -1,7 +1,7 @@
ffldb
=====
[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
[![Build Status](https://img.shields.io/travis/decred/dcrd.svg)](https://travis-ci.org/decred/dcrd)
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/decred/dcrd/database/ffldb)

View File

@ -134,10 +134,10 @@ type blockStore struct {
// lruMutex protects concurrent access to the least recently used list
// and lookup map.
//
// openBlocksLRU tracks how the open files are referenced by pushing the
// openBlocksLRU tracks how the open files are refenced by pushing the
// most recently used files to the front of the list thereby trickling
// the least recently used files to end of the list. When a file needs
// to be closed due to exceeding the max number of allowed open
// to be closed due to exceeding the the max number of allowed open
// files, the one at the end of the list is closed.
//
// fileNumToLRUElem is a mapping between a specific block file number
@ -744,7 +744,7 @@ func scanBlockFiles(dbPath string) (int, uint32) {
// and offset set and all fields initialized.
func newBlockStore(basePath string, network wire.CurrencyNet) *blockStore {
// Look for the end of the latest block to file to determine what the
// write cursor position is from the viewpoint of the block files on
// write cursor position is from the viewpoing of the block files on
// disk.
fileNum, fileOff := scanBlockFiles(basePath)
if fileNum == -1 {

View File

@ -132,7 +132,7 @@ func makeDbErr(c database.ErrorCode, desc string, err error) database.Error {
}
// convertErr converts the passed leveldb error into a database error with an
// equivalent error code and the passed description. It also sets the passed
// equivalent error code and the passed description. It also sets the passed
// error as the underlying error.
func convertErr(desc string, ldbErr error) database.Error {
// Use the driver-specific error code by default. The code below will
@ -1015,7 +1015,7 @@ func (tx *transaction) notifyActiveIters() {
tx.activeIterLock.RUnlock()
}
// checkClosed returns an error if the database or transaction is closed.
// checkClosed returns an error if the the database or transaction is closed.
func (tx *transaction) checkClosed() error {
// The transaction is no longer valid if it has been closed.
if tx.closed {
@ -1086,11 +1086,11 @@ func (tx *transaction) fetchKey(key []byte) []byte {
// NOTE: This function must only be called on a writable transaction. Since it
// is an internal helper function, it does not check.
func (tx *transaction) deleteKey(key []byte, notifyIterators bool) {
// Remove the key from the list of pending keys to be written on
// Remove the key from the list of pendings keys to be written on
// transaction commit if needed.
tx.pendingKeys.Delete(key)
// Add the key to the list to be deleted on transaction commit.
// Add the key to the list to be deleted on transaction commit.
tx.pendingRemove.Put(key, nil)
// Notify the active iterators about the change if the flag is set.
@ -1999,7 +1999,7 @@ func (db *db) Close() error {
return closeErr
}
// fileExists reports whether the named file or directory exists.
// filesExists reports whether the named file or directory exists.
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {

View File

@ -468,9 +468,9 @@ func (c *dbCache) commitTreaps(pendingKeys, pendingRemove TreapForEacher) error
})
}
// flush flushes the database cache to persistent storage. This involves
// syncing the block store and replaying all transactions that have been
// applied to the cache to the underlying database.
// flush flushes the database cache to persistent storage. This involes syncing
// the block store and replaying all transactions that have been applied to the
// cache to the underlying database.
//
// This function MUST be called with the database write lock held.
func (c *dbCache) flush() error {

View File

@ -79,7 +79,7 @@ func init() {
UseLogger: useLogger,
}
if err := database.RegisterDriver(driver); err != nil {
panic(fmt.Sprintf("Failed to register database driver '%s': %v",
panic(fmt.Sprintf("Failed to regiser database driver '%s': %v",
dbType, err))
}
}

View File

@ -4,7 +4,7 @@
// license that can be found in the LICENSE file.
/*
This test file is part of the ffldb package rather than the ffldb_test
This test file is part of the ffldb package rather than than the ffldb_test
package so it can bridge access to the internals to properly test cases which
are either not possible or can't reliably be tested via the public interface.
The functions are only exported while the tests are being run.

View File

@ -89,7 +89,7 @@ func loadBlocks(t *testing.T, dataFile string, network wire.CurrencyNet) ([]*dcr
}
// checkDbError ensures the passed error is a database.Error with an error code
// that matches the passed error code.
// that matches the passed error code.
func checkDbError(t *testing.T, testName string, gotErr error, wantErrCode database.ErrorCode) bool {
dbErr, ok := gotErr.(database.Error)
if !ok {
@ -230,7 +230,7 @@ func testDeleteValues(tc *testContext, bucket database.Bucket, values []keyPair)
return true
}
// testCursorInterface ensures the cursor interface is working properly by
// testCursorInterface ensures the cursor itnerface is working properly by
// exercising all of its functions on the passed bucket.
func testCursorInterface(tc *testContext, bucket database.Bucket) bool {
// Ensure a cursor can be obtained for the bucket.
@ -615,7 +615,7 @@ func rollbackOnPanic(t *testing.T, tx database.Tx) {
func testMetadataManualTxInterface(tc *testContext) bool {
// populateValues tests that populating values works as expected.
//
// When the writable flag is false, a read-only transaction is created,
// When the writable flag is false, a read-only tranasction is created,
// standard bucket tests for read-only transactions are performed, and
// the Commit function is checked to ensure it fails as expected.
//
@ -1189,7 +1189,7 @@ func testFetchBlockIOMissing(tc *testContext, tx database.Tx) bool {
// testFetchBlockIO ensures all of the block retrieval API functions work as
// expected for the provide set of blocks. The blocks must already be stored in
// the database, or at least stored into the passed transaction. It also
// the database, or at least stored into the the passed transaction. It also
// tests several error conditions such as ensuring the expected errors are
// returned when fetching blocks, headers, and regions that don't exist.
func testFetchBlockIO(tc *testContext, tx database.Tx) bool {

View File

@ -84,7 +84,7 @@ func loadBlocks(t *testing.T, dataFile string, network wire.CurrencyNet) ([]*dcr
}
// checkDbError ensures the passed error is a database.Error with an error code
// that matches the passed error code.
// that matches the passed error code.
func checkDbError(t *testing.T, testName string, gotErr error, wantErrCode database.ErrorCode) bool {
dbErr, ok := gotErr.(database.Error)
if !ok {
@ -142,7 +142,7 @@ func TestConvertErr(t *testing.T) {
func TestCornerCases(t *testing.T) {
t.Parallel()
// Create a file at the database path to force the open below to fail.
// Create a file at the datapase path to force the open below to fail.
dbPath := filepath.Join(os.TempDir(), "ffldb-errors-v2")
_ = os.RemoveAll(dbPath)
fi, err := os.Create(dbPath)
@ -195,7 +195,7 @@ func TestCornerCases(t *testing.T) {
ldb := idb.(*db).cache.ldb
ldb.Close()
// Ensure initialization errors in the underlying database work as
// Ensure initilization errors in the underlying database work as
// expected.
testName = "initDB: reinitialization"
wantErrCode = database.ErrDbNotOpen

View File

@ -449,7 +449,7 @@ type DB interface {
//
// NOTE: The transaction must be closed by calling Rollback or Commit on
// it when it is no longer needed. Failure to do so can result in
// unclaimed memory and/or inability to close the database due to locks
// unclaimed memory and/or inablity to close the database due to locks
// depending on the specific database implementation.
Begin(writable bool) (Tx, error)

Some files were not shown because too many files have changed in this diff Show More