diff --git a/.gitignore b/.gitignore index 5713f689..f015be4c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,33 +1,4 @@ -# Temp files +cmd/dcrd/dcrd +cmd/dcrd/dcrctl *~ - -# Databases -btcd.db -*-shm -*-wal - -# Log files -*.log - -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe +*.pyc diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 0ab0861c..00000000 --- a/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go -go: - - 1.3.3 - - 1.4.2 -sudo: false -before_install: - - gotools=golang.org/x/tools - - if [ "$TRAVIS_GO_VERSION" = "go1.3.3" ]; then gotools=code.google.com/p/go.tools; fi -install: - - go get -d -t -v ./... - - go get -v $gotools/cmd/cover - - go get -v $gotools/cmd/vet - - go get -v github.com/bradfitz/goimports - - go get -v github.com/golang/lint/golint -script: - - export PATH=$PATH:$HOME/gopath/bin - - ./goclean.sh diff --git a/CHANGES b/CHANGES index ba3be7c3..037908c2 100644 --- a/CHANGES +++ b/CHANGES @@ -393,7 +393,7 @@ Changes in 0.8.0-beta (Sun May 25 2014) - Reduce max bytes allowed for a standard nulldata transaction to 40 for compatibility with the reference client - Introduce a new btcnet package which houses all of the network params - for each network (mainnet, testnet3, regtest) to ultimately enable + for each network (mainnet, testnet, regtest) to ultimately enable easier addition and tweaking of networks without needing to change several packages - Fix several script discrepancies found by reference client test data @@ -410,7 +410,7 @@ Changes in 0.8.0-beta (Sun May 25 2014) - Provide options to control block template creation settings - Support the getwork RPC - Allow address identifiers to apply to more than one network since both - testnet3 and the regression test network unfortunately use the same + testnet and the regression test network unfortunately use the same identifier - RPC changes: - Set the content type for HTTP POST RPC connections to application/json diff --git a/LICENSE b/LICENSE index 671b93e1..53564486 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,5 @@ Copyright (c) 2013-2015 The btcsuite developers +Copyright (c) 2015-2016 The Decred developers Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above diff --git a/README.md b/README.md index 41d32487..2f0ea88e 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,7 @@ -btcd +dcrd ==== -[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)] -(https://travis-ci.org/btcsuite/btcd) - -btcd is an alternative full node bitcoin implementation written in Go (golang). +dcrd is a Decred full node implementation written in Go (golang). This project is currently under active development and is in a Beta state. It is extremely stable and has been in production use for over 6 months as of May @@ -13,7 +10,7 @@ we come out of beta. It properly downloads, validates, and serves the block chain using the exact rules (including bugs) for block acceptance as Bitcoin Core. We have taken -great care to avoid btcd causing a fork to the block chain. It passes all of +great care to avoid dcrd causing a fork to the block chain. 
It passes all of the 'official' block acceptance tests (https://github.com/TheBlueMatt/test-scripts) as well as all of the JSON test data in the Bitcoin Core code. @@ -24,13 +21,13 @@ transactions admitted to the pool follow the rules required by the block chain and also includes the same checks which filter transactions based on miner requirements ("standard" transactions) as Bitcoin Core. -One key difference between btcd and Bitcoin Core is that btcd does *NOT* include +One key difference between dcrd and Bitcoin Core is that dcrd does *NOT* include wallet functionality and this was a very intentional design decision. See the -blog entry [here](https://blog.conformal.com/btcd-not-your-moms-bitcoin-daemon) +blog entry [here](https://blog.conformal.com/dcrd-not-your-moms-bitcoin-daemon) for more details. This means you can't actually make or receive payments -directly with btcd. That functionality is provided by the -[btcwallet](https://github.com/btcsuite/btcwallet) and -[btcgui](https://github.com/btcsuite/btcgui) projects which are both under +directly with dcrd. That functionality is provided by the +[dcrwallet](https://github.com/decred/dcrwallet) and +[btcgui](https://github.com/decred/btcgui) projects which are both under active development. ## Requirements @@ -41,7 +38,7 @@ active development. #### Windows - MSI Available -https://github.com/btcsuite/btcd/releases +https://github.com/decred/dcrd/releases #### Linux/BSD/MacOSX/POSIX - Build from Source @@ -59,13 +56,13 @@ NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is recommended that `GOPATH` is set to a directory in your home directory such as `~/goprojects` to avoid write permission issues. -- Run the following command to obtain btcd, all dependencies, and install it: +- Run the following command to obtain dcrd, all dependencies, and install it: ```bash -$ go get -u github.com/btcsuite/btcd/... +$ go get -u github.com/decred/dcrd/... ``` -- btcd (and utilities) will now be installed in either ```$GOROOT/bin``` or +- dcrd (and utilities) will now be installed in either ```$GOROOT/bin``` or ```$GOPATH/bin``` depending on your configuration. If you did not already add the bin directory to your system path during Go installation, we recommend you do so now. @@ -78,70 +75,43 @@ Install a newer MSI #### Linux/BSD/MacOSX/POSIX - Build from Source -- Run the following command to update btcd, all dependencies, and install it: +- Run the following command to update dcrd, all dependencies, and install it: ```bash -$ go get -u -v github.com/btcsuite/btcd/... +$ go get -u -v github.com/decred/dcrd/... ``` ## Getting Started -btcd has several configuration options avilable to tweak how it runs, but all +dcrd has several configuration options avilable to tweak how it runs, but all of the basic operations described in the intro section work with zero configuration. #### Windows (Installed from MSI) -Launch btcd from your Start menu. +Launch dcrd from your Start menu. #### Linux/BSD/POSIX/Source ```bash -$ ./btcd +$ ./dcrd ```` ## IRC - irc.freenode.net -- channel #btcd -- [webchat](https://webchat.freenode.net/?channels=btcd) - -## Mailing lists - -- btcd: discussion of btcd and its packages. -- btcd-commits: readonly mail-out of source code changes. 
- -To subscribe to a given list, send email to list+subscribe@opensource.conformal.com +- channel #decred +- [webchat](https://webchat.freenode.net/?channels=decred) ## Issue Tracker -The [integrated github issue tracker](https://github.com/btcsuite/btcd/issues) +The [integrated github issue tracker](https://github.com/decred/dcrd/issues) is used for this project. ## Documentation -The documentation is a work-in-progress. It is located in the [docs](https://github.com/btcsuite/btcd/tree/master/docs) folder. - -## GPG Verification Key - -All official release tags are signed by Conformal so users can ensure the code -has not been tampered with and is coming from the btcsuite developers. To -verify the signature perform the following: - -- Download the public key from the Conformal website at - https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt - -- Import the public key into your GPG keyring: - ```bash - gpg --import GIT-GPG-KEY-conformal.txt - ``` - -- Verify the release tag with the following command where `TAG_NAME` is a - placeholder for the specific tag: - ```bash - git tag -v TAG_NAME - ``` +The documentation is a work-in-progress. It is located in the [docs](https://github.com/decred/dcrd/tree/master/docs) folder. ## License -btcd is licensed under the [copyfree](http://copyfree.org) ISC License. +dcrd is licensed under the [copyfree](http://copyfree.org) ISC License. diff --git a/addrmgr/addrmanager.go b/addrmgr/addrmanager.go index c2d03a50..b8b0bf30 100644 --- a/addrmgr/addrmanager.go +++ b/addrmgr/addrmanager.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -22,11 +23,12 @@ import ( "sync/atomic" "time" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" ) // AddrManager provides a concurrency safe address manager for caching potential -// peers on the bitcoin network. +// peers on the decred network. type AddrManager struct { mtx sync.Mutex peersFile string @@ -293,13 +295,14 @@ func (a *AddrManager) pickTried(bucket int) *list.Element { func (a *AddrManager) getNewBucket(netAddr, srcAddr *wire.NetAddress) int { // bitcoind: - // doublesha256(key + sourcegroup + int64(doublesha256(key + group + sourcegroup))%bucket_per_source_group) % num_new_buckets + // doublesha256(key + sourcegroup + int64(doublesha256(key + group + // + sourcegroup))%bucket_per_source_group) % num_new_buckets data1 := []byte{} data1 = append(data1, a.key[:]...) data1 = append(data1, []byte(GroupKey(netAddr))...) data1 = append(data1, []byte(GroupKey(srcAddr))...) - hash1 := wire.DoubleSha256(data1) + hash1 := chainhash.HashFuncB(data1) hash64 := binary.LittleEndian.Uint64(hash1) hash64 %= newBucketsPerGroup var hashbuf [8]byte @@ -309,17 +312,18 @@ func (a *AddrManager) getNewBucket(netAddr, srcAddr *wire.NetAddress) int { data2 = append(data2, GroupKey(srcAddr)...) data2 = append(data2, hashbuf[:]...) 
- hash2 := wire.DoubleSha256(data2) + hash2 := chainhash.HashFuncB(data2) return int(binary.LittleEndian.Uint64(hash2) % newBucketCount) } func (a *AddrManager) getTriedBucket(netAddr *wire.NetAddress) int { // bitcoind hashes this as: - // doublesha256(key + group + truncate_to_64bits(doublesha256(key)) % buckets_per_group) % num_buckets + // doublesha256(key + group + truncate_to_64bits(doublesha256(key)) + // % buckets_per_group) % num_buckets data1 := []byte{} data1 = append(data1, a.key[:]...) data1 = append(data1, []byte(NetAddressKey(netAddr))...) - hash1 := wire.DoubleSha256(data1) + hash1 := chainhash.HashFuncB(data1) hash64 := binary.LittleEndian.Uint64(hash1) hash64 %= triedBucketsPerGroup var hashbuf [8]byte @@ -329,7 +333,7 @@ func (a *AddrManager) getTriedBucket(netAddr *wire.NetAddress) int { data2 = append(data2, GroupKey(netAddr)...) data2 = append(data2, hashbuf[:]...) - hash2 := wire.DoubleSha256(data2) + hash2 := chainhash.HashFuncB(data2) return int(binary.LittleEndian.Uint64(hash2) % triedBucketCount) } @@ -1085,7 +1089,7 @@ func (a *AddrManager) GetBestLocalAddress(remoteAddr *wire.NetAddress) *wire.Net return bestAddress } -// New returns a new bitcoin address manager. +// New returns a new decred address manager. // Use Start to begin processing asynchronous address updates. func New(dataDir string, lookupFunc func(string) ([]net.IP, error)) *AddrManager { am := AddrManager{ diff --git a/addrmgr/addrmanager_test.go b/addrmgr/addrmanager_test.go index a6d2fb92..43492df9 100644 --- a/addrmgr/addrmanager_test.go +++ b/addrmgr/addrmanager_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -12,8 +13,8 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/addrmgr" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/addrmgr" + "github.com/decred/dcrd/wire" ) // naTest is used to describe a test to be perfomed against the NetAddressKey diff --git a/addrmgr/doc.go b/addrmgr/doc.go index 0f1de09f..b7a5402d 100644 --- a/addrmgr/doc.go +++ b/addrmgr/doc.go @@ -1,14 +1,15 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. /* -Package addrmgr implements concurrency safe Bitcoin address manager. +Package addrmgr implements concurrency safe Decred address manager. Address Manager Overview -In order maintain the peer-to-peer Bitcoin network, there needs to be a source -of addresses to connect to as nodes come and go. The Bitcoin protocol provides +In order maintain the peer-to-peer Decred network, there needs to be a source +of addresses to connect to as nodes come and go. The Decred protocol provides a the getaddr and addr messages to allow peers to communicate known addresses with each other. However, there needs to a mechanism to store those results and select peers from them. It is also important to note that remote peers can't diff --git a/addrmgr/internal_test.go b/addrmgr/internal_test.go index e50e923c..5c0ae128 100644 --- a/addrmgr/internal_test.go +++ b/addrmgr/internal_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
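As an aside on the getNewBucket and getTriedBucket hunks above: the change swaps wire.DoubleSha256 for chainhash.HashFuncB but keeps the two-stage bucket arithmetic intact. A minimal sketch of that arithmetic, using plain double SHA-256 as a stand-in for the hash and illustrative bucket constants (the real key, group strings, and bucket counts live in the address manager and are not reproduced here):

```go
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// Illustrative constants; the address manager defines its own bucket counts.
const (
	newBucketsPerGroup = 32
	newBucketCount     = 1024
)

// doubleHash stands in for the hash function; the diff replaces
// wire.DoubleSha256 with chainhash.HashFuncB for Decred.
func doubleHash(b []byte) []byte {
	first := sha256.Sum256(b)
	second := sha256.Sum256(first[:])
	return second[:]
}

// newBucketIndex mirrors the shape of getNewBucket:
// hash(key + srcGroup + (hash(key + group + srcGroup) % bucketsPerGroup)) % bucketCount.
func newBucketIndex(key, group, srcGroup []byte) int {
	data1 := make([]byte, 0, len(key)+len(group)+len(srcGroup))
	data1 = append(data1, key...)
	data1 = append(data1, group...)
	data1 = append(data1, srcGroup...)
	hash64 := binary.LittleEndian.Uint64(doubleHash(data1)) % newBucketsPerGroup

	var hashbuf [8]byte
	binary.LittleEndian.PutUint64(hashbuf[:], hash64)

	data2 := make([]byte, 0, len(key)+len(srcGroup)+len(hashbuf))
	data2 = append(data2, key...)
	data2 = append(data2, srcGroup...)
	data2 = append(data2, hashbuf[:]...)
	return int(binary.LittleEndian.Uint64(doubleHash(data2)) % newBucketCount)
}

func main() {
	key := []byte("per-node-random-key")
	fmt.Println(newBucketIndex(key, []byte("203.0.113.0"), []byte("198.51.100.0")))
}
```

Reducing the first hash modulo bucketsPerGroup before feeding it into the second hash is what bounds how many buckets any single source group can influence.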
@@ -7,7 +8,7 @@ package addrmgr import ( "time" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/wire" ) func TstKnownAddressIsBad(ka *KnownAddress) bool { diff --git a/addrmgr/knownaddress.go b/addrmgr/knownaddress.go index 73b6f0f3..aeee16c6 100644 --- a/addrmgr/knownaddress.go +++ b/addrmgr/knownaddress.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -7,7 +8,7 @@ package addrmgr import ( "time" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/wire" ) // KnownAddress tracks information about a known network address that is used diff --git a/addrmgr/knownaddress_test.go b/addrmgr/knownaddress_test.go index 05609ec9..6576f331 100644 --- a/addrmgr/knownaddress_test.go +++ b/addrmgr/knownaddress_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -9,8 +10,8 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/addrmgr" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/addrmgr" + "github.com/decred/dcrd/wire" ) func TestChance(t *testing.T) { diff --git a/addrmgr/log.go b/addrmgr/log.go index b3ebbd15..064a081d 100644 --- a/addrmgr/log.go +++ b/addrmgr/log.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. diff --git a/addrmgr/network.go b/addrmgr/network.go index e891ad6f..6802e937 100644 --- a/addrmgr/network.go +++ b/addrmgr/network.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,7 +9,7 @@ import ( "fmt" "net" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/wire" ) var ( diff --git a/addrmgr/network_test.go b/addrmgr/network_test.go index 98dc054e..3de0d327 100644 --- a/addrmgr/network_test.go +++ b/addrmgr/network_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
@@ -9,8 +10,8 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/addrmgr" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/addrmgr" + "github.com/decred/dcrd/wire" ) // TestIPTypes ensures the various functions which determine the type of an IP diff --git a/addrmgr/test_coverage.txt b/addrmgr/test_coverage.txt index c67e0f6d..59a0090c 100644 --- a/addrmgr/test_coverage.txt +++ b/addrmgr/test_coverage.txt @@ -1,62 +1,62 @@ -github.com/conformal/btcd/addrmgr/network.go GroupKey 100.00% (23/23) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.reset 100.00% (6/6) -github.com/conformal/btcd/addrmgr/network.go IsRFC5737 100.00% (4/4) -github.com/conformal/btcd/addrmgr/network.go IsRFC1918 100.00% (4/4) -github.com/conformal/btcd/addrmgr/addrmanager.go New 100.00% (3/3) -github.com/conformal/btcd/addrmgr/addrmanager.go NetAddressKey 100.00% (2/2) -github.com/conformal/btcd/addrmgr/network.go IsRFC4862 100.00% (1/1) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.numAddresses 100.00% (1/1) -github.com/conformal/btcd/addrmgr/log.go init 100.00% (1/1) -github.com/conformal/btcd/addrmgr/log.go DisableLog 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go ipNet 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsIPv4 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsLocal 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsOnionCatTor 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC2544 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC3849 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC3927 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC3964 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC4193 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC4380 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC4843 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC6052 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC6145 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC6598 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsValid 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRoutable 100.00% (1/1) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetBestLocalAddress 94.74% (18/19) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddLocalAddress 90.91% (10/11) -github.com/conformal/btcd/addrmgr/addrmanager.go getReachabilityFrom 51.52% (17/33) -github.com/conformal/btcd/addrmgr/addrmanager.go ipString 50.00% (2/4) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetAddress 9.30% (4/43) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.deserializePeers 0.00% (0/50) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Good 0.00% (0/44) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.savePeers 0.00% (0/39) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.updateAddress 0.00% (0/30) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.expireNew 0.00% (0/22) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddressCache 0.00% (0/16) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.HostToNetAddress 0.00% (0/15) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.getNewBucket 0.00% (0/15) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddressByIP 0.00% (0/14) -github.com/conformal/btcd/addrmgr/addrmanager.go 
AddrManager.getTriedBucket 0.00% (0/14) -github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.chance 0.00% (0/13) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.loadPeers 0.00% (0/11) -github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.isBad 0.00% (0/11) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Connected 0.00% (0/10) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.addressHandler 0.00% (0/9) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.pickTried 0.00% (0/8) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.DeserializeNetAddress 0.00% (0/7) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Stop 0.00% (0/7) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Attempt 0.00% (0/7) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Start 0.00% (0/6) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddresses 0.00% (0/4) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NeedMoreAddresses 0.00% (0/3) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NumAddresses 0.00% (0/3) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddress 0.00% (0/3) -github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.LastAttempt 0.00% (0/1) -github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.NetAddress 0.00% (0/1) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.find 0.00% (0/1) -github.com/conformal/btcd/addrmgr/log.go UseLogger 0.00% (0/1) -github.com/conformal/btcd/addrmgr --------------------------------- 21.04% (113/537) +github.com/decred/dcrd/addrmgr/network.go GroupKey 100.00% (23/23) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.GetBestLocalAddress 100.00% (19/19) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.getNewBucket 100.00% (15/15) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.AddAddressByIP 100.00% (14/14) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.getTriedBucket 100.00% (14/14) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.AddLocalAddress 100.00% (11/11) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.pickTried 100.00% (8/8) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.reset 100.00% (6/6) +github.com/decred/dcrd/addrmgr/network.go IsRFC1918 100.00% (4/4) +github.com/decred/dcrd/addrmgr/network.go IsRFC5737 100.00% (4/4) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.AddAddresses 100.00% (4/4) +github.com/decred/dcrd/addrmgr/addrmanager.go New 100.00% (3/3) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.NeedMoreAddresses 100.00% (3/3) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.AddAddress 100.00% (3/3) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.NumAddresses 100.00% (3/3) +github.com/decred/dcrd/addrmgr/addrmanager.go NetAddressKey 100.00% (2/2) +github.com/decred/dcrd/addrmgr/network.go IsRFC4862 100.00% (1/1) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.numAddresses 100.00% (1/1) +github.com/decred/dcrd/addrmgr/log.go init 100.00% (1/1) +github.com/decred/dcrd/addrmgr/knownaddress.go KnownAddress.NetAddress 100.00% (1/1) +github.com/decred/dcrd/addrmgr/knownaddress.go KnownAddress.LastAttempt 100.00% (1/1) +github.com/decred/dcrd/addrmgr/log.go DisableLog 100.00% (1/1) +github.com/decred/dcrd/addrmgr/network.go ipNet 100.00% (1/1) +github.com/decred/dcrd/addrmgr/network.go IsIPv4 100.00% (1/1) 
+github.com/decred/dcrd/addrmgr/network.go IsLocal 100.00% (1/1) +github.com/decred/dcrd/addrmgr/network.go IsOnionCatTor 100.00% (1/1) +github.com/decred/dcrd/addrmgr/network.go IsRFC2544 100.00% (1/1) +github.com/decred/dcrd/addrmgr/network.go IsRFC3849 100.00% (1/1) +github.com/decred/dcrd/addrmgr/network.go IsRFC3927 100.00% (1/1) +github.com/decred/dcrd/addrmgr/network.go IsRFC3964 100.00% (1/1) +github.com/decred/dcrd/addrmgr/network.go IsRFC4193 100.00% (1/1) +github.com/decred/dcrd/addrmgr/network.go IsRFC4380 100.00% (1/1) +github.com/decred/dcrd/addrmgr/network.go IsRFC4843 100.00% (1/1) +github.com/decred/dcrd/addrmgr/network.go IsRFC6052 100.00% (1/1) +github.com/decred/dcrd/addrmgr/network.go IsRFC6145 100.00% (1/1) +github.com/decred/dcrd/addrmgr/network.go IsRFC6598 100.00% (1/1) +github.com/decred/dcrd/addrmgr/network.go IsValid 100.00% (1/1) +github.com/decred/dcrd/addrmgr/network.go IsRoutable 100.00% (1/1) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.find 100.00% (1/1) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.GetAddress 95.35% (41/43) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.Good 93.18% (41/44) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.Connected 90.00% (9/10) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.addressHandler 88.89% (8/9) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.AddressCache 87.50% (14/16) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.Attempt 85.71% (6/7) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.Start 83.33% (5/6) +github.com/decred/dcrd/addrmgr/knownaddress.go KnownAddress.chance 76.92% (10/13) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.DeserializeNetAddress 71.43% (5/7) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.Stop 71.43% (5/7) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.updateAddress 53.33% (16/30) +github.com/decred/dcrd/addrmgr/addrmanager.go getReachabilityFrom 51.52% (17/33) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.savePeers 51.28% (20/39) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.expireNew 50.00% (11/22) +github.com/decred/dcrd/addrmgr/addrmanager.go ipString 50.00% (2/4) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.loadPeers 45.45% (5/11) +github.com/decred/dcrd/addrmgr/knownaddress.go KnownAddress.isBad 36.36% (4/11) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.HostToNetAddress 26.67% (4/15) +github.com/decred/dcrd/addrmgr/addrmanager.go AddrManager.deserializePeers 6.00% (3/50) +github.com/decred/dcrd/addrmgr/log.go UseLogger 0.00% (0/1) +github.com/decred/dcrd/addrmgr --------------------------------- 71.69% (385/537) diff --git a/blockchain/README.md b/blockchain/README.md index d4774d80..8b35147e 100644 --- a/blockchain/README.md +++ b/blockchain/README.md @@ -1,11 +1,10 @@ blockchain ========== -[![Build Status](http://img.shields.io/travis/btcsuite/btcd.svg)] -(https://travis-ci.org/btcsuite/btcd) [![ISC License] +[![ISC License] (http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) -Package blockchain implements bitcoin block handling and chain selection rules. +Package blockchain implements decred block handling and chain selection rules. The test coverage is currently only around 60%, but will be increasing over time. See `test_coverage.txt` for the gocov coverage report. 
Alternatively, if you are running a POSIX OS, you can run the `cov_report.sh` script for a @@ -15,29 +14,29 @@ There is an associated blog post about the release of this package [here](https://blog.conformal.com/btcchain-the-bitcoin-chain-package-from-bctd/). This package has intentionally been designed so it can be used as a standalone -package for any projects needing to handle processing of blocks into the bitcoin +package for any projects needing to handle processing of blocks into the decred block chain. ## Documentation [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)] -(http://godoc.org/github.com/btcsuite/btcd/blockchain) +(http://godoc.org/github.com/decred/dcrd/blockchain) Full `go doc` style documentation for the project can be viewed online without installing this package by using the GoDoc site here: -http://godoc.org/github.com/btcsuite/btcd/blockchain +http://godoc.org/github.com/decred/dcrd/blockchain You can also view the documentation locally once the package is installed with the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to -http://localhost:6060/pkg/github.com/btcsuite/btcd/blockchain +http://localhost:6060/pkg/github.com/decred/dcrd/blockchain ## Installation ```bash -$ go get github.com/btcsuite/btcd/blockchain +$ go get github.com/decred/dcrd/blockchain ``` -## Bitcoin Chain Processing Overview +## Decred Chain Processing Overview Before a block is allowed into the block chain, it must go through an intensive series of validation rules. The following list serves as a general outline of @@ -75,43 +74,23 @@ is by no means exhaustive: ## Examples * [ProcessBlock Example] - (http://godoc.org/github.com/btcsuite/btcd/blockchain#example-BlockChain-ProcessBlock) + (http://godoc.org/github.com/decred/dcrd/blockchain#example-BlockChain-ProcessBlock) Demonstrates how to create a new chain instance and use ProcessBlock to attempt to attempt add a block to the chain. This example intentionally attempts to insert a duplicate genesis block to illustrate how an invalid block is handled. * [CompactToBig Example] - (http://godoc.org/github.com/btcsuite/btcd/blockchain#example-CompactToBig) + (http://godoc.org/github.com/decred/dcrd/blockchain#example-CompactToBig) Demonstrates how to convert the compact "bits" in a block header which represent the target difficulty to a big integer and display it using the typical hex notation. * [BigToCompact Example] - (http://godoc.org/github.com/btcsuite/btcd/blockchain#example-BigToCompact) + (http://godoc.org/github.com/decred/dcrd/blockchain#example-BigToCompact) Demonstrates how to convert how to convert a target difficulty into the compact "bits" in a block header which represent that target difficulty. -## GPG Verification Key - -All official release tags are signed by Conformal so users can ensure the code -has not been tampered with and is coming from the btcsuite developers. 
To -verify the signature perform the following: - -- Download the public key from the Conformal website at - https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt - -- Import the public key into your GPG keyring: - ```bash - gpg --import GIT-GPG-KEY-conformal.txt - ``` - -- Verify the release tag with the following command where `TAG_NAME` is a - placeholder for the specific tag: - ```bash - git tag -v TAG_NAME - ``` - ## License diff --git a/blockchain/accept.go b/blockchain/accept.go index c5b07d52..547f354d 100644 --- a/blockchain/accept.go +++ b/blockchain/accept.go @@ -1,10 +1,176 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package blockchain -import "github.com/btcsuite/btcutil" +import ( + "encoding/binary" + "fmt" + "math" + "time" + + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrutil" +) + +// checkCoinbaseUniqueHeight checks to ensure that for all blocks height > 1 +// that the coinbase contains the height encoding to make coinbase hash collisions +// impossible. +func checkCoinbaseUniqueHeight(blockHeight int64, block *dcrutil.Block) error { + if !(len(block.MsgBlock().Transactions) > 0) { + str := fmt.Sprintf("block %v has no coinbase", block.Sha()) + return ruleError(ErrNoTransactions, str) + } + + // Coinbase TxOut[0] is always tax, TxOut[1] is always + // height + extranonce, so at least two outputs must + // exist. + if !(len(block.MsgBlock().Transactions[0].TxOut) > 1) { + str := fmt.Sprintf("block %v is missing necessary coinbase "+ + "outputs", block.Sha()) + return ruleError(ErrFirstTxNotCoinbase, str) + } + + // The first 4 bytes of the NullData output must be the + // encoded height of the block, so that every coinbase + // created has a unique transaction hash. + nullData, err := txscript.GetNullDataContent( + block.MsgBlock().Transactions[0].TxOut[1].Version, + block.MsgBlock().Transactions[0].TxOut[1].PkScript) + if err != nil { + str := fmt.Sprintf("block %v txOut 1 has wrong pkScript "+ + "type", block.Sha()) + return ruleError(ErrFirstTxNotCoinbase, str) + } + + if len(nullData) < 4 { + str := fmt.Sprintf("block %v txOut 1 has too short nullData "+ + "push to contain height", block.Sha()) + return ruleError(ErrFirstTxNotCoinbase, str) + } + + // Check the height and ensure it is correct. + cbHeight := binary.LittleEndian.Uint32(nullData[0:4]) + if cbHeight != uint32(blockHeight) { + prevBlock := block.MsgBlock().Header.PrevBlock + str := fmt.Sprintf("block %v txOut 1 has wrong height in "+ + "coinbase; want %v, got %v; prevBlock %v, header height %v", + block.Sha(), blockHeight, cbHeight, prevBlock, + block.MsgBlock().Header.Height) + return ruleError(ErrCoinbaseHeight, str) + } + + return nil +} + +// IsFinalizedTransaction determines whether or not a transaction is finalized. +func IsFinalizedTransaction(tx *dcrutil.Tx, blockHeight int64, + blockTime time.Time) bool { + msgTx := tx.MsgTx() + + // Lock time of zero means the transaction is finalized. + lockTime := msgTx.LockTime + if lockTime == 0 { + return true + } + + // The lock time field of a transaction is either a block height at + // which the transaction is finalized or a timestamp depending on if the + // value is before the txscript.LockTimeThreshold. When it is under the + // threshold it is a block height. 
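checkCoinbaseUniqueHeight earlier in this hunk reads the block height back out of the first four bytes of the coinbase's second output with binary.LittleEndian.Uint32 and rejects a mismatch. A small sketch of the matching encode/decode round trip; encodeCoinbaseHeight is a hypothetical helper for illustration, and a real coinbase appends extra nonce bytes after the height:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// encodeCoinbaseHeight is a hypothetical helper showing the layout the check
// above expects: the block height as a little-endian uint32, followed by any
// extra nonce bytes the miner wants to add.
func encodeCoinbaseHeight(height uint32, extraNonce []byte) []byte {
	data := make([]byte, 4, 4+len(extraNonce))
	binary.LittleEndian.PutUint32(data, height)
	return append(data, extraNonce...)
}

// decodeCoinbaseHeight mirrors the validation side: at least four bytes must
// be present and the decoded value must match the expected block height.
func decodeCoinbaseHeight(nullData []byte, wantHeight uint32) error {
	if len(nullData) < 4 {
		return errors.New("nulldata push too short to contain height")
	}
	if got := binary.LittleEndian.Uint32(nullData[0:4]); got != wantHeight {
		return fmt.Errorf("wrong coinbase height: want %d, got %d", wantHeight, got)
	}
	return nil
}

func main() {
	push := encodeCoinbaseHeight(123456, []byte{0xde, 0xad, 0xbe, 0xef})
	fmt.Println(decodeCoinbaseHeight(push, 123456)) // <nil>
	fmt.Println(decodeCoinbaseHeight(push, 123457)) // mismatch error
}
```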
+ blockTimeOrHeight := int64(0) + if lockTime < txscript.LockTimeThreshold { + blockTimeOrHeight = blockHeight + } else { + blockTimeOrHeight = blockTime.Unix() + } + if int64(lockTime) < blockTimeOrHeight { + return true + } + + // At this point, the transaction's lock time hasn't occured yet, but + // the transaction might still be finalized if the sequence number + // for all transaction inputs is maxed out. + for _, txIn := range msgTx.TxIn { + if txIn.Sequence != math.MaxUint32 { + return false + } + } + return true +} + +// checkBlockContext peforms several validation checks on the block which depend +// on its position within the block chain. +// +// The flags modify the behavior of this function as follows: +// - BFFastAdd: The transaction are not checked to see if they are finalized +// and the somewhat expensive duplication transaction check is not performed. +// +// The flags are also passed to checkBlockHeaderContext. See its documentation +// for how the flags modify its behavior. +func (b *BlockChain) checkBlockContext(block *dcrutil.Block, prevNode *blockNode, + flags BehaviorFlags) error { + // The genesis block is valid by definition. + if prevNode == nil { + return nil + } + + // Perform all block header related validation checks. + header := &block.MsgBlock().Header + err := b.checkBlockHeaderContext(header, prevNode, flags) + if err != nil { + return err + } + + fastAdd := flags&BFFastAdd == BFFastAdd + if !fastAdd { + // The height of this block is one more than the referenced + // previous block. + blockHeight := prevNode.height + 1 + + // Ensure all transactions in the block are finalized. + for _, tx := range block.Transactions() { + if !IsFinalizedTransaction(tx, blockHeight, + header.Timestamp) { + + str := fmt.Sprintf("block contains unfinalized regular "+ + "transaction %v", tx.Sha()) + return ruleError(ErrUnfinalizedTx, str) + } + } + for _, stx := range block.STransactions() { + if !IsFinalizedTransaction(stx, blockHeight, + header.Timestamp) { + + str := fmt.Sprintf("block contains unfinalized stake "+ + "transaction %v", stx.Sha()) + return ruleError(ErrUnfinalizedTx, str) + } + } + + // Check that the node is at the correct height in the blockchain, + // as specified in the block header. + if blockHeight != int64(block.MsgBlock().Header.Height) { + errStr := fmt.Sprintf("Block header height invalid; expected %v"+ + " but %v was found", blockHeight, header.Height) + return ruleError(ErrBadBlockHeight, errStr) + } + + // Check that the coinbase contains at minimum the block + // height in output 1. + if blockHeight > 1 { + err := checkCoinbaseUniqueHeight(blockHeight, block) + if err != nil { + return err + } + } + } + + return nil +} // maybeAcceptBlock potentially accepts a block into the memory block chain. // It performs several validation checks which depend on its position within @@ -14,18 +180,16 @@ import "github.com/btcsuite/btcutil" // The flags modify the behavior of this function as follows: // - BFDryRun: The memory chain index will not be pruned and no accept // notification will be sent since the block is not being accepted. -// -// The flags are also passed to checkBlockContext and connectBestChain. See -// their documentation for how the flags modify their behavior. 
-func (b *BlockChain) maybeAcceptBlock(block *btcutil.Block, flags BehaviorFlags) error { +func (b *BlockChain) maybeAcceptBlock(block *dcrutil.Block, + flags BehaviorFlags) (bool, error) { dryRun := flags&BFDryRun == BFDryRun // Get a block node for the block previous to this one. Will be nil // if this is the genesis block. prevNode, err := b.getPrevNodeFromBlock(block) if err != nil { - log.Errorf("getPrevNodeFromBlock: %v", err) - return err + log.Debugf("getPrevNodeFromBlock: %v", err) + return false, err } // The height of this block is one more than the referenced previous @@ -40,7 +204,7 @@ func (b *BlockChain) maybeAcceptBlock(block *btcutil.Block, flags BehaviorFlags) // position of the block within the block chain. err = b.checkBlockContext(block, prevNode, flags) if err != nil { - return err + return false, err } // Prune block nodes which are no longer needed before creating @@ -48,14 +212,22 @@ func (b *BlockChain) maybeAcceptBlock(block *btcutil.Block, flags BehaviorFlags) if !dryRun { err = b.pruneBlockNodes() if err != nil { - return err + return false, err } } // Create a new block node for the block and add it to the in-memory // block chain (could be either a side chain or the main chain). blockHeader := &block.MsgBlock().Header - newNode := newBlockNode(blockHeader, block.Sha(), blockHeight) + voteBitsStake := make([]uint16, 0) + for _, stx := range block.STransactions() { + if is, _ := stake.IsSSGen(stx); is { + vb := stake.GetSSGenVoteBits(stx) + voteBitsStake = append(voteBitsStake, vb) + } + } + + newNode := newBlockNode(blockHeader, block.Sha(), blockHeight, voteBitsStake) if prevNode != nil { newNode.parent = prevNode newNode.height = blockHeight @@ -65,17 +237,19 @@ func (b *BlockChain) maybeAcceptBlock(block *btcutil.Block, flags BehaviorFlags) // Connect the passed block to the chain while respecting proper chain // selection according to the chain with the most proof of work. This // also handles validation of the transaction scripts. - err = b.connectBestChain(newNode, block, flags) + var onMainChain bool + onMainChain, err = b.connectBestChain(newNode, block, flags) if err != nil { - return err + return false, err } // Notify the caller that the new block was accepted into the block // chain. The caller would typically want to react by relaying the // inventory to other peers. if !dryRun { - b.sendNotification(NTBlockAccepted, block) + b.sendNotification(NTBlockAccepted, + &BlockAcceptedNtfnsData{onMainChain, block}) } - return nil + return onMainChain, nil } diff --git a/blockchain/bench_test.go b/blockchain/bench_test.go index a680feca..bfced7ee 100644 --- a/blockchain/bench_test.go +++ b/blockchain/bench_test.go @@ -1,32 +1,11 @@ // Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package blockchain_test -import ( - "testing" +import () - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcutil" -) - -// BenchmarkIsCoinBase performs a simple benchmark against the IsCoinBase -// function. -func BenchmarkIsCoinBase(b *testing.B) { - tx, _ := btcutil.NewBlock(&Block100000).Tx(1) - b.ResetTimer() - for i := 0; i < b.N; i++ { - blockchain.IsCoinBase(tx) - } -} - -// BenchmarkIsCoinBaseTx performs a simple benchmark against the IsCoinBaseTx -// function. 
-func BenchmarkIsCoinBaseTx(b *testing.B) { - tx := Block100000.Transactions[1] - b.ResetTimer() - for i := 0; i < b.N; i++ { - blockchain.IsCoinBaseTx(tx) - } -} +// TODO Make benchmarking tests for various functions, such as sidechain +// evaluation. diff --git a/blockchain/blocklocator.go b/blockchain/blocklocator.go index 7c9019bc..4f35e78e 100644 --- a/blockchain/blocklocator.go +++ b/blockchain/blocklocator.go @@ -1,11 +1,13 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package blockchain import ( - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" ) // BlockLocator is used to help locate a specific block. The algorithm for @@ -23,7 +25,7 @@ import ( // // The block locator for block 17a would be the hashes of blocks: // [17a 16a 15 14 13 12 11 10 9 8 6 2 genesis] -type BlockLocator []*wire.ShaHash +type BlockLocator []*chainhash.Hash // BlockLocatorFromHash returns a block locator for the passed block hash. // See BlockLocator for details on the algotirhm used to create a block locator. @@ -35,7 +37,7 @@ type BlockLocator []*wire.ShaHash // therefore the block locator will only consist of the genesis hash // - If the passed hash is not currently known, the block locator will only // consist of the passed hash -func (b *BlockChain) BlockLocatorFromHash(hash *wire.ShaHash) BlockLocator { +func (b *BlockChain) BlockLocatorFromHash(hash *chainhash.Hash) BlockLocator { // The locator contains the requested hash at the very least. locator := make(BlockLocator, 0, wire.MaxBlockLocatorsPerMsg) locator = append(locator, hash) diff --git a/blockchain/chain.go b/blockchain/chain.go index a602f1ec..1b724e82 100644 --- a/blockchain/chain.go +++ b/blockchain/chain.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -13,22 +14,28 @@ import ( "sync" "time" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) const ( // maxOrphanBlocks is the maximum number of orphan blocks that can be // queued. - maxOrphanBlocks = 100 + maxOrphanBlocks = 500 - // minMemoryNodes is the minimum number of consecutive nodes needed + // minMemoryNodesLocal is the minimum number of consecutive nodes needed // in memory in order to perform all necessary validation. It is used // to determine when it's safe to prune nodes from memory without // causing constant dynamic reloading. - minMemoryNodes = BlocksPerRetarget + minMemoryNodesLocal = 4096 + + // searchDepth is the distance in blocks to search down the blockchain + // to find some parent. + searchDepth = 2048 ) // ErrIndexAlreadyInitialized describes an error that indicates the block index @@ -49,13 +56,13 @@ type blockNode struct { children []*blockNode // hash is the double sha 256 of the block. - hash *wire.ShaHash + hash *chainhash.Hash // parentHash is the double sha 256 of the parent block. 
This is kept // here over simply relying on parent.hash directly since block nodes // are sparse and the parent node might not be in memory when its hash // is needed. - parentHash *wire.ShaHash + parentHash *chainhash.Hash // height is the position in the block chain. height int64 @@ -69,6 +76,19 @@ type blockNode struct { // ancestor when switching chains. inMainChain bool + // outputAmtsTotal is amount of fees in the tx tree regular of the parent plus + // the value of the coinbase, which may or may not be given to the child node + // depending on the voters. Doesn't get set until you actually attempt to + // connect the block and calculate the fees/reward for it. + // DECRED TODO: Is this actually used anywhere? If not prune it. + outputAmtsTotal int64 + + // Decred: Keep the full block header. + header wire.BlockHeader + + // VoteBits for the stake voters. + voteBits []uint16 + // Some fields from block headers to aid in best chain selection. version int32 bits uint32 @@ -79,7 +99,8 @@ type blockNode struct { // completely disconnected from the chain and the workSum value is just the work // for the passed block. The work sum is updated accordingly when the node is // inserted into a chain. -func newBlockNode(blockHeader *wire.BlockHeader, blockSha *wire.ShaHash, height int64) *blockNode { +func newBlockNode(blockHeader *wire.BlockHeader, blockSha *chainhash.Hash, + height int64, voteBits []uint16) *blockNode { // Make a copy of the hash so the node doesn't keep a reference to part // of the full block/block header preventing it from being garbage // collected. @@ -92,6 +113,7 @@ func newBlockNode(blockHeader *wire.BlockHeader, blockSha *wire.ShaHash, height version: blockHeader.Version, bits: blockHeader.Bits, timestamp: blockHeader.Timestamp, + header: *blockHeader, } return &node } @@ -100,7 +122,7 @@ func newBlockNode(blockHeader *wire.BlockHeader, blockSha *wire.ShaHash, height // is a normal block plus an expiration time to prevent caching the orphan // forever. type orphanBlock struct { - block *btcutil.Block + block *dcrutil.Block expiration time.Time } @@ -137,28 +159,39 @@ func removeChildNode(children []*blockNode, node *blockNode) []*blockNode { return children } -// BlockChain provides functions for working with the bitcoin block chain. +// BlockChain provides functions for working with the decred block chain. // It includes functionality such as rejecting duplicate blocks, ensuring blocks // follow all rules, orphan handling, checkpoint handling, and best chain // selection with reorganization. 
type BlockChain struct { - db database.Db - chainParams *chaincfg.Params - checkpointsByHeight map[int64]*chaincfg.Checkpoint - notifications NotificationCallback - root *blockNode - bestChain *blockNode - index map[wire.ShaHash]*blockNode - depNodes map[wire.ShaHash][]*blockNode - orphans map[wire.ShaHash]*orphanBlock - prevOrphans map[wire.ShaHash][]*orphanBlock - oldestOrphan *orphanBlock - orphanLock sync.RWMutex - blockCache map[wire.ShaHash]*btcutil.Block - noVerify bool - noCheckpoints bool - nextCheckpoint *chaincfg.Checkpoint - checkpointBlock *btcutil.Block + db database.Db + tmdb *stake.TicketDB + chainParams *chaincfg.Params + checkpointsByHeight map[int64]*chaincfg.Checkpoint + notifications NotificationCallback + minMemoryNodes int64 + blocksPerRetarget int64 + stakeValidationHeight int64 + root *blockNode + bestChain *blockNode + index map[chainhash.Hash]*blockNode + depNodes map[chainhash.Hash][]*blockNode + orphans map[chainhash.Hash]*orphanBlock + prevOrphans map[chainhash.Hash][]*orphanBlock + oldestOrphan *orphanBlock + orphanLock sync.RWMutex + blockCache map[chainhash.Hash]*dcrutil.Block + blockCacheLock sync.RWMutex + noVerify bool + noCheckpoints bool + nextCheckpoint *chaincfg.Checkpoint + checkpointBlock *dcrutil.Block +} + +// StakeValidationHeight returns the height at which proof of stake validation +// begins for proof of work block headers. +func (b *BlockChain) StakeValidationHeight() int64 { + return b.stakeValidationHeight } // DisableVerify provides a mechanism to disable transaction script validation @@ -170,12 +203,33 @@ func (b *BlockChain) DisableVerify(disable bool) { b.noVerify = disable } +// MissedTickets returns all currently missed tickets from the stake database. +// +// This function is NOT safe for concurrent access. +func (b *BlockChain) MissedTickets() (stake.SStxMemMap, error) { + missed, err := b.tmdb.DumpMissedTickets() + if err != nil { + return nil, err + } + + return missed, nil +} + +// TicketsWithAddress returns a slice of ticket hashes that are currently live +// corresponding to the given address. +// +// This function is NOT safe for concurrent access. +func (b *BlockChain) TicketsWithAddress(address dcrutil.Address) ([]chainhash.Hash, + error) { + return b.tmdb.GetLiveTicketsForAddress(address) +} + // HaveBlock returns whether or not the chain instance has the block represented // by the passed hash. This includes checking the various places a block can // be like part of the main chain, on a side chain, or in the orphan pool. // // This function is NOT safe for concurrent access. -func (b *BlockChain) HaveBlock(hash *wire.ShaHash) (bool, error) { +func (b *BlockChain) HaveBlock(hash *chainhash.Hash) (bool, error) { exists, err := b.blockExists(hash) if err != nil { return false, err @@ -193,7 +247,7 @@ func (b *BlockChain) HaveBlock(hash *wire.ShaHash) (bool, error) { // duplicate orphans and react accordingly. // // This function is safe for concurrent access. -func (b *BlockChain) IsKnownOrphan(hash *wire.ShaHash) bool { +func (b *BlockChain) IsKnownOrphan(hash *chainhash.Hash) bool { // Protect concurrent access. Using a read lock only so multiple // readers can query without blocking each other. b.orphanLock.RLock() @@ -210,7 +264,7 @@ func (b *BlockChain) IsKnownOrphan(hash *wire.ShaHash) bool { // map of orphan blocks. // // This function is safe for concurrent access. 
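The BlockChain struct above gains a ticket database (tmdb) along with the exported queries MissedTickets and TicketsWithAddress. A usage sketch under the stated constraint that neither method is safe for concurrent access, so the caller is assumed to serialize access; constructing the chain instance and the address is out of scope here:

```go
package example

import (
	"fmt"

	"github.com/decred/dcrd/blockchain"
	"github.com/decred/dcrutil"
)

// dumpTicketInfo illustrates the new ticket queries. Both methods are
// documented as not safe for concurrent access, so the caller is assumed to
// hold whatever lock guards the chain instance.
func dumpTicketInfo(chain *blockchain.BlockChain, addr dcrutil.Address) error {
	live, err := chain.TicketsWithAddress(addr)
	if err != nil {
		return err
	}
	for _, hash := range live {
		fmt.Printf("live ticket: %v\n", hash)
	}

	missed, err := chain.MissedTickets()
	if err != nil {
		return err
	}
	fmt.Printf("%d missed tickets\n", len(missed))
	return nil
}
```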
-func (b *BlockChain) GetOrphanRoot(hash *wire.ShaHash) *wire.ShaHash { +func (b *BlockChain) GetOrphanRoot(hash *chainhash.Hash) *chainhash.Hash { // Protect concurrent access. Using a read lock only so multiple // readers can query without blocking each other. b.orphanLock.RLock() @@ -273,7 +327,7 @@ func (b *BlockChain) removeOrphanBlock(orphan *orphanBlock) { // It also imposes a maximum limit on the number of outstanding orphan // blocks and will remove the oldest received orphan block if the limit is // exceeded. -func (b *BlockChain) addOrphanBlock(block *btcutil.Block) { +func (b *BlockChain) addOrphanBlock(block *dcrutil.Block) { // Remove expired orphan blocks. for _, oBlock := range b.orphans { if time.Now().After(oBlock.expiration) { @@ -283,7 +337,8 @@ func (b *BlockChain) addOrphanBlock(block *btcutil.Block) { // Update the oldest orphan block pointer so it can be discarded // in case the orphan pool fills up. - if b.oldestOrphan == nil || oBlock.expiration.Before(b.oldestOrphan.expiration) { + if b.oldestOrphan == nil || + oBlock.expiration.Before(b.oldestOrphan.expiration) { b.oldestOrphan = oBlock } } @@ -317,6 +372,46 @@ func (b *BlockChain) addOrphanBlock(block *btcutil.Block) { return } +// getGeneration gets a generation of blocks who all have the same parent by +// taking a hash as input, locating its parent node, and then returning all +// children for that parent node including the hash passed. This can then be +// used by the mempool downstream to locate all potential block template +// parents. +func (b *BlockChain) getGeneration(h chainhash.Hash) ([]chainhash.Hash, error) { + node, err := b.findNode(&h) + + // This typically happens because the main chain has recently + // reorganized and the block the miner is looking at is on + // a fork. Usually it corrects itself after failure. + if err != nil { + return nil, fmt.Errorf("couldn't find block node in best chain: %v", + err.Error()) + } + + // Get the parent of this node. + p, err := b.getPrevNodeFromNode(node) + if err != nil { + return nil, fmt.Errorf("block is orphan (parent missing)") + } + if p == nil { + return nil, fmt.Errorf("no need to get children of genesis block") + } + + // Store all the hashes in a new slice and return them. + lenChildren := len(p.children) + allChildren := make([]chainhash.Hash, lenChildren, lenChildren) + for i := 0; i < lenChildren; i++ { + allChildren[i] = *p.children[i].hash + } + + return allChildren, nil +} + +// GetGeneration is the exported version of getGeneration. +func (b *BlockChain) GetGeneration(hash chainhash.Hash) ([]chainhash.Hash, error) { + return b.getGeneration(hash) +} + // GenerateInitialIndex is an optional function which generates the required // number of initial block nodes in an optimized fashion. This is optional // because the memory block index is sparse and previous nodes are dynamically @@ -342,7 +437,7 @@ func (b *BlockChain) GenerateInitialIndex() error { // Calculate the starting height based on the minimum number of nodes // needed in memory. - startHeight := endHeight - minMemoryNodes + startHeight := endHeight - b.minMemoryNodes if startHeight < 0 { startHeight = 0 } @@ -350,7 +445,7 @@ func (b *BlockChain) GenerateInitialIndex() error { // Loop forwards through each block loading the node into the index for // the block. // - // Due to a bug in the SQLite btcdb driver, the FetchBlockBySha call is + // Due to a bug in the SQLite dcrdb driver, the FetchBlockBySha call is // limited to a maximum number of hashes per invocation. 
Since SQLite // is going to be nuked eventually, the bug isn't being fixed in the // driver. In the mean time, work around the issue by calling @@ -394,20 +489,24 @@ func (b *BlockChain) GenerateInitialIndex() error { // creates a block node from it, and updates the memory block chain accordingly. // It is used mainly to dynamically load previous blocks from database as they // are needed to avoid needing to put the entire block chain in memory. -func (b *BlockChain) loadBlockNode(hash *wire.ShaHash) (*blockNode, error) { - // Load the block header and height from the db. - blockHeader, err := b.db.FetchBlockHeaderBySha(hash) - if err != nil { - return nil, err - } - blockHeight, err := b.db.FetchBlockHeightBySha(hash) +func (b *BlockChain) loadBlockNode(hash *chainhash.Hash) (*blockNode, error) { + block, err := b.db.FetchBlockBySha(hash) if err != nil { return nil, err } // Create the new block node for the block and set the work. - node := newBlockNode(blockHeader, hash, blockHeight) + voteBitsStake := make([]uint16, 0) + for _, stx := range block.STransactions() { + if is, _ := stake.IsSSGen(stx); is { + vb := stake.GetSSGenVoteBits(stx) + voteBitsStake = append(voteBitsStake, vb) + } + } + node := newBlockNode(&block.MsgBlock().Header, hash, + int64(block.MsgBlock().Header.Height), voteBitsStake) node.inMainChain = true + prevHash := &block.MsgBlock().Header.PrevBlock // Add the node to the chain. // There are several possibilities here: @@ -418,7 +517,6 @@ func (b *BlockChain) loadBlockNode(hash *wire.ShaHash) (*blockNode, error) { // therefore is an error to insert into the chain // 4) Neither 1 or 2 is true, but this is the first node being added // to the tree, so it's the root. - prevHash := &blockHeader.PrevBlock if parentNode, ok := b.index[*prevHash]; ok { // Case 1 -- This node is a child of an existing block node. // Update the node's work sum with the sum of the parent node's @@ -428,7 +526,6 @@ func (b *BlockChain) loadBlockNode(hash *wire.ShaHash) (*blockNode, error) { node.workSum = node.workSum.Add(parentNode.workSum, node.workSum) parentNode.children = append(parentNode.children, node) node.parent = parentNode - } else if childNodes, ok := b.depNodes[*hash]; ok { // Case 2 -- This node is the parent of one or more nodes. // Connect this block node to all of its children and update @@ -440,9 +537,8 @@ func (b *BlockChain) loadBlockNode(hash *wire.ShaHash) (*blockNode, error) { addChildrenWork(childNode, node.workSum) b.root = node } - } else { - // Case 3 -- The node does't have a parent and is not the parent + // Case 3 -- The node doesn't have a parent and is not the parent // of another node. This is only acceptable for the first node // inserted into the chain. Otherwise it means an arbitrary // orphan block is trying to be loaded which is not allowed. @@ -462,12 +558,67 @@ func (b *BlockChain) loadBlockNode(hash *wire.ShaHash) (*blockNode, error) { return node, nil } +// findNode finds the node scaling backwards from best chain or return an +// error. +func (b *BlockChain) findNode(nodeHash *chainhash.Hash) (*blockNode, error) { + var node *blockNode + + // Most common case; we're checking a block that wants to be connected + // on top of the current main chain. + distance := 0 + if nodeHash.IsEqual(b.bestChain.hash) { + node = b.bestChain + } else { + // Look backwards in our blockchain and try to find it in the + // parents of blocks. 
+ foundPrev := b.bestChain + notFound := false + for !foundPrev.hash.IsEqual(b.chainParams.GenesisHash) { + if distance >= searchDepth { + notFound = true + break + } + + if foundPrev.hash.IsEqual(b.chainParams.GenesisHash) { + notFound = true + break + } + + if foundPrev.hash.IsEqual(nodeHash) { + break + } + + foundPrev = foundPrev.parent + if foundPrev == nil { + parent, err := b.loadBlockNode(&foundPrev.header.PrevBlock) + if err != nil { + return nil, err + } + + foundPrev = parent + } + + distance++ + } + + if notFound { + return nil, fmt.Errorf("couldn't find node %v in best chain", + nodeHash) + } + + node = foundPrev + } + + return node, nil +} + // getPrevNodeFromBlock returns a block node for the block previous to the // passed block (the passed block's parent). When it is already in the memory // block chain, it simply returns it. Otherwise, it loads the previous block // from the block database, creates a new block node from it, and returns it. // The returned node will be nil if the genesis block is passed. -func (b *BlockChain) getPrevNodeFromBlock(block *btcutil.Block) (*blockNode, error) { +func (b *BlockChain) getPrevNodeFromBlock(block *dcrutil.Block) (*blockNode, + error) { // Genesis block. prevHash := &block.MsgBlock().Header.PrevBlock if prevHash.IsEqual(zeroHash) { @@ -481,7 +632,7 @@ func (b *BlockChain) getPrevNodeFromBlock(block *btcutil.Block) (*blockNode, err // Dynamically load the previous block from the block database, create // a new block node for it, and update the memory chain accordingly. - prevBlockNode, err := b.loadBlockNode(prevHash) + prevBlockNode, err := b.findNode(prevHash) if err != nil { return nil, err } @@ -508,7 +659,7 @@ func (b *BlockChain) getPrevNodeFromNode(node *blockNode) (*blockNode, error) { // Dynamically load the previous block from the block database, create // a new block node for it, and update the memory chain accordingly. - prevBlockNode, err := b.loadBlockNode(node.parentHash) + prevBlockNode, err := b.findNode(node.parentHash) if err != nil { return nil, err } @@ -516,6 +667,75 @@ func (b *BlockChain) getPrevNodeFromNode(node *blockNode) (*blockNode, error) { return prevBlockNode, nil } +// GetNodeAtHeightFromTopNode goes backwards through a node until it a reaches +// the node with a desired block height; it returns this block. The benefit is +// this works for both the main chain and the side chain. +func (b *BlockChain) getNodeAtHeightFromTopNode(node *blockNode, + toTraverse int64) (*blockNode, error) { + oldNode := node + var err error + + for i := 0; i < int(toTraverse); i++ { + // Get the previous block node. + oldNode, err = b.getPrevNodeFromNode(oldNode) + if err != nil { + return nil, err + } + + if oldNode == nil { + return nil, fmt.Errorf("unable to obtain previous node; " + + "ancestor is genesis block") + } + } + + return oldNode, nil +} + +// getBlockFromHash searches the internal chain block stores and the database in +// an attempt to find the block. If it finds the block, it returns it. 
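A usage sketch for the block lookup helpers introduced in this hunk (getBlockFromHash and its exported wrappers GetBlockFromHash and GetTopBlock, defined just below): walk a few ancestors back from HEAD by following PrevBlock hashes. The depth is assumed to be smaller than the chain height so the walk never reaches past the genesis block:

```go
package example

import (
	"fmt"

	"github.com/decred/dcrd/blockchain"
	"github.com/decred/dcrutil"
)

// walkBackFromTip fetches the block at HEAD with GetTopBlock, then follows
// PrevBlock hashes through GetBlockFromHash, which consults the side-chain
// cache, the orphan pool, and finally the main chain database.
func walkBackFromTip(chain *blockchain.BlockChain, depth int) ([]*dcrutil.Block, error) {
	tip, err := chain.GetTopBlock()
	if err != nil {
		return nil, err
	}

	blocks := []*dcrutil.Block{&tip}
	for i := 0; i < depth; i++ {
		prevHash := blocks[len(blocks)-1].MsgBlock().Header.PrevBlock
		prev, err := chain.GetBlockFromHash(&prevHash)
		if err != nil {
			return nil, fmt.Errorf("ancestor %d: %v", i+1, err)
		}
		blocks = append(blocks, prev)
	}
	return blocks, nil
}
```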
+func (b *BlockChain) getBlockFromHash(hash *chainhash.Hash) (*dcrutil.Block, + error) { + // Check block cache + b.blockCacheLock.RLock() + blockSidechain, existsSidechain := b.blockCache[*hash] + b.blockCacheLock.RUnlock() + if existsSidechain { + return blockSidechain, nil + } + + // Check orphan cache + b.orphanLock.RLock() + orphan, existsOrphans := b.orphans[*hash] + b.orphanLock.RUnlock() + if existsOrphans { + return orphan.block, nil + } + + // Check main chain + blockMainchain, errFetchMainchain := b.db.FetchBlockBySha(hash) + existsMainchain := (errFetchMainchain == nil) || (blockMainchain != nil) + if existsMainchain { + return blockMainchain, nil + } + + // Implicit !existsMainchain && !existsSidechain && !existsOrphans + return nil, fmt.Errorf("unable to find block %v in "+ + "side chain cache, orphan cache, and main chain db", hash) +} + +// GetBlockFromHash is the generalized and exported version of getBlockFromHash. +func (b *BlockChain) GetBlockFromHash(hash *chainhash.Hash) (*dcrutil.Block, + error) { + return b.getBlockFromHash(hash) +} + +// GetTopBlock returns the current block at HEAD on the blockchain. Needed +// for mining in the daemon. +func (b *BlockChain) GetTopBlock() (dcrutil.Block, error) { + block, err := b.getBlockFromHash(b.bestChain.hash) + return *block, err +} + // removeBlockNode removes the passed block node from the memory chain by // unlinking all of its children and removing it from the the node and // dependency indices. @@ -567,7 +787,7 @@ func (b *BlockChain) pruneBlockNodes() error { // the latter loads the node and the goal is to find nodes still in // memory that can be pruned. newRootNode := b.bestChain - for i := int64(0); i < minMemoryNodes-1 && newRootNode != nil; i++ { + for i := int64(0); i < b.minMemoryNodes-1 && newRootNode != nil; i++ { newRootNode = newRootNode.parent } @@ -603,14 +823,20 @@ func (b *BlockChain) pruneBlockNodes() error { return nil } +// GetCurrentBlockHeader returns the block header of the block at HEAD. +// This function is NOT safe for concurrent access. +func (b *BlockChain) GetCurrentBlockHeader() *wire.BlockHeader { + return &b.bestChain.header +} + // isMajorityVersion determines if a previous number of blocks in the chain // starting with startNode are at least the minimum passed version. func (b *BlockChain) isMajorityVersion(minVer int32, startNode *blockNode, - numRequired uint64) bool { + numRequired int32) bool { - numFound := uint64(0) + numFound := int32(0) iterNode := startNode - for i := uint64(0); i < b.chainParams.BlockUpgradeNumToCheck && + for i := int32(0); i < b.chainParams.CurrentBlockVersion && numFound < numRequired && iterNode != nil; i++ { // This node has a version that is at least the minimum version. if iterNode.version >= minVer { @@ -701,12 +927,13 @@ func (b *BlockChain) CalcPastMedianTime() (time.Time, error) { // returned list of block nodes) in order to reorganize the chain such that the // passed node is the new end of the main chain. The lists will be empty if the // passed node is not on a side chain. -func (b *BlockChain) getReorganizeNodes(node *blockNode) (*list.List, *list.List) { +func (b *BlockChain) getReorganizeNodes(node *blockNode) (*list.List, *list.List, + error) { // Nothing to detach or attach if there is no node. 
attachNodes := list.New() detachNodes := list.New() if node == nil { - return detachNodes, attachNodes + return detachNodes, attachNodes, nil } // Find the fork point (if any) adding each block to the list of nodes @@ -714,35 +941,41 @@ func (b *BlockChain) getReorganizeNodes(node *blockNode) (*list.List, *list.List // so they are attached in the appropriate order when iterating the list // later. ancestor := node - for ; ancestor.parent != nil; ancestor = ancestor.parent { + for ancestor.parent != nil { if ancestor.inMainChain { break } attachNodes.PushFront(ancestor) - } - // TODO(davec): Use prevNodeFromNode function in case the requested - // node is further back than the what is in memory. This shouldn't - // happen in the normal course of operation, but the ability to fetch - // input transactions of arbitrary blocks will likely to be exposed at - // some point and that could lead to an issue here. + var err error + ancestor, err = b.getPrevNodeFromNode(ancestor) + if err != nil { + return nil, nil, err + } + } // Start from the end of the main chain and work backwards until the // common ancestor adding each block to the list of nodes to detach from // the main chain. - for n := b.bestChain; n != nil && n.parent != nil; n = n.parent { + for n := b.bestChain; n != nil && n.parent != nil; { if n.hash.IsEqual(ancestor.hash) { break } detachNodes.PushBack(n) + + var err error + n, err = b.getPrevNodeFromNode(n) + if err != nil { + return nil, nil, err + } } - return detachNodes, attachNodes + return detachNodes, attachNodes, nil } // connectBlock handles connecting the passed node/block to the end of the main // (best) chain. -func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block) error { +func (b *BlockChain) connectBlock(node *blockNode, block *dcrutil.Block) error { // Make sure it's extending the end of the best chain. prevHash := &block.MsgBlock().Header.PrevBlock if b.bestChain != nil && !prevHash.IsEqual(b.bestChain.hash) { @@ -750,9 +983,47 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block) error { "that extends the main chain") } + var err error + + // Insert block into ticket database if we're the point where tickets begin to + // mature. Note that if the block is inserted into tmdb and then insertion + // into DB fails, the two database will be on different HEADs. This needs + // to be handled correctly in the near future. + if node.height >= b.chainParams.StakeEnabledHeight { + spentAndMissedTickets, newTickets, _, err := + b.tmdb.InsertBlock(block) + if err != nil { + return err + } + + nextStakeDiff, err := b.calcNextRequiredStakeDifficulty(node) + if err != nil { + return err + } + + // Notify of spent and missed tickets + b.sendNotification(NTSpentAndMissedTickets, + &TicketNotificationsData{*node.hash, + node.height, + nextStakeDiff, + spentAndMissedTickets}) + // Notify of new tickets + b.sendNotification(NTNewTickets, + &TicketNotificationsData{*node.hash, + node.height, + nextStakeDiff, + newTickets}) + } + // Insert the block into the database which houses the main chain. - _, err := b.db.InsertBlock(block) + _, err = b.db.InsertBlock(block) if err != nil { + // Attempt to restore TicketDb if this fails. + _, _, _, errRemove := b.tmdb.RemoveBlockToHeight(node.height - 1) + if errRemove != nil { + return errRemove + } + return err } @@ -765,44 +1036,90 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block) error { // This node is now the end of the best chain. 
b.bestChain = node + // Get the parent block. + parent, err := b.getBlockFromHash(node.parent.hash) + if err != nil { + return err + } + + // Assemble the current block and the parent into a slice. + blockAndParent := []*dcrutil.Block{block, parent} + // Notify the caller that the block was connected to the main chain. // The caller would typically want to react with actions such as // updating wallets. - b.sendNotification(NTBlockConnected, block) + b.sendNotification(NTBlockConnected, blockAndParent) return nil } // disconnectBlock handles disconnecting the passed node/block from the end of // the main (best) chain. -func (b *BlockChain) disconnectBlock(node *blockNode, block *btcutil.Block) error { +func (b *BlockChain) disconnectBlock(node *blockNode, block *dcrutil.Block) error { // Make sure the node being disconnected is the end of the best chain. if b.bestChain == nil || !node.hash.IsEqual(b.bestChain.hash) { return fmt.Errorf("disconnectBlock must be called with the " + "block at the end of the main chain") } + // Insert block into ticket database if we're the point where tickets begin to + // mature. + maturityHeight := int64(b.chainParams.TicketMaturity) + + int64(b.chainParams.CoinbaseMaturity) + + // Remove from ticket database. + if node.height-1 >= maturityHeight { + _, _, _, err := b.tmdb.RemoveBlockToHeight(node.height - 1) + if err != nil { + return err + } + } + // Remove the block from the database which houses the main chain. + // if we're above the point in which the stake db is enabled. prevNode, err := b.getPrevNodeFromNode(node) if err != nil { + // Attempt to restore TicketDb if this fails. + _, _, _, errReinsert := b.tmdb.InsertBlock(block) + if errReinsert != nil { + return errReinsert + } + return err } err = b.db.DropAfterBlockBySha(prevNode.hash) if err != nil { + // Attempt to restore TicketDb if this fails. + _, _, _, errReinsert := b.tmdb.InsertBlock(block) + if errReinsert != nil { + return errReinsert + } + return err } // Put block in the side chain cache. node.inMainChain = false + b.blockCacheLock.Lock() b.blockCache[*node.hash] = block + b.blockCacheLock.Unlock() // This node's parent is now the end of the best chain. b.bestChain = node.parent + // Get the parent block. + parent, err := b.getBlockFromHash(node.parent.hash) + if err != nil { + return err + } + + // Assemble the current block and the parent into a slice. + blockAndParent := []*dcrutil.Block{block, parent} + // Notify the caller that the block was disconnected from the main // chain. The caller would typically want to react with actions such as // updating wallets. - b.sendNotification(NTBlockDisconnected, block) + b.sendNotification(NTBlockDisconnected, blockAndParent) return nil } @@ -818,11 +1135,19 @@ func (b *BlockChain) disconnectBlock(node *blockNode, block *btcutil.Block) erro // The flags modify the behavior of this function as follows: // - BFDryRun: Only the checks which ensure the reorganize can be completed // successfully are performed. The chain is not reorganized. -func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List, flags BehaviorFlags) error { +// Decred: TODO Implement and debug reorg behaviour with ticket database. +func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List, + flags BehaviorFlags) error { + oldHash := b.bestChain.hash + oldHeight := b.bestChain.height + // Ensure all of the needed side chain blocks are in the cache. 
for e := attachNodes.Front(); e != nil; e = e.Next() { n := e.Value.(*blockNode) - if _, exists := b.blockCache[*n.hash]; !exists { + b.blockCacheLock.RLock() + _, exists := b.blockCache[*n.hash] + b.blockCacheLock.RUnlock() + if !exists { return fmt.Errorf("block %v is missing from the side "+ "chain block cache", n.hash) } @@ -840,14 +1165,24 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List, flags // at least a couple of ways accomplish that rollback, but both involve // tweaking the chain. This approach catches these issues before ever // modifying the chain. + var topBlock *blockNode for e := attachNodes.Front(); e != nil; e = e.Next() { n := e.Value.(*blockNode) + b.blockCacheLock.RLock() block := b.blockCache[*n.hash] + b.blockCacheLock.RUnlock() + log.Debugf("Evaluating block %v (height %v) for correctness", + block.Sha(), block.Height()) err := b.checkConnectBlock(n, block) if err != nil { return err } + topBlock = n } + newHash := topBlock.hash + newHeight := topBlock.height + log.Debugf("New best chain validation completed successfully, " + + "commencing with the reorganization.") // Skip disconnecting and connecting the blocks when running with the // dry run flag set. @@ -855,6 +1190,15 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List, flags return nil } + // Send a notification that a blockchain reorganization is in progress. + reorgData := &ReorganizationNtfnsData{ + *oldHash, + oldHeight, + *newHash, + newHeight, + } + b.sendNotification(NTReorganization, reorgData) + // Disconnect blocks from the main chain. for e := detachNodes.Front(); e != nil; e = e.Next() { n := e.Value.(*blockNode) @@ -871,36 +1215,106 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List, flags // Connect the new best chain blocks. for e := attachNodes.Front(); e != nil; e = e.Next() { n := e.Value.(*blockNode) + b.blockCacheLock.RLock() block := b.blockCache[*n.hash] + b.blockCacheLock.RUnlock() err := b.connectBlock(n, block) if err != nil { return err } + b.blockCacheLock.Lock() delete(b.blockCache, *n.hash) + b.blockCacheLock.Unlock() } // Log the point where the chain forked. firstAttachNode := attachNodes.Front().Value.(*blockNode) forkNode, err := b.getPrevNodeFromNode(firstAttachNode) if err == nil { - log.Infof("REORGANIZE: Chain forks at %v", forkNode.hash) + log.Infof("REORGANIZE: Chain forks at %v, height %v", + forkNode.hash, + forkNode.height) } // Log the old and new best chain heads. - firstDetachNode := detachNodes.Front().Value.(*blockNode) lastAttachNode := attachNodes.Back().Value.(*blockNode) - log.Infof("REORGANIZE: Old best chain head was %v", firstDetachNode.hash) - log.Infof("REORGANIZE: New best chain head is %v", lastAttachNode.hash) + log.Infof("REORGANIZE: Old best chain head was %v, height %v", + oldHash, + oldHeight) + log.Infof("REORGANIZE: New best chain head is %v, height %v", + lastAttachNode.hash, + lastAttachNode.height) return nil } +// forceReorganizationToBlock forces a reorganization of the block chain to the +// block hash requested, so long as it matches up with the current organization +// of the best chain. +func (b *BlockChain) forceHeadReorganization(formerBest chainhash.Hash, + newBest chainhash.Hash, + timeSource MedianTimeSource) error { + if formerBest.IsEqual(&newBest) { + return fmt.Errorf("can't reorganize to the same block") + } + + formerBestNode := b.bestChain + + // We can't reorganize the chain unless our head block matches up with + // b.bestChain. 
+ if !formerBestNode.hash.IsEqual(&formerBest) { + return ruleError(ErrForceReorgWrongChain, "tried to force reorg "+ + "on wrong chain") + } + + var newBestNode *blockNode + for _, n := range formerBestNode.parent.children { + if n.hash.IsEqual(&newBest) { + newBestNode = n + } + } + + // Child to reorganize to is missing. + if newBestNode == nil { + ruleError(ErrForceReorgMissingChild, "missing child of common parent "+ + "for forced reorg") + } + + newBestBlock, err := b.getBlockFromHash(&newBest) + + // Check to make sure our forced-in node validates correctly. + err = checkBlockSanity(newBestBlock, + timeSource, + BFNone, + b.chainParams) + + err = b.checkConnectBlock(newBestNode, newBestBlock) + if err != nil { + return err + } + + attach, detach, err := b.getReorganizeNodes(newBestNode) + if err != nil { + return err + } + + return b.reorganizeChain(attach, detach, BFNone) +} + +// ForceHeadReorganization is the exported version of forceHeadReorganization. +func (b *BlockChain) ForceHeadReorganization(formerBest chainhash.Hash, + newBest chainhash.Hash, timeSource MedianTimeSource) error { + return b.forceHeadReorganization(formerBest, newBest, timeSource) +} + // connectBestChain handles connecting the passed block to the chain while // respecting proper chain selection according to the chain with the most // proof of work. In the typical case, the new block simply extends the main // chain. However, it may also be extending (or creating) a side chain (fork) // which may or may not end up becoming the main chain depending on which fork // cumulatively has the most proof of work. +// Returns a boolean that indicates where the block passed was on the main +// chain or a sidechain (true = main chain). // // The flags modify the behavior of this function as follows: // - BFFastAdd: Avoids the call to checkConnectBlock which does several @@ -908,7 +1322,8 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List, flags // - BFDryRun: Prevents the block from being connected and avoids modifying the // state of the memory chain index. Also, any log messages related to // modifying the state are avoided. -func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, flags BehaviorFlags) error { +func (b *BlockChain) connectBestChain(node *blockNode, block *dcrutil.Block, + flags BehaviorFlags) (bool, error) { fastAdd := flags&BFFastAdd == BFFastAdd dryRun := flags&BFDryRun == BFDryRun @@ -923,19 +1338,19 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla if !fastAdd { err := b.checkConnectBlock(node, block) if err != nil { - return err + return false, err } } // Don't connect the block if performing a dry run. if dryRun { - return nil + return true, nil } // Connect the block to the main chain. err := b.connectBlock(node, block) if err != nil { - return err + return false, err } // Connect the parent node to this node. @@ -943,7 +1358,20 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla node.parent.children = append(node.parent.children, node) } - return nil + validateStr := "validating" + txTreeRegularValid := dcrutil.IsFlagSet16(node.header.VoteBits, + dcrutil.BlockValid) + if !txTreeRegularValid { + validateStr = "invalidating" + } + + log.Debugf("Block %v (height %v) connected to the main chain, "+ + "%v the previous block", + node.hash, + node.height, + validateStr) + + return true, nil } if fastAdd { log.Warnf("fastAdd set in the side chain case? 
%v\n", @@ -957,7 +1385,9 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla if !dryRun { log.Debugf("Adding block %v to side chain cache", node.hash) } + b.blockCacheLock.Lock() b.blockCache[*node.hash] = block + b.blockCacheLock.Unlock() b.index[*node.hash] = node // Connect the parent node to this node. @@ -973,7 +1403,9 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla node.parent.children = children delete(b.index, *node.hash) + b.blockCacheLock.Lock() delete(b.blockCache, *node.hash) + b.blockCacheLock.Unlock() }() } @@ -982,29 +1414,41 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla if node.workSum.Cmp(b.bestChain.workSum) <= 0 { // Skip Logging info when the dry run flag is set. if dryRun { - return nil + return false, nil } // Find the fork point. fork := node - for ; fork.parent != nil; fork = fork.parent { + for fork.parent != nil { if fork.inMainChain { break } + var err error + fork, err = b.getPrevNodeFromNode(fork) + if err != nil { + return false, err + } } // Log information about how the block is forking the chain. if fork.hash.IsEqual(node.parent.hash) { - log.Infof("FORK: Block %v forks the chain at height %d"+ - "/block %v, but does not cause a reorganize", - node.hash, fork.height, fork.hash) + log.Infof("FORK: Block %v (height %v) forks the chain at height "+ + "%d/block %v, but does not cause a reorganize", + node.hash, + node.height, + fork.height, + fork.hash) } else { - log.Infof("EXTEND FORK: Block %v extends a side chain "+ - "which forks the chain at height %d/block %v", - node.hash, fork.height, fork.hash) + log.Infof("EXTEND FORK: Block %v (height %v) extends a side chain "+ + "which forks the chain at height "+ + "%d/block %v", + node.hash, + node.height, + fork.height, + fork.hash) } - return nil + return false, nil } // We're extending (or creating) a side chain and the cumulative work @@ -1014,19 +1458,22 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla // blocks that form the (now) old fork from the main chain, and attach // the blocks that form the new chain to the main chain starting at the // common ancenstor (the point where the chain forked). - detachNodes, attachNodes := b.getReorganizeNodes(node) + detachNodes, attachNodes, err := b.getReorganizeNodes(node) + if err != nil { + return false, nil + } // Reorganize the chain. if !dryRun { log.Infof("REORGANIZE: Block %v is causing a reorganize.", node.hash) } - err := b.reorganizeChain(detachNodes, attachNodes, flags) + err = b.reorganizeChain(detachNodes, attachNodes, flags) if err != nil { - return err + return false, err } - return nil + return true, nil } // IsCurrent returns whether or not the chain believes it is current. Several @@ -1050,9 +1497,10 @@ func (b *BlockChain) IsCurrent(timeSource MedianTimeSource) bool { } // Not current if the latest best block has a timestamp before 24 hours - // ago. + // ago and is on mainnet. minus24Hours := timeSource.AdjustedTime().Add(-24 * time.Hour) - if b.bestChain.timestamp.Before(minus24Hours) { + if b.bestChain.timestamp.Before(minus24Hours) && + b.chainParams.Name == "mainnet" { return false } @@ -1061,13 +1509,22 @@ func (b *BlockChain) IsCurrent(timeSource MedianTimeSource) bool { return true } -// New returns a BlockChain instance for the passed bitcoin network using the +// maxInt64 returns the maximum of two 64-bit integers. 
+func maxInt64(a, b int64) int64 { + if a > b { + return a + } + return b +} + +// New returns a BlockChain instance for the passed decred network using the // provided backing database. It accepts a callback on which notifications // will be sent when various events take place. See the documentation for // Notification and NotificationType for details on the types and contents of // notifications. The provided callback can be nil if the caller is not // interested in receiving notifications. -func New(db database.Db, params *chaincfg.Params, c NotificationCallback) *BlockChain { +func New(db database.Db, tmdb *stake.TicketDB, params *chaincfg.Params, + c NotificationCallback) *BlockChain { // Generate a checkpoint by height map from the provided checkpoints. var checkpointsByHeight map[int64]*chaincfg.Checkpoint if len(params.Checkpoints) > 0 { @@ -1078,18 +1535,32 @@ func New(db database.Db, params *chaincfg.Params, c NotificationCallback) *Block } } + // BlocksPerRetarget is the number of blocks between each difficulty + // retarget. It is calculated based on the retargeting window sizes + // in blocks for both PoW and PoS. + blocksPerRetargetPoW := int64(params.WorkDiffWindowSize * + params.WorkDiffWindows) + blocksPerRetargetPoS := int64(params.StakeDiffWindowSize * + params.StakeDiffWindows) + blocksPerRetarget := maxInt64(blocksPerRetargetPoW, blocksPerRetargetPoS) + b := BlockChain{ - db: db, - chainParams: params, - checkpointsByHeight: checkpointsByHeight, - notifications: c, - root: nil, - bestChain: nil, - index: make(map[wire.ShaHash]*blockNode), - depNodes: make(map[wire.ShaHash][]*blockNode), - orphans: make(map[wire.ShaHash]*orphanBlock), - prevOrphans: make(map[wire.ShaHash][]*orphanBlock), - blockCache: make(map[wire.ShaHash]*btcutil.Block), + db: db, + tmdb: tmdb, + chainParams: params, + checkpointsByHeight: checkpointsByHeight, + notifications: c, + blocksPerRetarget: blocksPerRetarget, + minMemoryNodes: minMemoryNodesLocal, + stakeValidationHeight: params.StakeValidationHeight, + root: nil, + bestChain: nil, + index: make(map[chainhash.Hash]*blockNode), + depNodes: make(map[chainhash.Hash][]*blockNode), + orphans: make(map[chainhash.Hash]*orphanBlock), + prevOrphans: make(map[chainhash.Hash][]*orphanBlock), + blockCache: make(map[chainhash.Hash]*dcrutil.Block), } + return &b } diff --git a/blockchain/chain_test.go b/blockchain/chain_test.go index 12af89a5..f04e3925 100644 --- a/blockchain/chain_test.go +++ b/blockchain/chain_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -6,109 +7,10 @@ package blockchain_test import ( "testing" - - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" ) // TestHaveBlock tests the HaveBlock API to ensure proper functionality. func TestHaveBlock(t *testing.T) { - // Load up blocks such that there is a side chain. - // (genesis block) -> 1 -> 2 -> 3 -> 4 - // \-> 3a - testFiles := []string{ - "blk_0_to_4.dat.bz2", - "blk_3A.dat.bz2", - } - - var blocks []*btcutil.Block - for _, file := range testFiles { - blockTmp, err := loadBlocks(file) - if err != nil { - t.Errorf("Error loading file: %v\n", err) - return - } - for _, block := range blockTmp { - blocks = append(blocks, block) - } - } - - // Create a new database and chain instance to run tests against. 
- chain, teardownFunc, err := chainSetup("haveblock") - if err != nil { - t.Errorf("Failed to setup chain instance: %v", err) - return - } - defer teardownFunc() - - // Since we're not dealing with the real block chain, disable - // checkpoints and set the coinbase maturity to 1. - chain.DisableCheckpoints(true) - blockchain.TstSetCoinbaseMaturity(1) - - timeSource := blockchain.NewMedianTime() - for i := 1; i < len(blocks); i++ { - isOrphan, err := chain.ProcessBlock(blocks[i], timeSource, - blockchain.BFNone) - if err != nil { - t.Errorf("ProcessBlock fail on block %v: %v\n", i, err) - return - } - if isOrphan { - t.Errorf("ProcessBlock incorrectly returned block %v "+ - "is an orphan\n", i) - return - } - } - - // Insert an orphan block. - isOrphan, err := chain.ProcessBlock(btcutil.NewBlock(&Block100000), - timeSource, blockchain.BFNone) - if err != nil { - t.Errorf("Unable to process block: %v", err) - return - } - if !isOrphan { - t.Errorf("ProcessBlock indicated block is an not orphan when " + - "it should be\n") - return - } - - tests := []struct { - hash string - want bool - }{ - // Genesis block should be present (in the main chain). - {hash: chaincfg.MainNetParams.GenesisHash.String(), want: true}, - - // Block 3a should be present (on a side chain). - {hash: "00000000474284d20067a4d33f6a02284e6ef70764a3a26d6a5b9df52ef663dd", want: true}, - - // Block 100000 should be present (as an orphan). - {hash: "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506", want: true}, - - // Random hashes should not be availble. - {hash: "123", want: false}, - } - - for i, test := range tests { - hash, err := wire.NewShaHashFromStr(test.hash) - if err != nil { - t.Errorf("NewShaHashFromStr: %v", err) - continue - } - - result, err := chain.HaveBlock(hash) - if err != nil { - t.Errorf("HaveBlock #%d unexpected error: %v", i, err) - return - } - if result != test.want { - t.Errorf("HaveBlock #%d got %v want %v", i, result, - test.want) - continue - } - } + // TODO Come up with some kind of new test for this portion of the API? + // HaveBlock is already tested in the reorganization test. } diff --git a/blockchain/checkpoints.go b/blockchain/checkpoints.go index d5840f11..7d382514 100644 --- a/blockchain/checkpoints.go +++ b/blockchain/checkpoints.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -7,10 +8,10 @@ package blockchain import ( "fmt" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrutil" ) // CheckpointConfirmations is the number of blocks before the end of the current @@ -21,8 +22,8 @@ const CheckpointConfirmations = 2016 // wire.ShaHash. It only differs from the one available in wire in that // it ignores the error since it will only (and must only) be called with // hard-coded, and therefore known good, hashes. 
-func newShaHashFromStr(hexStr string) *wire.ShaHash { - sha, _ := wire.NewShaHashFromStr(hexStr) +func newShaHashFromStr(hexStr string) *chainhash.Hash { + sha, _ := chainhash.NewHashFromStr(hexStr) return sha } @@ -59,7 +60,7 @@ func (b *BlockChain) LatestCheckpoint() *chaincfg.Checkpoint { // verifyCheckpoint returns whether the passed block height and hash combination // match the hard-coded checkpoint data. It also returns true if there is no // checkpoint data for the passed block height. -func (b *BlockChain) verifyCheckpoint(height int64, hash *wire.ShaHash) bool { +func (b *BlockChain) verifyCheckpoint(height int64, hash *chainhash.Hash) bool { if b.noCheckpoints || len(b.chainParams.Checkpoints) == 0 { return true } @@ -83,7 +84,7 @@ func (b *BlockChain) verifyCheckpoint(height int64, hash *wire.ShaHash) bool { // available in the downloaded portion of the block chain and returns the // associated block. It returns nil if a checkpoint can't be found (this should // really only happen for blocks before the first checkpoint). -func (b *BlockChain) findPreviousCheckpoint() (*btcutil.Block, error) { +func (b *BlockChain) findPreviousCheckpoint() (*dcrutil.Block, error) { if b.noCheckpoints || len(b.chainParams.Checkpoints) == 0 { return nil, nil } @@ -187,12 +188,12 @@ func (b *BlockChain) findPreviousCheckpoint() (*btcutil.Block, error) { // isNonstandardTransaction determines whether a transaction contains any // scripts which are not one of the standard types. -func isNonstandardTransaction(tx *btcutil.Tx) bool { +func isNonstandardTransaction(tx *dcrutil.Tx) bool { // TODO(davec): Should there be checks for the input signature scripts? // Check all of the output public key scripts for non-standard scripts. for _, txOut := range tx.MsgTx().TxOut { - scriptClass := txscript.GetScriptClass(txOut.PkScript) + scriptClass := txscript.GetScriptClass(txOut.Version, txOut.PkScript) if scriptClass == txscript.NonStandardTy { return true } @@ -215,7 +216,7 @@ func isNonstandardTransaction(tx *btcutil.Tx) bool { // // The intent is that candidates are reviewed by a developer to make the final // decision and then manually added to the list of checkpoints for a network. -func (b *BlockChain) IsCheckpointCandidate(block *btcutil.Block) (bool, error) { +func (b *BlockChain) IsCheckpointCandidate(block *dcrutil.Block) (bool, error) { // Checkpoints must be enabled. if b.noCheckpoints { return false, fmt.Errorf("checkpoints are disabled") diff --git a/blockchain/common.go b/blockchain/common.go new file mode 100644 index 00000000..cbc53a61 --- /dev/null +++ b/blockchain/common.go @@ -0,0 +1,645 @@ +// common.go +package blockchain + +import ( + "bytes" + "encoding/binary" + "fmt" + "sort" + + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" +) + +// DebugBlockHeaderString dumps a verbose message containing information about +// the block header of a block. 
+func DebugBlockHeaderString(chainParams *chaincfg.Params, block *dcrutil.Block) string { + bh := block.MsgBlock().Header + + var buffer bytes.Buffer + + str := fmt.Sprintf("Version: %v\n", bh.Version) + buffer.WriteString(str) + + str = fmt.Sprintf("Previous block: %v\n", bh.PrevBlock) + buffer.WriteString(str) + + str = fmt.Sprintf("Merkle root (reg): %v\n", bh.MerkleRoot) + buffer.WriteString(str) + + str = fmt.Sprintf("Merkle root (stk): %v\n", bh.StakeRoot) + buffer.WriteString(str) + + str = fmt.Sprintf("VoteBits: %v\n", bh.VoteBits) + buffer.WriteString(str) + + str = fmt.Sprintf("FinalState: %v\n", bh.FinalState) + buffer.WriteString(str) + + str = fmt.Sprintf("Voters: %v\n", bh.Voters) + buffer.WriteString(str) + + str = fmt.Sprintf("FreshStake: %v\n", bh.FreshStake) + buffer.WriteString(str) + + str = fmt.Sprintf("Revocations: %v\n", bh.Revocations) + buffer.WriteString(str) + + str = fmt.Sprintf("PoolSize: %v\n", bh.PoolSize) + buffer.WriteString(str) + + str = fmt.Sprintf("Timestamp: %v\n", bh.Timestamp) + buffer.WriteString(str) + + bitsBig := CompactToBig(bh.Bits) + if bitsBig.Cmp(bigZero) != 0 { + bitsBig.Div(chainParams.PowLimit, bitsBig) + } + diff := bitsBig.Int64() + str = fmt.Sprintf("Bits: %v (Difficulty: %v)\n", bh.Bits, diff) + buffer.WriteString(str) + + str = fmt.Sprintf("SBits: %v (In coins: %v)\n", bh.SBits, + float64(bh.SBits)/dcrutil.AtomsPerCoin) + buffer.WriteString(str) + + str = fmt.Sprintf("Nonce: %v \n", bh.Nonce) + buffer.WriteString(str) + + str = fmt.Sprintf("Height: %v \n", bh.Height) + buffer.WriteString(str) + + str = fmt.Sprintf("Size: %v \n", bh.Size) + buffer.WriteString(str) + + return buffer.String() +} + +// DebugBlockString dumps a verbose message containing information about +// the transactions of a block. +func DebugBlockString(block *dcrutil.Block) string { + if block == nil { + return "block pointer nil" + } + + var buffer bytes.Buffer + + hash := block.Sha() + + str := fmt.Sprintf("Block Header: %v Height: %v \n", + hash, block.Height()) + buffer.WriteString(str) + + str = fmt.Sprintf("Block contains %v regular transactions "+ + "and %v stake transactions \n", + len(block.Transactions()), + len(block.STransactions())) + buffer.WriteString(str) + + str = fmt.Sprintf("List of regular transactions \n") + buffer.WriteString(str) + + for i, tx := range block.Transactions() { + str = fmt.Sprintf("Index: %v, Hash: %v \n", i, tx.Sha()) + buffer.WriteString(str) + } + + if len(block.STransactions()) == 0 { + return buffer.String() + } + + str = fmt.Sprintf("List of stake transactions \n") + buffer.WriteString(str) + + for i, stx := range block.STransactions() { + txTypeStr := "" + txType := stake.DetermineTxType(stx) + switch txType { + case stake.TxTypeSStx: + txTypeStr = "SStx" + case stake.TxTypeSSGen: + txTypeStr = "SSGen" + case stake.TxTypeSSRtx: + txTypeStr = "SSRtx" + default: + txTypeStr = "Error" + } + + str = fmt.Sprintf("Index: %v, Type: %v, Hash: %v \n", + i, txTypeStr, stx.Sha()) + buffer.WriteString(str) + } + + return buffer.String() +} + +// DebugMsgTxString dumps a verbose message containing information about the +// contents of a transaction. 
+func DebugMsgTxString(msgTx *wire.MsgTx) string { + tx := dcrutil.NewTx(msgTx) + isSStx, _ := stake.IsSStx(tx) + isSSGen, _ := stake.IsSSGen(tx) + var sstxType []bool + var sstxPkhs [][]byte + var sstxAmts []int64 + var sstxRules [][]bool + var sstxLimits [][]uint16 + + if isSStx { + sstxType, sstxPkhs, sstxAmts, _, sstxRules, sstxLimits = + stake.GetSStxStakeOutputInfo(tx) + } + + var buffer bytes.Buffer + + hash := msgTx.TxSha() + str := fmt.Sprintf("Transaction hash: %v, Version %v, Locktime: %v, "+ + "Expiry %v\n\n", hash, msgTx.Version, msgTx.LockTime, msgTx.Expiry) + buffer.WriteString(str) + + str = fmt.Sprintf("==INPUTS==\nNumber of inputs: %v\n\n", + len(msgTx.TxIn)) + buffer.WriteString(str) + + for i, input := range msgTx.TxIn { + str = fmt.Sprintf("Input number: %v\n", i) + buffer.WriteString(str) + + str = fmt.Sprintf("Previous outpoint hash: %v, ", + input.PreviousOutPoint.Hash) + buffer.WriteString(str) + + str = fmt.Sprintf("Previous outpoint index: %v, ", + input.PreviousOutPoint.Index) + buffer.WriteString(str) + + str = fmt.Sprintf("Previous outpoint tree: %v \n", + input.PreviousOutPoint.Tree) + buffer.WriteString(str) + + str = fmt.Sprintf("Sequence: %v \n", + input.Sequence) + buffer.WriteString(str) + + str = fmt.Sprintf("ValueIn: %v \n", + input.ValueIn) + buffer.WriteString(str) + + str = fmt.Sprintf("BlockHeight: %v \n", + input.BlockHeight) + buffer.WriteString(str) + + str = fmt.Sprintf("BlockIndex: %v \n", + input.BlockIndex) + buffer.WriteString(str) + + str = fmt.Sprintf("Raw signature script: %x \n", input.SignatureScript) + buffer.WriteString(str) + + sigScr, _ := txscript.DisasmString(input.SignatureScript) + str = fmt.Sprintf("Disasmed signature script: %v \n\n", + sigScr) + buffer.WriteString(str) + } + + str = fmt.Sprintf("==OUTPUTS==\nNumber of outputs: %v\n\n", + len(msgTx.TxOut)) + buffer.WriteString(str) + + for i, output := range msgTx.TxOut { + str = fmt.Sprintf("Output number: %v\n", i) + buffer.WriteString(str) + + coins := float64(output.Value) / 1e8 + str = fmt.Sprintf("Output amount: %v atoms or %v coins\n", output.Value, + coins) + buffer.WriteString(str) + + // SStx OP_RETURNs, dump pkhs and amts committed + if isSStx && i != 0 && i%2 == 1 { + coins := float64(sstxAmts[i/2]) / 1e8 + str = fmt.Sprintf("SStx commit amount: %v atoms or %v coins\n", + sstxAmts[i/2], coins) + buffer.WriteString(str) + str = fmt.Sprintf("SStx commit address: %x\n", + sstxPkhs[i/2]) + buffer.WriteString(str) + str = fmt.Sprintf("SStx address type is P2SH: %v\n", + sstxType[i/2]) + buffer.WriteString(str) + + str = fmt.Sprintf("SStx all address types is P2SH: %v\n", + sstxType) + buffer.WriteString(str) + + str = fmt.Sprintf("Voting is fee limited: %v\n", + sstxLimits[i/2][0]) + buffer.WriteString(str) + if sstxRules[i/2][0] { + str = fmt.Sprintf("Voting limit imposed: %v\n", + sstxLimits[i/2][0]) + buffer.WriteString(str) + } + + str = fmt.Sprintf("Revoking is fee limited: %v\n", + sstxRules[i/2][1]) + buffer.WriteString(str) + + if sstxRules[i/2][1] { + str = fmt.Sprintf("Voting limit imposed: %v\n", + sstxLimits[i/2][1]) + buffer.WriteString(str) + } + } + + // SSGen block/block height OP_RETURN. 
+ if isSSGen && i == 0 { + blkHash, blkHeight, _ := stake.GetSSGenBlockVotedOn(tx) + str = fmt.Sprintf("SSGen block hash voted on: %v, height: %v\n", + blkHash, blkHeight) + buffer.WriteString(str) + } + + if isSSGen && i == 1 { + vb := stake.GetSSGenVoteBits(tx) + str = fmt.Sprintf("SSGen vote bits: %v\n", vb) + buffer.WriteString(str) + } + + str = fmt.Sprintf("Raw script: %x \n", output.PkScript) + buffer.WriteString(str) + + scr, _ := txscript.DisasmString(output.PkScript) + str = fmt.Sprintf("Disasmed script: %v \n\n", scr) + buffer.WriteString(str) + } + + return buffer.String() +} + +// DebugTicketDataString writes the contents of a ticket data struct +// as a string. +func DebugTicketDataString(td *stake.TicketData) string { + var buffer bytes.Buffer + + str := fmt.Sprintf("SStxHash: %v\n", td.SStxHash) + buffer.WriteString(str) + + str = fmt.Sprintf("SpendHash: %v\n", td.SpendHash) + buffer.WriteString(str) + + str = fmt.Sprintf("BlockHeight: %v\n", td.BlockHeight) + buffer.WriteString(str) + + str = fmt.Sprintf("Prefix: %v\n", td.Prefix) + buffer.WriteString(str) + + str = fmt.Sprintf("Missed: %v\n", td.Missed) + buffer.WriteString(str) + + str = fmt.Sprintf("Expired: %v\n", td.Expired) + buffer.WriteString(str) + + return buffer.String() +} + +// DebugTicketDBLiveString prints out the number of tickets in each +// bucket of the ticket database as a string. +func DebugTicketDBLiveString(tmdb *stake.TicketDB, chainParams *chaincfg.Params) (string, error) { + var buffer bytes.Buffer + buffer.WriteString("\n") + + for i := 0; i < stake.BucketsSize; i++ { + bucketTickets, err := tmdb.DumpLiveTickets(uint8(i)) + if err != nil { + return "", err + } + + str := fmt.Sprintf("%v: %v\t", i, len(bucketTickets)) + buffer.WriteString(str) + + // Add newlines. + if (i+1)%4 == 0 { + buffer.WriteString("\n") + } + } + + return buffer.String(), nil +} + +// DebugTicketDBLiveBucketString returns a string containing the ticket hashes +// found in a specific bucket of the live ticket database. If the verbose flag +// is called, it dumps the contents of the ticket data as well. +func DebugTicketDBLiveBucketString(tmdb *stake.TicketDB, bucket uint8, verbose bool) (string, error) { + var buffer bytes.Buffer + + str := fmt.Sprintf("Contents of live ticket bucket %v:\n", bucket) + buffer.WriteString(str) + + bucketTickets, err := tmdb.DumpLiveTickets(bucket) + if err != nil { + return "", err + } + + for hash, td := range bucketTickets { + str = fmt.Sprintf("%v\n", hash) + buffer.WriteString(str) + + if verbose { + str = fmt.Sprintf("%v\n", DebugTicketDataString(td)) + buffer.WriteString(str) + } + } + + return buffer.String(), nil +} + +// DebugTicketDBSpentBucketString prints the contents of the spent tickets +// database bucket indicated to a string that is returned. If the verbose +// flag is indicated, the contents of each ticket are printed as well. 
+func DebugTicketDBSpentBucketString(tmdb *stake.TicketDB, height int64, verbose bool) (string, error) { + var buffer bytes.Buffer + + str := fmt.Sprintf("Contents of spent ticket bucket height %v:\n", height) + buffer.WriteString(str) + + bucketTickets, err := tmdb.DumpSpentTickets(height) + if err != nil { + return "", err + } + + for hash, td := range bucketTickets { + missedStr := "" + if td.Missed { + missedStr = "Missed" + } else { + missedStr = "Spent" + } + str = fmt.Sprintf("%v (%v)\n", hash, missedStr) + buffer.WriteString(str) + + if verbose { + str = fmt.Sprintf("%v\n", DebugTicketDataString(td)) + buffer.WriteString(str) + } + } + + return buffer.String(), nil +} + +// DebugTicketDBMissedString prints out the contents of the missed ticket +// database to a string. If verbose is indicated, the ticket data itself +// is printed along with the ticket hashes. +func DebugTicketDBMissedString(tmdb *stake.TicketDB, verbose bool) (string, error) { + var buffer bytes.Buffer + + str := fmt.Sprintf("Contents of missed ticket database:\n") + buffer.WriteString(str) + + bucketTickets, err := tmdb.DumpMissedTickets() + if err != nil { + return "", err + } + + for hash, td := range bucketTickets { + str = fmt.Sprintf("%v\n", hash) + buffer.WriteString(str) + + if verbose { + str = fmt.Sprintf("%v\n", DebugTicketDataString(td)) + buffer.WriteString(str) + } + } + + return buffer.String(), nil +} + +// writeTicketDataToBuf writes some ticket data into a buffer as serialized +// data. +func writeTicketDataToBuf(buf *bytes.Buffer, td *stake.TicketData) { + buf.Write(td.SStxHash[:]) + buf.Write(td.SpendHash[:]) + + // OK for our purposes. + b := make([]byte, 8) + binary.LittleEndian.PutUint64(b, uint64(td.BlockHeight)) + buf.Write(b) + + buf.Write([]byte{byte(td.Prefix)}) + + if td.Missed { + buf.Write([]byte{0x01}) + } else { + buf.Write([]byte{0x00}) + } + + if td.Expired { + buf.Write([]byte{0x01}) + } else { + buf.Write([]byte{0x00}) + } +} + +// DebugTxStoreData returns a string containing information about the data +// stored in the given TxStore. +func DebugTxStoreData(txs TxStore) string { + if txs == nil { + return "" + } + + var buffer bytes.Buffer + + for _, txd := range txs { + str := fmt.Sprintf("Hash: %v\n", txd.Hash) + buffer.WriteString(str) + str = fmt.Sprintf("Height: %v\n", txd.BlockHeight) + buffer.WriteString(str) + str = fmt.Sprintf("Tx: %v\n", txd.Tx) + buffer.WriteString(str) + str = fmt.Sprintf("Spent: %v\n", txd.Spent) + buffer.WriteString(str) + str = fmt.Sprintf("Err: %v\n\n", txd.Err) + buffer.WriteString(str) + } + + return buffer.String() +} + +// TicketDbThumbprint takes all the tickets in the respective ticket db, +// sorts them, hashes their contents into a list, and then hashes that list. +// The resultant hash is the thumbprint of the ticket database, and should +// be the same across all clients that are synced to the same block. Returns +// an array of hashes len 3, containing (1) live tickets (2) spent tickets +// and (3) missed tickets. +// Do NOT use on mainnet or in production. For debug use only! Make sure +// the blockchain is frozen when you call this function. +func TicketDbThumbprint(tmdb *stake.TicketDB, chainParams *chaincfg.Params) ([]*chainhash.Hash, error) { + // Container for the three master hashes to go into. + dbThumbprints := make([]*chainhash.Hash, 3, 3) + + // (1) Live tickets. 
+ allLiveTickets := stake.NewTicketDataSliceEmpty() + for i := 0; i < stake.BucketsSize; i++ { + bucketTickets, err := tmdb.DumpLiveTickets(uint8(i)) + if err != nil { + return nil, err + } + + for _, td := range bucketTickets { + allLiveTickets = append(allLiveTickets, td) + } + } + + // Sort by the number data hash, since we already have this implemented + // and it's also unique. + sort.Sort(allLiveTickets) + + // Create a buffer, dump all the data into it, and hash. + var buf bytes.Buffer + for _, td := range allLiveTickets { + writeTicketDataToBuf(&buf, td) + } + + liveHash := chainhash.HashFunc(buf.Bytes()) + liveThumbprint, err := chainhash.NewHash(liveHash[:]) + if err != nil { + return nil, err + } + dbThumbprints[0] = liveThumbprint + + // (2) Spent tickets. + height := tmdb.GetTopBlock() + + allSpentTickets := stake.NewTicketDataSliceEmpty() + for i := int64(chainParams.StakeEnabledHeight); i <= height; i++ { + bucketTickets, err := tmdb.DumpSpentTickets(i) + if err != nil { + return nil, err + } + + for _, td := range bucketTickets { + allSpentTickets = append(allSpentTickets, td) + } + } + + sort.Sort(allSpentTickets) + + buf.Reset() // Flush buffer + for _, td := range allSpentTickets { + writeTicketDataToBuf(&buf, td) + } + + spentHash := chainhash.HashFunc(buf.Bytes()) + spentThumbprint, err := chainhash.NewHash(spentHash[:]) + if err != nil { + return nil, err + } + dbThumbprints[1] = spentThumbprint + + // (3) Missed tickets. + allMissedTickets := stake.NewTicketDataSliceEmpty() + missedTickets, err := tmdb.DumpMissedTickets() + if err != nil { + return nil, err + } + + for _, td := range missedTickets { + allMissedTickets = append(allMissedTickets, td) + } + + sort.Sort(allMissedTickets) + + buf.Reset() // Flush buffer + missedHash := chainhash.HashFunc(buf.Bytes()) + missedThumbprint, err := chainhash.NewHash(missedHash[:]) + if err != nil { + return nil, err + } + dbThumbprints[2] = missedThumbprint + + return dbThumbprints, nil +} + +// findWhereDoubleSpent determines where a tx was previously doublespent. +// VERY INTENSIVE BLOCKCHAIN SCANNING, USE TO DEBUG SIMULATED BLOCKCHAINS +// ONLY. +func (b *BlockChain) findWhereDoubleSpent(block *dcrutil.Block) error { + height := int64(1) + heightEnd := block.Height() + + hashes, err := b.db.FetchHeightRange(height, heightEnd) + if err != nil { + return err + } + + var allTxs []*dcrutil.Tx + txs := block.Transactions()[1:] + stxs := block.STransactions() + allTxs = append(txs, stxs...) 
+ + for _, hash := range hashes { + curBlock, err := b.getBlockFromHash(&hash) + if err != nil { + return err + } + log.Errorf("Cur block %v", curBlock.Height()) + + for _, localTx := range allTxs { + for _, localTxIn := range localTx.MsgTx().TxIn { + for _, tx := range curBlock.Transactions()[1:] { + for _, txIn := range tx.MsgTx().TxIn { + if txIn.PreviousOutPoint == localTxIn.PreviousOutPoint { + log.Errorf("Double spend of {hash: %v, idx: %v,"+ + " tree: %b}, previously found in tx %v "+ + "of block %v txtree regular", + txIn.PreviousOutPoint.Hash, + txIn.PreviousOutPoint.Index, + txIn.PreviousOutPoint.Tree, + tx.Sha(), + hash) + } + } + } + + for _, tx := range curBlock.STransactions() { + for _, txIn := range tx.MsgTx().TxIn { + if txIn.PreviousOutPoint == localTxIn.PreviousOutPoint { + log.Errorf("Double spend of {hash: %v, idx: %v,"+ + " tree: %b}, previously found in tx %v "+ + "of block %v txtree stake\n", + txIn.PreviousOutPoint.Hash, + txIn.PreviousOutPoint.Index, + txIn.PreviousOutPoint.Tree, + tx.Sha(), + hash) + } + } + } + } + } + } + + for _, localTx := range stxs { + for _, localTxIn := range localTx.MsgTx().TxIn { + for _, tx := range txs { + for _, txIn := range tx.MsgTx().TxIn { + if txIn.PreviousOutPoint == localTxIn.PreviousOutPoint { + log.Errorf("Double spend of {hash: %v, idx: %v,"+ + " tree: %b}, previously found in tx %v "+ + "of cur block stake txtree\n", + txIn.PreviousOutPoint.Hash, + txIn.PreviousOutPoint.Index, + txIn.PreviousOutPoint.Tree, + tx.Sha()) + } + } + } + } + } + + return nil +} diff --git a/blockchain/common_test.go b/blockchain/common_test.go index 8a557570..7065060e 100644 --- a/blockchain/common_test.go +++ b/blockchain/common_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -13,17 +14,18 @@ import ( "path/filepath" "strings" - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/database" - _ "github.com/btcsuite/btcd/database/ldb" - _ "github.com/btcsuite/btcd/database/memdb" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/database" + _ "github.com/decred/dcrd/database/ldb" + _ "github.com/decred/dcrd/database/memdb" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) // testDbType is the database backend type to use for the tests. -const testDbType = "memdb" +const testDbType = "leveldb" // testDbRoot is the root directory used to create all test databases. const testDbRoot = "testdbs" @@ -54,7 +56,7 @@ func isSupportedDbType(dbType string) bool { // chainSetup is used to create a new db and chain instance with the genesis // block already inserted. In addition to the new chain instnce, it returns // a teardown function the caller should invoke when done testing to clean up. -func chainSetup(dbName string) (*blockchain.BlockChain, func(), error) { +func chainSetup(dbName string, params *chaincfg.Params) (*blockchain.BlockChain, func(), error) { if !isSupportedDbType(testDbType) { return nil, nil, fmt.Errorf("unsupported db type %v", testDbType) } @@ -62,6 +64,8 @@ func chainSetup(dbName string) (*blockchain.BlockChain, func(), error) { // Handle memory database specially since it doesn't need the disk // specific handling. 
var db database.Db + tmdb := new(stake.TicketDB) + var teardown func() if testDbType == "memdb" { ndb, err := database.CreateDB(testDbType) @@ -73,6 +77,7 @@ func chainSetup(dbName string) (*blockchain.BlockChain, func(), error) { // Setup a teardown function for cleaning up. This function is // returned to the caller to be invoked when it is done testing. teardown = func() { + tmdb.Close() db.Close() } } else { @@ -98,6 +103,7 @@ func chainSetup(dbName string) (*blockchain.BlockChain, func(), error) { // returned to the caller to be invoked when it is done testing. teardown = func() { dbVersionPath := filepath.Join(testDbRoot, dbName+".ver") + tmdb.Close() db.Sync() db.Close() os.RemoveAll(dbPath) @@ -108,7 +114,8 @@ func chainSetup(dbName string) (*blockchain.BlockChain, func(), error) { // Insert the main network genesis block. This is part of the initial // database setup. - genesisBlock := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) + genesisBlock := dcrutil.NewBlock(params.GenesisBlock) + genesisBlock.SetHeight(int64(0)) _, err := db.InsertBlock(genesisBlock) if err != nil { teardown() @@ -116,7 +123,11 @@ func chainSetup(dbName string) (*blockchain.BlockChain, func(), error) { return nil, nil, err } - chain := blockchain.New(db, &chaincfg.MainNetParams, nil) + // Start the ticket database. + tmdb.Initialize(params, db) + tmdb.RescanTicketDB() + + chain := blockchain.New(db, tmdb, params, nil) return chain, teardown, nil } @@ -173,7 +184,7 @@ func loadTxStore(filename string) (blockchain.TxStore, error) { if err != nil { return nil, err } - txD.Tx = btcutil.NewTx(&msgTx) + txD.Tx = dcrutil.NewTx(&msgTx) // Transaction hash. txHash := msgTx.TxSha() diff --git a/blockchain/difficulty.go b/blockchain/difficulty.go index 0a045571..7cc4e580 100644 --- a/blockchain/difficulty.go +++ b/blockchain/difficulty.go @@ -1,49 +1,22 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package blockchain import ( - "fmt" "math/big" "time" - "github.com/btcsuite/btcd/wire" -) - -const ( - // targetTimespan is the desired amount of time that should elapse - // before block difficulty requirement is examined to determine how - // it should be changed in order to maintain the desired block - // generation rate. - targetTimespan = time.Hour * 24 * 14 - - // targetSpacing is the desired amount of time to generate each block. - targetSpacing = time.Minute * 10 - - // BlocksPerRetarget is the number of blocks between each difficulty - // retarget. It is calculated based on the desired block generation - // rate. - BlocksPerRetarget = int64(targetTimespan / targetSpacing) - - // retargetAdjustmentFactor is the adjustment factor used to limit - // the minimum and maximum amount of adjustment that can occur between - // difficulty retargets. - retargetAdjustmentFactor = 4 - - // minRetargetTimespan is the minimum amount of adjustment that can - // occur between difficulty retargets. It equates to 25% of the - // previous difficulty. - minRetargetTimespan = int64(targetTimespan / retargetAdjustmentFactor) - - // maxRetargetTimespan is the maximum amount of adjustment that can - // occur between difficulty retargets. It equates to 400% of the - // previous difficulty. - maxRetargetTimespan = int64(targetTimespan * retargetAdjustmentFactor) + "github.com/decred/dcrd/chaincfg/chainhash" ) var ( + // bigZero is 0 represented as a big.Int. 
It is defined here to avoid + // the overhead of creating it multiple times. + bigZero = big.NewInt(0) + // bigOne is 1 represented as a big.Int. It is defined here to avoid // the overhead of creating it multiple times. bigOne = big.NewInt(1) @@ -51,11 +24,15 @@ var ( // oneLsh256 is 1 shifted left 256 bits. It is defined here to avoid // the overhead of creating it multiple times. oneLsh256 = new(big.Int).Lsh(bigOne, 256) + + // maxShift is the maximum shift for a difficulty that resets (e.g. + // testnet difficulty). + maxShift = uint(256) ) // ShaHashToBig converts a wire.ShaHash into a big.Int that can be used to // perform math comparisons. -func ShaHashToBig(hash *wire.ShaHash) *big.Int { +func ShaHashToBig(hash *chainhash.Hash) *big.Int { // A ShaHash is in little-endian, but the big package wants the bytes // in big-endian, so reverse them. buf := *hash @@ -87,7 +64,7 @@ func ShaHashToBig(hash *wire.ShaHash) *big.Int { // The formula to calculate N is: // N = (-1^sign) * mantissa * 256^(exponent-3) // -// This compact form is only used in bitcoin to encode unsigned 256-bit numbers +// This compact form is only used in decred to encode unsigned 256-bit numbers // which represent difficulty targets, thus there really is not a need for a // sign bit, but it is implemented here to stay consistent with bitcoind. func CompactToBig(compact uint32) *big.Int { @@ -160,7 +137,7 @@ func BigToCompact(n *big.Int) uint32 { return compact } -// CalcWork calculates a work value from difficulty bits. Bitcoin increases +// CalcWork calculates a work value from difficulty bits. Decred increases // the difficulty for generating a block by decreasing the value which the // generated hash must be less than. This difficulty target is stored in each // block header using a compact representation as described in the documenation @@ -188,16 +165,19 @@ func CalcWork(bits uint32) *big.Int { // can have given starting difficulty bits and a duration. It is mainly used to // verify that claimed proof of work by a block is sane as compared to a // known good checkpoint. -func (b *BlockChain) calcEasiestDifficulty(bits uint32, duration time.Duration) uint32 { +func (b *BlockChain) calcEasiestDifficulty(bits uint32, + duration time.Duration) uint32 { // Convert types used in the calculations below. durationVal := int64(duration) - adjustmentFactor := big.NewInt(retargetAdjustmentFactor) + adjustmentFactor := big.NewInt(b.chainParams.RetargetAdjustmentFactor) + maxRetargetTimespan := int64(b.chainParams.TargetTimespan) * + b.chainParams.RetargetAdjustmentFactor // The test network rules allow minimum difficulty blocks after more // than twice the desired amount of time needed to generate a block has // elapsed. if b.chainParams.ResetMinDifficulty { - if durationVal > int64(targetSpacing)*2 { + if durationVal > int64(b.chainParams.TimePerBlock)*2 { return b.chainParams.PowLimitBits } } @@ -222,11 +202,14 @@ func (b *BlockChain) calcEasiestDifficulty(bits uint32, duration time.Duration) // findPrevTestNetDifficulty returns the difficulty of the previous block which // did not have the special testnet minimum difficulty rule applied. -func (b *BlockChain) findPrevTestNetDifficulty(startNode *blockNode) (uint32, error) { +func (b *BlockChain) findPrevTestNetDifficulty(startNode *blockNode) (uint32, + error) { // Search backwards through the chain for the last block without // the special rule applied. 
+ blocksPerRetarget := b.chainParams.WorkDiffWindowSize * + b.chainParams.WorkDiffWindows iterNode := startNode - for iterNode != nil && iterNode.height%BlocksPerRetarget != 0 && + for iterNode != nil && iterNode.height%blocksPerRetarget != 0 && iterNode.bits == b.chainParams.PowLimitBits { // Get the previous block node. This function is used over @@ -256,15 +239,20 @@ func (b *BlockChain) findPrevTestNetDifficulty(startNode *blockNode) (uint32, er // This function differs from the exported CalcNextRequiredDifficulty in that // the exported version uses the current best chain as the previous block node // while this function accepts any block node. -func (b *BlockChain) calcNextRequiredDifficulty(lastNode *blockNode, newBlockTime time.Time) (uint32, error) { +func (b *BlockChain) calcNextRequiredDifficulty(curNode *blockNode, + newBlockTime time.Time) (uint32, error) { // Genesis block. - if lastNode == nil { + if curNode == nil { return b.chainParams.PowLimitBits, nil } - // Return the previous block's difficulty requirements if this block - // is not at a difficulty retarget interval. - if (lastNode.height+1)%BlocksPerRetarget != 0 { + // Get the old difficulty; if we aren't at a block height where it changes, + // just return this. + oldDiff := curNode.header.Bits + oldDiffBig := CompactToBig(curNode.header.Bits) + + // We're not at a retarget point, return the oldDiff. + if (curNode.height+1)%b.chainParams.WorkDiffWindowSize != 0 { // The test network rules allow minimum difficulty blocks after // more than twice the desired amount of time needed to generate // a block has elapsed. @@ -272,83 +260,185 @@ func (b *BlockChain) calcNextRequiredDifficulty(lastNode *blockNode, newBlockTim // Return minimum difficulty when more than twice the // desired amount of time needed to generate a block has // elapsed. - allowMinTime := lastNode.timestamp.Add(targetSpacing * 2) + allowMinTime := curNode.timestamp.Add(b.chainParams.TimePerBlock * + b.chainParams.MinDiffResetTimeFactor) + + // For every extra target timespan that passes, we halve the + // difficulty. if newBlockTime.After(allowMinTime) { - return b.chainParams.PowLimitBits, nil + timePassed := newBlockTime.Sub(curNode.timestamp) + timePassed -= (b.chainParams.TimePerBlock * + b.chainParams.MinDiffResetTimeFactor) + shifts := uint((timePassed / b.chainParams.TimePerBlock) + 1) + + // Scale the difficulty with time passed. + oldTarget := CompactToBig(curNode.header.Bits) + newTarget := new(big.Int) + if shifts < maxShift { + newTarget.Lsh(oldTarget, shifts) + } else { + newTarget.Set(oneLsh256) + } + + // Limit new value to the proof of work limit. + if newTarget.Cmp(b.chainParams.PowLimit) > 0 { + newTarget.Set(b.chainParams.PowLimit) + } + + return BigToCompact(newTarget), nil } // The block was mined within the desired timeframe, so // return the difficulty for the last block which did // not have the special minimum difficulty rule applied. - prevBits, err := b.findPrevTestNetDifficulty(lastNode) + prevBits, err := b.findPrevTestNetDifficulty(curNode) if err != nil { return 0, err } return prevBits, nil } - // For the main network (or any unrecognized networks), simply - // return the previous block's difficulty requirements. - return lastNode.bits, nil + return oldDiff, nil } - // Get the block node at the previous retarget (targetTimespan days - // worth of blocks). - firstNode := lastNode - for i := int64(0); i < BlocksPerRetarget-1 && firstNode != nil; i++ { - // Get the previous block node. 
This function is used over - // simply accessing firstNode.parent directly as it will - // dynamically create previous block nodes as needed. This - // helps allow only the pieces of the chain that are needed - // to remain in memory. + // Declare some useful variables. + RAFBig := big.NewInt(b.chainParams.RetargetAdjustmentFactor) + nextDiffBigMin := CompactToBig(curNode.header.Bits) + nextDiffBigMin.Div(nextDiffBigMin, RAFBig) + nextDiffBigMax := CompactToBig(curNode.header.Bits) + nextDiffBigMax.Mul(nextDiffBigMax, RAFBig) + + alpha := b.chainParams.WorkDiffAlpha + + // Number of nodes to traverse while calculating difficulty. + nodesToTraverse := (b.chainParams.WorkDiffWindowSize * + b.chainParams.WorkDiffWindows) + + // Initialize bigInt slice for the percentage changes for each window period + // above or below the target. + windowChanges := make([]*big.Int, b.chainParams.WorkDiffWindows) + + // Regress through all of the previous blocks and store the percent changes + // per window period; use bigInts to emulate 64.32 bit fixed point. + oldNode := curNode + windowPeriod := int64(0) + weights := uint64(0) + recentTime := curNode.header.Timestamp.UnixNano() + olderTime := int64(0) + + for i := int64(0); ; i++ { + // Store and reset after reaching the end of every window period. + if i%b.chainParams.WorkDiffWindowSize == 0 && i != 0 { + olderTime = oldNode.header.Timestamp.UnixNano() + timeDifference := recentTime - olderTime + + // Just assume we're at the target (no change) if we've + // gone all the way back to the genesis block. + if oldNode.height == 0 { + timeDifference = int64(b.chainParams.TargetTimespan) + } + + timeDifBig := big.NewInt(timeDifference) + timeDifBig.Lsh(timeDifBig, 32) // Add padding + targetTemp := big.NewInt(int64(b.chainParams.TargetTimespan)) + + windowAdjusted := targetTemp.Div(timeDifBig, targetTemp) + + // Weight it exponentially. Be aware that this could at some point + // overflow if alpha or the number of blocks used is really large. + windowAdjusted = windowAdjusted.Lsh(windowAdjusted, + uint((b.chainParams.WorkDiffWindows-windowPeriod)*alpha)) + + // Sum up all the different weights incrementally. + weights += 1 << uint64((b.chainParams.WorkDiffWindows-windowPeriod)* + alpha) + + // Store it in the slice. + windowChanges[windowPeriod] = windowAdjusted + + windowPeriod++ + + recentTime = olderTime + } + + if i == nodesToTraverse { + break // Exit for loop when we hit the end. + } + + // Get the previous block node. var err error - firstNode, err = b.getPrevNodeFromNode(firstNode) + tempNode := oldNode + oldNode, err = b.getPrevNodeFromNode(oldNode) if err != nil { return 0, err } + + // If we're at the genesis block, reset the oldNode + // so that it stays at the genesis block. + if oldNode == nil { + oldNode = tempNode + } } - if firstNode == nil { - return 0, fmt.Errorf("unable to obtain previous retarget block") + // Sum up the weighted window periods. + weightedSum := big.NewInt(0) + for i := int64(0); i < b.chainParams.WorkDiffWindows; i++ { + weightedSum.Add(weightedSum, windowChanges[i]) } - // Limit the amount of adjustment that can occur to the previous - // difficulty. - actualTimespan := lastNode.timestamp.UnixNano() - firstNode.timestamp.UnixNano() - adjustedTimespan := actualTimespan - if actualTimespan < minRetargetTimespan { - adjustedTimespan = minRetargetTimespan - } else if actualTimespan > maxRetargetTimespan { - adjustedTimespan = maxRetargetTimespan - } + // Divide by the sum of all weights. 
+ weightsBig := big.NewInt(int64(weights)) + weightedSumDiv := weightedSum.Div(weightedSum, weightsBig) - // Calculate new target difficulty as: - // currentDifficulty * (adjustedTimespan / targetTimespan) - // The result uses integer division which means it will be slightly - // rounded down. Bitcoind also uses integer division to calculate this - // result. - oldTarget := CompactToBig(lastNode.bits) - newTarget := new(big.Int).Mul(oldTarget, big.NewInt(adjustedTimespan)) - newTarget.Div(newTarget, big.NewInt(int64(targetTimespan))) + // Multiply by the old diff. + nextDiffBig := weightedSumDiv.Mul(weightedSumDiv, oldDiffBig) + + // Right shift to restore the original padding (restore non-fixed point). + nextDiffBig = nextDiffBig.Rsh(nextDiffBig, 32) + + // Check to see if we're over the limits for the maximum allowable retarget; + // if we are, return the maximum or minimum except in the case that oldDiff + // is zero. + if oldDiffBig.Cmp(bigZero) == 0 { // This should never really happen, + nextDiffBig.Set(nextDiffBig) // but in case it does... + } else if nextDiffBig.Cmp(bigZero) == 0 { + nextDiffBig.Set(b.chainParams.PowLimit) + } else if nextDiffBig.Cmp(nextDiffBigMax) == 1 { + nextDiffBig.Set(nextDiffBigMax) + } else if nextDiffBig.Cmp(nextDiffBigMin) == -1 { + nextDiffBig.Set(nextDiffBigMin) + } // Limit new value to the proof of work limit. - if newTarget.Cmp(b.chainParams.PowLimit) > 0 { - newTarget.Set(b.chainParams.PowLimit) + if nextDiffBig.Cmp(b.chainParams.PowLimit) > 0 { + nextDiffBig.Set(b.chainParams.PowLimit) } // Log new target difficulty and return it. The new target logging is // intentionally converting the bits back to a number instead of using // newTarget since conversion to the compact representation loses // precision. - newTargetBits := BigToCompact(newTarget) - log.Debugf("Difficulty retarget at block height %d", lastNode.height+1) - log.Debugf("Old target %08x (%064x)", lastNode.bits, oldTarget) - log.Debugf("New target %08x (%064x)", newTargetBits, CompactToBig(newTargetBits)) - log.Debugf("Actual timespan %v, adjusted timespan %v, target timespan %v", - time.Duration(actualTimespan), time.Duration(adjustedTimespan), - targetTimespan) + nextDiffBits := BigToCompact(nextDiffBig) + log.Debugf("Difficulty retarget at block height %d", curNode.height+1) + log.Debugf("Old target %08x (%064x)", curNode.header.Bits, oldDiffBig) + log.Debugf("New target %08x (%064x)", nextDiffBits, CompactToBig(nextDiffBits)) - return newTargetBits, nil + return nextDiffBits, nil +} + +// CalcNextRequiredDiffFromNode calculates the required difficulty for the block +// given with the passed hash along with the given timestamp. +// +// This function is NOT safe for concurrent access. +func (b *BlockChain) CalcNextRequiredDiffFromNode(hash *chainhash.Hash, + timestamp time.Time) (uint32, error) { + // Fetch the block to get the difficulty for. + node, err := b.findNode(hash) + if err != nil { + return 0, err + } + + return b.calcNextRequiredDifficulty(node, timestamp) } // CalcNextRequiredDifficulty calculates the required difficulty for the block @@ -356,6 +446,297 @@ func (b *BlockChain) calcNextRequiredDifficulty(lastNode *blockNode, newBlockTim // rules. // // This function is NOT safe for concurrent access. 
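// Aside: the retarget loop above emulates 64.32 fixed point by left-shifting
// each per-window ratio 32 bits before dividing, weighting window j (0 being
// the most recent) by 2^((windows-j)*alpha), and then scaling the previous
// target by the weighted mean. The stake difficulty code later in this file
// applies the same weighting to ticket pool size and fresh stake. The
// standalone sketch below restates that arithmetic with illustrative
// parameters; the names are assumptions, only math/big is required, and the
// clamping against RetargetAdjustmentFactor and PowLimit is omitted for
// brevity. The actual and target slices hold one timespan pair per window,
// most recent first, each of length windows.
func weightedRetargetSketch(oldTarget *big.Int, actual, target []int64,
	windows, alpha int64) *big.Int {
	weightedSum := new(big.Int)
	weights := uint64(0)
	for j := int64(0); j < windows; j++ {
		// Per-window ratio of actual to target timespan in 64.32 fixed
		// point.
		ratio := new(big.Int).Lsh(big.NewInt(actual[j]), 32)
		ratio.Div(ratio, big.NewInt(target[j]))

		// Exponential weighting: recent windows dominate.
		shift := uint((windows - j) * alpha)
		weightedSum.Add(weightedSum, ratio.Lsh(ratio, shift))
		weights += 1 << shift
	}

	// Weighted mean of the ratios, still carrying 32 bits of padding.
	mean := weightedSum.Div(weightedSum, new(big.Int).SetUint64(weights))

	// Scale the previous target and strip the padding.
	next := new(big.Int).Mul(mean, oldTarget)
	return next.Rsh(next, 32)
}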
-func (b *BlockChain) CalcNextRequiredDifficulty(timestamp time.Time) (uint32, error) { +func (b *BlockChain) CalcNextRequiredDifficulty(timestamp time.Time) (uint32, + error) { return b.calcNextRequiredDifficulty(b.bestChain, timestamp) } + +// mergeDifficulty takes an original stake difficulty and two new, scaled +// stake difficulties, merges the new difficulties, and outputs a new +// merged stake difficulty. +func mergeDifficulty(oldDiff int64, newDiff1 int64, newDiff2 int64) int64 { + newDiff1Big := big.NewInt(newDiff1) + newDiff2Big := big.NewInt(newDiff2) + newDiff2Big.Lsh(newDiff2Big, 32) + + oldDiffBig := big.NewInt(oldDiff) + oldDiffBigLSH := big.NewInt(oldDiff) + oldDiffBigLSH.Lsh(oldDiffBig, 32) + + newDiff1Big.Div(oldDiffBigLSH, newDiff1Big) + newDiff2Big.Div(newDiff2Big, oldDiffBig) + + // Combine the two changes in difficulty. + summedChange := big.NewInt(0) + summedChange.Set(newDiff2Big) + summedChange.Lsh(summedChange, 32) + summedChange.Div(summedChange, newDiff1Big) + summedChange.Mul(summedChange, oldDiffBig) + summedChange.Rsh(summedChange, 32) + + return summedChange.Int64() +} + +// calcNextRequiredStakeDifficulty calculates the exponentially weighted average +// and then uses it to determine the next stake difficulty. +// TODO: You can combine the first and second for loops below for a speed up +// if you'd like, I'm not sure how much it matters. +func (b *BlockChain) calcNextRequiredStakeDifficulty(curNode *blockNode) (int64, + error) { + alpha := b.chainParams.StakeDiffAlpha + stakeDiffStartHeight := int64(b.chainParams.CoinbaseMaturity) + + 1 + maxRetarget := int64(b.chainParams.RetargetAdjustmentFactor) + TicketPoolWeight := int64(b.chainParams.TicketPoolSizeWeight) + + // Number of nodes to traverse while calculating difficulty. + nodesToTraverse := (b.chainParams.StakeDiffWindowSize * + b.chainParams.StakeDiffWindows) + + // Genesis block. Block at height 1 has these parameters. + // Additionally, if we're before the time when people generally begin + // purchasing tickets, just use the MinimumStakeDiff. + // This is sort of sloppy and coded with the hopes that generally by + // stakeDiffStartHeight people will be submitting lots of SStx over the + // past nodesToTraverse many nodes. It should be okay with the default + // Decred parameters, but might do weird things if you use custom + // parameters. + if curNode == nil || + curNode.height < stakeDiffStartHeight { + return b.chainParams.MinimumStakeDiff, nil + } + + // Get the old difficulty; if we aren't at a block height where it changes, + // just return this. + oldDiff := curNode.header.SBits + if (curNode.height+1)%b.chainParams.StakeDiffWindowSize != 0 { + return oldDiff, nil + } + + // The target size of the ticketPool in live tickets. Recast these as int64 + // to avoid possible overflows for large sizes of either variable in + // params. + targetForTicketPool := int64(b.chainParams.TicketsPerBlock) * + int64(b.chainParams.TicketPoolSize) + + // Initialize bigInt slice for the percentage changes for each window period + // above or below the target. + windowChanges := make([]*big.Int, b.chainParams.StakeDiffWindows) + + // Regress through all of the previous blocks and store the percent changes + // per window period; use bigInts to emulate 64.32 bit fixed point. + oldNode := curNode + windowPeriod := int64(0) + weights := uint64(0) + + for i := int64(0); ; i++ { + // Store and reset after reaching the end of every window period. 
+ if (i+1)%b.chainParams.StakeDiffWindowSize == 0 { + // First adjust based on ticketPoolSize. Skew the difference + // in ticketPoolSize by max adjustment factor to help + // weight ticket pool size versus tickets per block. + poolSizeSkew := (int64(oldNode.header.PoolSize)- + targetForTicketPool)*TicketPoolWeight + targetForTicketPool + + // Don't let this be negative or zero. + if poolSizeSkew <= 0 { + poolSizeSkew = 1 + } + + curPoolSizeTemp := big.NewInt(poolSizeSkew) + curPoolSizeTemp.Lsh(curPoolSizeTemp, 32) // Add padding + targetTemp := big.NewInt(targetForTicketPool) + + windowAdjusted := curPoolSizeTemp.Div(curPoolSizeTemp, targetTemp) + + // Weight it exponentially. Be aware that this could at some point + // overflow if alpha or the number of blocks used is really large. + windowAdjusted = windowAdjusted.Lsh(windowAdjusted, + uint((b.chainParams.StakeDiffWindows-windowPeriod)*alpha)) + + // Sum up all the different weights incrementally. + weights += 1 << uint64((b.chainParams.StakeDiffWindows-windowPeriod)* + alpha) + + // Store it in the slice. + windowChanges[windowPeriod] = windowAdjusted + + // windowFreshStake = 0 + windowPeriod++ + } + + if (i + 1) == nodesToTraverse { + break // Exit for loop when we hit the end. + } + + // Get the previous block node. + var err error + tempNode := oldNode + oldNode, err = b.getPrevNodeFromNode(oldNode) + if err != nil { + return 0, err + } + + // If we're at the genesis block, reset the oldNode + // so that it stays at the genesis block. + if oldNode == nil { + oldNode = tempNode + } + } + + // Sum up the weighted window periods. + weightedSum := big.NewInt(0) + for i := int64(0); i < b.chainParams.StakeDiffWindows; i++ { + weightedSum.Add(weightedSum, windowChanges[i]) + } + + // Divide by the sum of all weights. + weightsBig := big.NewInt(int64(weights)) + weightedSumDiv := weightedSum.Div(weightedSum, weightsBig) + + // Multiply by the old stake diff. + oldDiffBig := big.NewInt(oldDiff) + nextDiffBig := weightedSumDiv.Mul(weightedSumDiv, oldDiffBig) + + // Right shift to restore the original padding (restore non-fixed point). + nextDiffBig = nextDiffBig.Rsh(nextDiffBig, 32) + nextDiffTicketPool := nextDiffBig.Int64() + + // Check to see if we're over the limits for the maximum allowable retarget; + // if we are, return the maximum or minimum except in the case that oldDiff + // is zero. + if oldDiff == 0 { // This should never really happen, but in case it does... + return nextDiffTicketPool, nil + } else if nextDiffTicketPool == 0 { + nextDiffTicketPool = oldDiff / maxRetarget + } else if (nextDiffTicketPool / oldDiff) > (maxRetarget - 1) { + nextDiffTicketPool = oldDiff * maxRetarget + } else if (oldDiff / nextDiffTicketPool) > (maxRetarget - 1) { + nextDiffTicketPool = oldDiff / maxRetarget + } + + // The target number of new SStx per block for any given window period. + targetForWindow := b.chainParams.StakeDiffWindowSize * + int64(b.chainParams.TicketsPerBlock) + + // Regress through all of the previous blocks and store the percent changes + // per window period; use bigInts to emulate 64.32 bit fixed point. + oldNode = curNode + windowFreshStake := int64(0) + windowPeriod = int64(0) + weights = uint64(0) + + for i := int64(0); ; i++ { + // Add the fresh stake into the store for this window period. + windowFreshStake += int64(oldNode.header.FreshStake) + + // Store and reset after reaching the end of every window period. + if (i+1)%b.chainParams.StakeDiffWindowSize == 0 { + // Don't let fresh stake be zero. 
+ if windowFreshStake <= 0 { + windowFreshStake = 1 + } + + freshTemp := big.NewInt(windowFreshStake) + freshTemp.Lsh(freshTemp, 32) // Add padding + targetTemp := big.NewInt(targetForWindow) + + // Get the percentage change. + windowAdjusted := freshTemp.Div(freshTemp, targetTemp) + + // Weight it exponentially. Be aware that this could at some point + // overflow if alpha or the number of blocks used is really large. + windowAdjusted = windowAdjusted.Lsh(windowAdjusted, + uint((b.chainParams.StakeDiffWindows-windowPeriod)*alpha)) + + // Sum up all the different weights incrementally. + weights += 1 << + uint64((b.chainParams.StakeDiffWindows-windowPeriod)*alpha) + + // Store it in the slice. + windowChanges[windowPeriod] = windowAdjusted + + windowFreshStake = 0 + windowPeriod++ + } + + if (i + 1) == nodesToTraverse { + break // Exit for loop when we hit the end. + } + + // Get the previous block node. + var err error + tempNode := oldNode + oldNode, err = b.getPrevNodeFromNode(oldNode) + if err != nil { + return 0, err + } + + // If we're at the genesis block, reset the oldNode + // so that it stays at the genesis block. + if oldNode == nil { + oldNode = tempNode + } + } + + // Sum up the weighted window periods. + weightedSum = big.NewInt(0) + for i := int64(0); i < b.chainParams.StakeDiffWindows; i++ { + weightedSum.Add(weightedSum, windowChanges[i]) + } + + // Divide by the sum of all weights. + weightsBig = big.NewInt(int64(weights)) + weightedSumDiv = weightedSum.Div(weightedSum, weightsBig) + + // Multiply by the old stake diff. + oldDiffBig = big.NewInt(oldDiff) + nextDiffBig = weightedSumDiv.Mul(weightedSumDiv, oldDiffBig) + + // Right shift to restore the original padding (restore non-fixed point). + nextDiffBig = nextDiffBig.Rsh(nextDiffBig, 32) + nextDiffFreshStake := nextDiffBig.Int64() + + // Check to see if we're over the limits for the maximum allowable retarget; + // if we are, return the maximum or minimum except in the case that oldDiff + // is zero. + if oldDiff == 0 { // This should never really happen, but in case it does... + return nextDiffFreshStake, nil + } else if nextDiffFreshStake == 0 { + nextDiffFreshStake = oldDiff / maxRetarget + } else if (nextDiffFreshStake / oldDiff) > (maxRetarget - 1) { + nextDiffFreshStake = oldDiff * maxRetarget + } else if (oldDiff / nextDiffFreshStake) > (maxRetarget - 1) { + nextDiffFreshStake = oldDiff / maxRetarget + } + + // Average the two differences using scaled multiplication. + nextDiff := mergeDifficulty(oldDiff, nextDiffTicketPool, nextDiffFreshStake) + + // Check to see if we're over the limits for the maximum allowable retarget; + // if we are, return the maximum or minimum except in the case that oldDiff + // is zero. + if oldDiff == 0 { // This should never really happen, but in case it does... + return oldDiff, nil + } else if nextDiff == 0 { + nextDiff = oldDiff / maxRetarget + } else if (nextDiff / oldDiff) > (maxRetarget - 1) { + nextDiff = oldDiff * maxRetarget + } else if (oldDiff / nextDiff) > (maxRetarget - 1) { + nextDiff = oldDiff / maxRetarget + } + + // If the next diff is below the network minimum, set the required stake + // difficulty to the minimum. + if nextDiff < b.chainParams.MinimumStakeDiff { + return b.chainParams.MinimumStakeDiff, nil + } + + return nextDiff, nil +} + +// CalcNextRequiredStakeDifficulty is the exported version of the above function. +// This function is NOT safe for concurrent access. 
+func (b *BlockChain) CalcNextRequiredStakeDifficulty() (int64, error) { + return b.calcNextRequiredStakeDifficulty(b.bestChain) +} diff --git a/blockchain/difficulty_test.go b/blockchain/difficulty_test.go index 58fa7f85..efde58c0 100644 --- a/blockchain/difficulty_test.go +++ b/blockchain/difficulty_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -7,8 +8,13 @@ package blockchain_test import ( "math/big" "testing" + "time" - "github.com/btcsuite/btcd/blockchain" + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/database" + "github.com/decred/dcrutil" ) func TestBigToCompact(t *testing.T) { @@ -69,3 +75,59 @@ func TestCalcWork(t *testing.T) { } } } + +// TODO Make more elaborate tests for difficulty. The difficulty algorithms +// have already been tested to death in simnet/testnet/mainnet simulations, +// but we should really have a unit test for them that includes tests for +// edge cases. +func TestDiff(t *testing.T) { + db, err := database.CreateDB("memdb") + if err != nil { + t.Errorf("Failed to create database: %v\n", err) + return + } + defer db.Close() + + var tmdb *stake.TicketDB + + genesisBlock := dcrutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) + _, err = db.InsertBlock(genesisBlock) + if err != nil { + t.Errorf("Failed to insert genesis block: %v\n", err) + return + } + + chain := blockchain.New(db, tmdb, &chaincfg.MainNetParams, nil) + + //timeSource := blockchain.NewMedianTime() + + // Grab some blocks + + // Build fake blockchain + + // Calc new difficulty + + ts := time.Now() + + d, err := chain.CalcNextRequiredDifficulty(ts) + if err != nil { + t.Errorf("Failed to get difficulty: %v\n", err) + return + } + if d != 486604799 { // This is hardcoded in genesis block but not exported anywhere. + t.Error("Failed to get initial difficulty.") + } + + sd, err := chain.CalcNextRequiredStakeDifficulty() + if err != nil { + t.Errorf("Failed to get stake difficulty: %v\n", err) + return + } + if sd != chaincfg.MainNetParams.MinimumStakeDiff { + t.Error("Incorrect initial stake difficulty.") + } + + // Compare + + // Repeat for a few more +} diff --git a/blockchain/doc.go b/blockchain/doc.go index 24417541..d46afa92 100644 --- a/blockchain/doc.go +++ b/blockchain/doc.go @@ -1,14 +1,15 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. /* -Package blockchain implements bitcoin block handling and chain selection rules. +Package blockchain implements decred block handling and chain selection rules. -The bitcoin block handling and chain selection rules are an integral, and quite -likely the most important, part of bitcoin. Unfortunately, at the time of +The decred block handling and chain selection rules are an integral, and quite +likely the most important, part of decred. Unfortunately, at the time of this writing, these rules are also largely undocumented and had to be -ascertained from the bitcoind source code. At its core, bitcoin is a +ascertained from the bitcoind source code. 
At its core, decred is a distributed consensus of which blocks are valid and which ones will comprise the main block chain (public ledger) that ultimately determines accepted transactions, so it is extremely important that fully validating nodes agree on @@ -20,13 +21,13 @@ functionality such as rejecting duplicate blocks, ensuring blocks and transactions follow all rules, orphan handling, and best chain selection along with reorganization. -Since this package does not deal with other bitcoin specifics such as network +Since this package does not deal with other decred specifics such as network communication or wallets, it provides a notification system which gives the caller a high level of flexibility in how they want to react to certain events such as orphan blocks which need their parents requested and newly connected main chain blocks which might result in wallet updates. -Bitcoin Chain Processing Overview +Decred Chain Processing Overview Before a block is allowed into the block chain, it must go through an intensive series of validation rules. The following list serves as a general outline of diff --git a/blockchain/error.go b/blockchain/error.go index 0db8bddc..126a685d 100644 --- a/blockchain/error.go +++ b/blockchain/error.go @@ -1,4 +1,5 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -17,10 +18,17 @@ const ( // exists. ErrDuplicateBlock ErrorCode = iota + // ErrMissingParent indicates that the block was an orphan. + ErrMissingParent + // ErrBlockTooBig indicates the serialized block size exceeds the // maximum allowed size. ErrBlockTooBig + // ErrWrongBlockSize indicates that the block size from the header was + // not the actual serialized size of the block. + ErrWrongBlockSize + // ErrBlockVersionTooOld indicates the block version is too old and is // no longer accepted since the majority of the network has upgraded // to a newer version. @@ -146,10 +154,22 @@ const ( // is not a coinbase transaction. ErrFirstTxNotCoinbase + // ErrCoinbaseHeight indicates that the encoded height in the coinbase + // is incorrect. + ErrCoinbaseHeight + // ErrMultipleCoinbases indicates a block contains more than one // coinbase transaction. ErrMultipleCoinbases + // ErrStakeTxInRegularTree indicates a stake transaction was found in + // the regular transaction tree. + ErrStakeTxInRegularTree + + // ErrRegTxInStakeTree indicates that a regular transaction was found in + // the stake transaction tree. + ErrRegTxInStakeTree + // ErrBadCoinbaseScriptLen indicates the length of the signature script // for a coinbase transaction is not within the valid range. ErrBadCoinbaseScriptLen @@ -158,15 +178,29 @@ const ( // not match the expected value of the subsidy plus the sum of all fees. ErrBadCoinbaseValue - // ErrMissingCoinbaseHeight indicates the coinbase transaction for a - // block does not start with the serialized block block height as - // required for version 2 and higher blocks. - ErrMissingCoinbaseHeight + // ErrBadCoinbaseOutpoint indicates that the outpoint used by a coinbase + // as input was non-null. + ErrBadCoinbaseOutpoint - // ErrBadCoinbaseHeight indicates the serialized block height in the - // coinbase transaction for version 2 and higher blocks does not match - // the expected value. - ErrBadCoinbaseHeight + // ErrBadCoinbaseFraudProof indicates that the fraud proof for a coinbase + // input was non-null. 
+ ErrBadCoinbaseFraudProof + + // ErrBadCoinbaseAmountIn indicates that the AmountIn (=subsidy) for a + // coinbase input was incorrect. + ErrBadCoinbaseAmountIn + + // ErrBadStakebaseAmountIn indicates that the AmountIn (=subsidy) for a + // stakebase input was incorrect. + ErrBadStakebaseAmountIn + + // ErrBadStakebaseScriptLen indicates the length of the signature script + // for a stakebase transaction is not within the valid range. + ErrBadStakebaseScriptLen + + // ErrBadStakevaseScrVal indicates the signature script for a stakebase + // transaction was not set to the network consensus value. + ErrBadStakevaseScrVal // ErrScriptMalformed indicates a transaction script is malformed in // some way. For example, it might be longer than the maximum allowed @@ -178,48 +212,299 @@ const ( // such signature verification failures and execution past the end of // the stack. ErrScriptValidation + + // ErrNotEnoughStake indicates that there was for some SStx in a given block, + // the given SStx did not have enough stake to meet the network target. + ErrNotEnoughStake + + // ErrStakeBelowMinimum indicates that for some SStx in a given block, + // the given SStx had an amount of stake below the minimum network target. + ErrStakeBelowMinimum + + // ErrNonstandardStakeTx indicates that a block contained a stake tx that + // was not one of the allowed types of a stake transactions. + ErrNonstandardStakeTx + + // ErrNotEnoughVotes indicates that a block contained less than a majority + // of voters. + ErrNotEnoughVotes + + // ErrTooManyVotes indicates that a block contained more than the maximum + // allowable number of votes. + ErrTooManyVotes + + // ErrFreshStakeMismatch indicates that a block's header contained a different + // number of SStx as compared to what was found in the block. + ErrFreshStakeMismatch + + // ErrTooManySStxs indicates that more than the allowed number of SStx was + // found in a block. + ErrTooManySStxs + + // ErrInvalidEarlyStakeTx indicates that a tx type other than SStx was found + // in the stake tx tree before the period when stake validation begins, or + // before the stake tx type could possibly be included in the block. + ErrInvalidEarlyStakeTx + + // ErrTicketUnavailable indicates that a vote in the block spent a ticket + // that could not be found. + ErrTicketUnavailable + + // ErrVotesOnWrongBlock indicates that an SSGen voted on a block not the + // block's parent, and so was ineligible for inclusion into that block. + ErrVotesOnWrongBlock + + // ErrVotesMismatch indicates that the number of SSGen in the block was not + // equivalent to the number of votes provided in the block header. + ErrVotesMismatch + + // ErrIncongruentVotebit indicates that the first votebit in votebits was not + // the same as that determined by the majority of voters in the SSGen tx + // included in the block. + ErrIncongruentVotebit + + // ErrInvalidSSRtx indicates than an SSRtx in a block could not be found to + // have a valid missed sstx input as per the stake ticket database. + ErrInvalidSSRtx + + // ErrInvalidRevNum indicates that the number of revocations from the + // header was not the same as the number of SSRtx included in the block. + ErrInvalidRevNum + + // ErrTooManyRevocations indicates more revocations were found in a block + // than were allowed. + ErrTooManyRevocations + + // ErrSStxCommitment indicates that the propotional amounts from the inputs + // of an SStx did not match those found in the commitment outputs. 
+ ErrSStxCommitment + + // ErrUnparseableSSGen indicates that the SSGen block vote or votebits data + // was unparseable from the null data outputs. + ErrUnparseableSSGen + + // ErrInvalidSSGenInput indicates that the input SStx to the SSGen tx was + // invalid because it was not an SStx. + ErrInvalidSSGenInput + + // ErrSSGenPayeeNum indicates that the number of payees from the referenced + // SSGen's SStx was not the same as the number of the payees in the outputs + // of the SSGen tx. + ErrSSGenPayeeNum + + // ErrSSGenPayeeOuts indicates that the SSGen payee outputs were either not + // the values that would be expected given the rewards and input amounts of + // the original SStx, or that the SSGen addresses did not correctly correspond + // to the null data outputs given in the originating SStx. + ErrSSGenPayeeOuts + + // ErrSSGenSubsidy indicates that there was an error in the amount of subsidy + // generated in the vote. + ErrSSGenSubsidy + + // ErrSStxInImmature indicates that the OP_SSTX tagged output used as input + // was not yet TicketMaturity many blocks old. + ErrSStxInImmature + + // ErrSStxInScrType indicates that the input used in an sstx was not + // pay-to-pubkeyhash or pay-to-script-hash, which is required. It can + // be OP_SS* tagged, but it must be P2PKH or P2SH. + ErrSStxInScrType + + // ErrInvalidSSRtxInput indicates that the input for the SSRtx was not from + // an SStx. + ErrInvalidSSRtxInput + + // ErrSSRtxPayeesMismatch means that the number of payees in an SSRtx was + // not the same as the number of payees in the outputs of the input SStx. + ErrSSRtxPayeesMismatch + + // ErrSSRtxPayees indicates that the SSRtx failed to pay out to the committed + // addresses or amounts from the originating SStx. + ErrSSRtxPayees + + // ErrTxSStxOutSpend indicates that a non SSGen or SSRtx tx attempted to spend + // an OP_SSTX tagged output from an SStx. + ErrTxSStxOutSpend + + // ErrRegTxSpendStakeOut indicates that a regular tx attempted to spend to + // outputs tagged with stake tags, e.g. OP_SSTX. + ErrRegTxSpendStakeOut + + // ErrBIP0030 indicates that a block failed to pass BIP0030. + ErrBIP0030 + + // ErrInvalidFinalState indicates that the final state of the PRNG included + // in the the block differed from the calculated final state. + ErrInvalidFinalState + + // ErrPoolSize indicates an error in the ticket pool size for this block. + ErrPoolSize + + // ErrForceReorgWrongChain indicates that a reroganization was attempted + // to be forced, but the chain indicated was not mirrored by b.bestChain. + ErrForceReorgWrongChain + + // ErrForceReorgMissingChild indicates that a reroganization was attempted + // to be forced, but the child node to reorganize to could not be found. + ErrForceReorgMissingChild + + // ErrBadStakebaseValue indicates that a block's stake tx tree has spent + // more than it is allowed. + ErrBadStakebaseValue + + // ErrDiscordantTxTree specifies that a given origin tx's content + // indicated that it should exist in a different tx tree than the + // one given in the TxIn outpoint. + ErrDiscordantTxTree + + // ErrStakeFees indicates an error with the fees found in the stake + // transaction tree. + ErrStakeFees + + // ErrNoStakeTx indicates there were no stake transactions found in a + // block after stake validation height. + ErrNoStakeTx + + // ErrBadBlockHeight indicates that a block header's embedded block height + // was different from where it was actually embedded in the block chain. 
+ ErrBadBlockHeight + + // ErrBlockOneTx indicates that block height 1 failed to correct generate + // the block one premine transaction. + ErrBlockOneTx + + // ErrBlockOneTx indicates that block height 1 coinbase transaction in + // zero was incorrect in some way. + ErrBlockOneInputs + + // ErrBlockOneOutputs indicates that block height 1 failed to incorporate + // the ledger addresses correctly into the transaction's outputs. + ErrBlockOneOutputs + + // ErrNoTax indicates that there was no tax present in the coinbase of a + // block after height 1. + ErrNoTax + + // ErrExpiredTx indicates that the transaction is currently expired. + ErrExpiredTx + + // ErrExpiryTxSpentEarly indicates that an output from a transaction + // that included an expiry field was spent before coinbase maturity + // many blocks had passed in the blockchain. + ErrExpiryTxSpentEarly + + // ErrFraudAmountIn indicates the witness amount given was fraudulent. + ErrFraudAmountIn + + // ErrFraudBlockHeight indicates the witness block height given was fraudulent. + ErrFraudBlockHeight + + // ErrFraudBlockIndex indicates the witness block index given was fraudulent. + ErrFraudBlockIndex + + // ErrZeroValueOutputSpend indicates that a transaction attempted to spend a + // zero value output. + ErrZeroValueOutputSpend + + // ErrInvalidEarlyVoteBits indicates that a block before stake validation + // height had an unallowed vote bits value. + ErrInvalidEarlyVoteBits ) // Map of ErrorCode values back to their constant names for pretty printing. var errorCodeStrings = map[ErrorCode]string{ - ErrDuplicateBlock: "ErrDuplicateBlock", - ErrBlockTooBig: "ErrBlockTooBig", - ErrBlockVersionTooOld: "ErrBlockVersionTooOld", - ErrInvalidTime: "ErrInvalidTime", - ErrTimeTooOld: "ErrTimeTooOld", - ErrTimeTooNew: "ErrTimeTooNew", - ErrDifficultyTooLow: "ErrDifficultyTooLow", - ErrUnexpectedDifficulty: "ErrUnexpectedDifficulty", - ErrHighHash: "ErrHighHash", - ErrBadMerkleRoot: "ErrBadMerkleRoot", - ErrBadCheckpoint: "ErrBadCheckpoint", - ErrForkTooOld: "ErrForkTooOld", - ErrCheckpointTimeTooOld: "ErrCheckpointTimeTooOld", - ErrNoTransactions: "ErrNoTransactions", - ErrTooManyTransactions: "ErrTooManyTransactions", - ErrNoTxInputs: "ErrNoTxInputs", - ErrNoTxOutputs: "ErrNoTxOutputs", - ErrTxTooBig: "ErrTxTooBig", - ErrBadTxOutValue: "ErrBadTxOutValue", - ErrDuplicateTxInputs: "ErrDuplicateTxInputs", - ErrBadTxInput: "ErrBadTxInput", - ErrMissingTx: "ErrMissingTx", - ErrUnfinalizedTx: "ErrUnfinalizedTx", - ErrDuplicateTx: "ErrDuplicateTx", - ErrOverwriteTx: "ErrOverwriteTx", - ErrImmatureSpend: "ErrImmatureSpend", - ErrDoubleSpend: "ErrDoubleSpend", - ErrSpendTooHigh: "ErrSpendTooHigh", - ErrBadFees: "ErrBadFees", - ErrTooManySigOps: "ErrTooManySigOps", - ErrFirstTxNotCoinbase: "ErrFirstTxNotCoinbase", - ErrMultipleCoinbases: "ErrMultipleCoinbases", - ErrBadCoinbaseScriptLen: "ErrBadCoinbaseScriptLen", - ErrBadCoinbaseValue: "ErrBadCoinbaseValue", - ErrMissingCoinbaseHeight: "ErrMissingCoinbaseHeight", - ErrBadCoinbaseHeight: "ErrBadCoinbaseHeight", - ErrScriptMalformed: "ErrScriptMalformed", - ErrScriptValidation: "ErrScriptValidation", + ErrDuplicateBlock: "ErrDuplicateBlock", + ErrMissingParent: "ErrMissingParent", + ErrBlockTooBig: "ErrBlockTooBig", + ErrWrongBlockSize: "ErrWrongBlockSize", + ErrBlockVersionTooOld: "ErrBlockVersionTooOld", + ErrInvalidTime: "ErrInvalidTime", + ErrTimeTooOld: "ErrTimeTooOld", + ErrTimeTooNew: "ErrTimeTooNew", + ErrDifficultyTooLow: "ErrDifficultyTooLow", + ErrUnexpectedDifficulty: 
"ErrUnexpectedDifficulty", + ErrHighHash: "ErrHighHash", + ErrBadMerkleRoot: "ErrBadMerkleRoot", + ErrBadCheckpoint: "ErrBadCheckpoint", + ErrForkTooOld: "ErrForkTooOld", + ErrCheckpointTimeTooOld: "ErrCheckpointTimeTooOld", + ErrNoTransactions: "ErrNoTransactions", + ErrTooManyTransactions: "ErrTooManyTransactions", + ErrNoTxInputs: "ErrNoTxInputs", + ErrNoTxOutputs: "ErrNoTxOutputs", + ErrTxTooBig: "ErrTxTooBig", + ErrBadTxOutValue: "ErrBadTxOutValue", + ErrDuplicateTxInputs: "ErrDuplicateTxInputs", + ErrBadTxInput: "ErrBadTxInput", + ErrMissingTx: "ErrMissingTx", + ErrUnfinalizedTx: "ErrUnfinalizedTx", + ErrDuplicateTx: "ErrDuplicateTx", + ErrOverwriteTx: "ErrOverwriteTx", + ErrImmatureSpend: "ErrImmatureSpend", + ErrDoubleSpend: "ErrDoubleSpend", + ErrSpendTooHigh: "ErrSpendTooHigh", + ErrBadFees: "ErrBadFees", + ErrTooManySigOps: "ErrTooManySigOps", + ErrFirstTxNotCoinbase: "ErrFirstTxNotCoinbase", + ErrMultipleCoinbases: "ErrMultipleCoinbases", + ErrStakeTxInRegularTree: "ErrStakeTxInRegularTree", + ErrRegTxInStakeTree: "ErrRegTxInStakeTree", + ErrBadCoinbaseScriptLen: "ErrBadCoinbaseScriptLen", + ErrBadCoinbaseValue: "ErrBadCoinbaseValue", + ErrBadCoinbaseOutpoint: "ErrBadCoinbaseOutpoint", + ErrBadCoinbaseFraudProof: "ErrBadCoinbaseFraudProof", + ErrBadCoinbaseAmountIn: "ErrBadCoinbaseAmountIn", + ErrBadStakebaseAmountIn: "ErrBadStakebaseAmountIn", + ErrBadStakebaseScriptLen: "ErrBadStakebaseScriptLen", + ErrBadStakevaseScrVal: "ErrBadStakevaseScrVal", + ErrScriptMalformed: "ErrScriptMalformed", + ErrScriptValidation: "ErrScriptValidation", + ErrNotEnoughStake: "ErrNotEnoughStake", + ErrStakeBelowMinimum: "ErrStakeBelowMinimum", + ErrNotEnoughVotes: "ErrNotEnoughVotes", + ErrTooManyVotes: "ErrTooManyVotes", + ErrFreshStakeMismatch: "ErrFreshStakeMismatch", + ErrTooManySStxs: "ErrTooManySStxs", + ErrInvalidEarlyStakeTx: "ErrInvalidEarlyStakeTx", + ErrTicketUnavailable: "ErrTicketUnavailable", + ErrVotesOnWrongBlock: "ErrVotesOnWrongBlock", + ErrVotesMismatch: "ErrVotesMismatch", + ErrIncongruentVotebit: "ErrIncongruentVotebit", + ErrInvalidSSRtx: "ErrInvalidSSRtx", + ErrInvalidRevNum: "ErrInvalidRevNum", + ErrTooManyRevocations: "ErrTooManyRevocations", + ErrSStxCommitment: "ErrSStxCommitment", + ErrUnparseableSSGen: "ErrUnparseableSSGen", + ErrInvalidSSGenInput: "ErrInvalidSSGenInput", + ErrSSGenPayeeOuts: "ErrSSGenPayeeOuts", + ErrSSGenSubsidy: "ErrSSGenSubsidy", + ErrSStxInImmature: "ErrSStxInImmature", + ErrSStxInScrType: "ErrSStxInScrType", + ErrInvalidSSRtxInput: "ErrInvalidSSRtxInput", + ErrSSRtxPayeesMismatch: "ErrSSRtxPayeesMismatch", + ErrSSRtxPayees: "ErrSSRtxPayees", + ErrTxSStxOutSpend: "ErrTxSStxOutSpend", + ErrRegTxSpendStakeOut: "ErrRegTxSpendStakeOut", + ErrInvalidFinalState: "ErrInvalidFinalState", + ErrPoolSize: "ErrPoolSize", + ErrForceReorgWrongChain: "ErrForceReorgWrongChain", + ErrForceReorgMissingChild: "ErrForceReorgMissingChild", + ErrBadStakebaseValue: "ErrBadStakebaseValue", + ErrDiscordantTxTree: "ErrDiscordantTxTree", + ErrStakeFees: "ErrStakeFees", + ErrBadBlockHeight: "ErrBadBlockHeight", + ErrBlockOneTx: "ErrBlockOneTx", + ErrBlockOneInputs: "ErrBlockOneInputs", + ErrBlockOneOutputs: "ErrBlockOneOutputs", + ErrNoTax: "ErrNoTax", + ErrExpiredTx: "ErrExpiredTx", + ErrExpiryTxSpentEarly: "ErrExpiryTxSpentEarly", + ErrFraudAmountIn: "ErrFraudAmountIn", + ErrFraudBlockHeight: "ErrFraudBlockHeight", + ErrFraudBlockIndex: "ErrFraudBlockIndex", + ErrZeroValueOutputSpend: "ErrZeroValueOutputSpend", + ErrInvalidEarlyVoteBits: "ErrInvalidEarlyVoteBits", } // 
String returns the ErrorCode as a human-readable name. @@ -245,6 +530,11 @@ func (e RuleError) Error() string { return e.Description } +// GetCode returns the ErrorCode contained within the RuleError. +func (e RuleError) GetCode() ErrorCode { + return e.ErrorCode +} + // ruleError creates an RuleError given a set of arguments. func ruleError(c ErrorCode, desc string) RuleError { return RuleError{ErrorCode: c, Description: desc} diff --git a/blockchain/error_test.go b/blockchain/error_test.go index 640f504c..b241305a 100644 --- a/blockchain/error_test.go +++ b/blockchain/error_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -7,7 +8,7 @@ package blockchain_test import ( "testing" - "github.com/btcsuite/btcd/blockchain" + "github.com/decred/dcrd/blockchain" ) // TestErrorCodeStringer tests the stringized output for the ErrorCode type. @@ -51,8 +52,6 @@ func TestErrorCodeStringer(t *testing.T) { {blockchain.ErrMultipleCoinbases, "ErrMultipleCoinbases"}, {blockchain.ErrBadCoinbaseScriptLen, "ErrBadCoinbaseScriptLen"}, {blockchain.ErrBadCoinbaseValue, "ErrBadCoinbaseValue"}, - {blockchain.ErrMissingCoinbaseHeight, "ErrMissingCoinbaseHeight"}, - {blockchain.ErrBadCoinbaseHeight, "ErrBadCoinbaseHeight"}, {blockchain.ErrScriptMalformed, "ErrScriptMalformed"}, {blockchain.ErrScriptValidation, "ErrScriptValidation"}, {0xffff, "Unknown ErrorCode (65535)"}, diff --git a/blockchain/example_test.go b/blockchain/example_test.go index 046043dc..e0b4ecba 100644 --- a/blockchain/example_test.go +++ b/blockchain/example_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,16 +9,17 @@ import ( "fmt" "math/big" - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/database" - _ "github.com/btcsuite/btcd/database/memdb" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/database" + _ "github.com/decred/dcrd/database/memdb" + "github.com/decred/dcrutil" ) // This example demonstrates how to create a new chain instance and use // ProcessBlock to attempt to add a block to the chain. As the package -// overview documentation describes, this includes all of the Bitcoin consensus +// overview documentation describes, this includes all of the Decred consensus // rules. This example intentionally attempts to insert a duplicate genesis // block to illustrate how an invalid block is handled. func ExampleBlockChain_ProcessBlock() { @@ -32,10 +34,11 @@ func ExampleBlockChain_ProcessBlock() { } defer db.Close() + var tmdb *stake.TicketDB // Insert the main network genesis block. This is part of the initial // database setup. Like above, this typically would not be needed when // opening an existing database.
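// Aside: the GetCode accessor added to RuleError in error.go above lets
// callers branch on the specific consensus failure instead of matching error
// strings. A hedged sketch of such a caller follows; the helper name and the
// messages are illustrative only, and it assumes the error value is the
// RuleError returned by functions such as ProcessBlock (imports
// "github.com/decred/dcrd/blockchain").
func describeRuleError(err error) string {
	rerr, ok := err.(blockchain.RuleError)
	if !ok {
		return "unexpected failure: " + err.Error()
	}
	switch rerr.GetCode() {
	case blockchain.ErrDuplicateBlock:
		return "block already known"
	case blockchain.ErrMissingParent:
		return "orphan: parent is not yet known"
	default:
		return "rejected: " + rerr.Error()
	}
}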
- genesisBlock := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) + genesisBlock := dcrutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) _, err = db.InsertBlock(genesisBlock) if err != nil { fmt.Printf("Failed to insert genesis block: %v\n", err) @@ -43,8 +46,8 @@ func ExampleBlockChain_ProcessBlock() { } // Create a new BlockChain instance using the underlying database for - // the main bitcoin network and ignore notifications. - chain := blockchain.New(db, &chaincfg.MainNetParams, nil) + // the main decred network and ignore notifications. + chain := blockchain.New(db, tmdb, &chaincfg.MainNetParams, nil) // Create a new median time source that is required by the upcoming // call to ProcessBlock. Ordinarily this would also add time values @@ -55,22 +58,24 @@ func ExampleBlockChain_ProcessBlock() { // Process a block. For this example, we are going to intentionally // cause an error by trying to process the genesis block which already // exists. - isOrphan, err := chain.ProcessBlock(genesisBlock, timeSource, blockchain.BFNone) + isOrphan, _, err := chain.ProcessBlock(genesisBlock, timeSource, blockchain.BFNone) if err != nil { fmt.Printf("Failed to process block: %v\n", err) return } fmt.Printf("Block accepted. Is it an orphan?: %v", isOrphan) + // This output is dependent on the genesis block, and needs to be + // updated if the mainnet genesis block is updated. // Output: - // Failed to process block: already have block 000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f + // Failed to process block: already have block 267a53b5ee86c24a48ec37aee4f4e7c0c4004892b7259e695e9f5b321f1ab9d2 } // This example demonstrates how to convert the compact "bits" in a block header // which represent the target difficulty to a big integer and display it using // the typical hex notation. func ExampleCompactToBig() { - // Convert the bits from block 300000 in the main block chain. + // Convert the bits from block 300000 in the main Decred block chain. bits := uint32(419465580) targetDifficulty := blockchain.CompactToBig(bits) diff --git a/blockchain/internal_test.go b/blockchain/internal_test.go index 87a9ba81..72ddf216 100644 --- a/blockchain/internal_test.go +++ b/blockchain/internal_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -17,22 +18,12 @@ import ( "time" ) -// TstSetCoinbaseMaturity makes the ability to set the coinbase maturity -// available to the test package. -func TstSetCoinbaseMaturity(maturity int64) { - coinbaseMaturity = maturity -} - // TstTimeSorter makes the internal timeSorter type available to the test // package. func TstTimeSorter(times []time.Time) sort.Interface { return timeSorter(times) } -// TstCheckSerializedHeight makes the internal checkSerializedHeight function -// available to the test package. -var TstCheckSerializedHeight = checkSerializedHeight - // TstSetMaxMedianTimeEntries makes the ability to set the maximum number of // median tiem entries available to the test package. func TstSetMaxMedianTimeEntries(val int) { diff --git a/blockchain/log.go b/blockchain/log.go index c80e4d9e..cab1543a 100644 --- a/blockchain/log.go +++ b/blockchain/log.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
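// Aside: with the signature change in process.go below, ProcessBlock now
// reports two booleans, the first for whether the block ended up on the main
// chain and the second for whether it was held as an orphan. A minimal sketch
// of a caller that handles all three outcomes follows; the helper name is an
// assumption, and chain, block, and timeSource are presumed to be set up as
// in the example above.
func classifyProcessedBlock(chain *blockchain.BlockChain, block *dcrutil.Block,
	timeSource blockchain.MedianTimeSource) (string, error) {
	onMainChain, isOrphan, err := chain.ProcessBlock(block, timeSource,
		blockchain.BFNone)
	if err != nil {
		return "", err
	}
	switch {
	case isOrphan:
		return "orphan: parent not yet known", nil
	case onMainChain:
		return "extended the main chain", nil
	default:
		return "accepted onto a side chain", nil
	}
}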
diff --git a/blockchain/mediantime.go b/blockchain/mediantime.go index ac0689e2..1970e6d3 100644 --- a/blockchain/mediantime.go +++ b/blockchain/mediantime.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -183,7 +184,7 @@ func (m *medianTime) AddTimeSample(sourceID string, timeVal time.Time) { // Warn if none of the time samples are close. if !remoteHasCloseTime { log.Warnf("Please check your date and time " + - "are correct! btcd will not work " + + "are correct! dcrd will not work " + "properly with an invalid time") } } diff --git a/blockchain/mediantime_test.go b/blockchain/mediantime_test.go index 44d3105a..a24bc260 100644 --- a/blockchain/mediantime_test.go +++ b/blockchain/mediantime_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -9,7 +10,7 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/blockchain" + "github.com/decred/dcrd/blockchain" ) // TestMedianTime tests the medianTime implementation. diff --git a/blockchain/merkle.go b/blockchain/merkle.go index 2dc00018..a3011e1c 100644 --- a/blockchain/merkle.go +++ b/blockchain/merkle.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -7,8 +8,8 @@ package blockchain import ( "math" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrutil" ) // nextPowerOfTwo returns the next highest power of two from a given number if @@ -28,13 +29,13 @@ func nextPowerOfTwo(n int) int { // HashMerkleBranches takes two hashes, treated as the left and right tree // nodes, and returns the hash of their concatenation. This is a helper // function used to aid in the generation of a merkle tree. -func HashMerkleBranches(left *wire.ShaHash, right *wire.ShaHash) *wire.ShaHash { +func HashMerkleBranches(left *chainhash.Hash, right *chainhash.Hash) *chainhash.Hash { // Concatenate the left and right nodes. - var sha [wire.HashSize * 2]byte - copy(sha[:wire.HashSize], left[:]) - copy(sha[wire.HashSize:], right[:]) + var sha [chainhash.HashSize * 2]byte + copy(sha[:chainhash.HashSize], left[:]) + copy(sha[chainhash.HashSize:], right[:]) - newSha := wire.DoubleSha256SH(sha[:]) + newSha := chainhash.HashFuncH(sha[:]) return &newSha } @@ -45,7 +46,7 @@ func HashMerkleBranches(left *wire.ShaHash, right *wire.ShaHash) *wire.ShaHash { // is stored in a linear array. // // A merkle tree is a tree in which every non-leaf node is the hash of its -// children nodes. A diagram depicting how this works for bitcoin transactions +// children nodes. A diagram depicting how this works for decred transactions // where h(x) is a double sha256 follows: // // root = h1234 = h(h12 + h34) @@ -66,16 +67,26 @@ func HashMerkleBranches(left *wire.ShaHash, right *wire.ShaHash) *wire.ShaHash { // are calculated by concatenating the left node with itself before hashing. // Since this function uses nodes that are pointers to the hashes, empty nodes // will be nil. 
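// Aside: BuildMerkleTreeStore below keeps every tree level in one linear
// array, with the leaves first and the root in the final slot, and a lone
// left node is hashed with itself. The level-by-level sketch here shows the
// same pairing rule in isolation; the function and the pluggable hash are
// illustrative stand-ins rather than the package's chainhash-based code, and
// note the package additionally returns a single zeroed hash for an empty
// transaction slice.
func merkleRootSketch(leaves [][]byte, hash func(l, r []byte) []byte) []byte {
	if len(leaves) == 0 {
		return nil
	}
	level := leaves
	for len(level) > 1 {
		next := make([][]byte, 0, (len(level)+1)/2)
		for i := 0; i < len(level); i += 2 {
			left, right := level[i], level[i]
			if i+1 < len(level) {
				right = level[i+1]
			}
			// Each parent is the hash of its concatenated children.
			next = append(next, hash(left, right))
		}
		level = next
	}
	return level[0]
}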
-func BuildMerkleTreeStore(transactions []*btcutil.Tx) []*wire.ShaHash { +func BuildMerkleTreeStore(transactions []*dcrutil.Tx) []*chainhash.Hash { + // If there's an empty stake tree, return a totally zeroed out merkle tree + // root only. + if len(transactions) == 0 { + merkles := make([]*chainhash.Hash, 1) + merkles[0] = &chainhash.Hash{} + return merkles + } + // Calculate how many entries are required to hold the binary merkle // tree as a linear array and create an array of that size. nextPoT := nextPowerOfTwo(len(transactions)) arraySize := nextPoT*2 - 1 - merkles := make([]*wire.ShaHash, arraySize) + merkles := make([]*chainhash.Hash, arraySize) // Create the base transaction shas and populate the array with them. for i, tx := range transactions { - merkles[i] = tx.Sha() + msgTx := tx.MsgTx() + txShaFull := msgTx.TxShaFull() + merkles[i] = &txShaFull } // Start the array offset after the last transaction and adjusted to the diff --git a/blockchain/merkle_test.go b/blockchain/merkle_test.go index 633be110..b472bc64 100644 --- a/blockchain/merkle_test.go +++ b/blockchain/merkle_test.go @@ -1,24 +1,13 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package blockchain_test -import ( - "testing" +import () - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcutil" -) - -// TestMerkle tests the BuildMerkleTreeStore API. -func TestMerkle(t *testing.T) { - block := btcutil.NewBlock(&Block100000) - merkles := blockchain.BuildMerkleTreeStore(block.Transactions()) - calculatedMerkleRoot := merkles[len(merkles)-1] - wantMerkle := &Block100000.Header.MerkleRoot - if !wantMerkle.IsEqual(calculatedMerkleRoot) { - t.Errorf("BuildMerkleTreeStore: merkle root mismatch - "+ - "got %v, want %v", calculatedMerkleRoot, wantMerkle) - } -} +// TODO Make tests for merkle root calculation. Merkle root calculation and +// corruption is already well tested in the blockchain error unit tests and +// reorganization unit tests, but it'd be nice to have a specific test for +// these functions and their error paths. diff --git a/blockchain/notifications.go b/blockchain/notifications.go index 57e97dd2..2f9f79b7 100644 --- a/blockchain/notifications.go +++ b/blockchain/notifications.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -6,6 +7,10 @@ package blockchain import ( "fmt" + + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrutil" ) // NotificationType represents the type of a notification message. @@ -29,14 +34,29 @@ const ( // NTBlockDisconnected indicates the associated block was disconnected // from the main chain. NTBlockDisconnected + + // NTReorganization indicates that a blockchain reorganization is in + // progress. + NTReorganization + + // NTSpentAndMissedTickets indicates spent or missed tickets from a newly + // accepted block. + NTSpentAndMissedTickets + + // NTNewTickets indicates newly maturing tickets from a newly + // accepted block. + NTNewTickets ) // notificationTypeStrings is a map of notification types back to their constant // names for pretty printing.
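// Aside: consumers receive these notification types through the callback
// passed to New, with the payload shapes documented on the Notification type
// just below. A hedged sketch of a handler follows; the logging is a
// placeholder and the handler name is an assumption (imports "fmt" and
// "github.com/decred/dcrd/blockchain").
func handleChainNotification(n *blockchain.Notification) {
	switch n.Type {
	case blockchain.NTBlockAccepted:
		if d, ok := n.Data.(*blockchain.BlockAcceptedNtfnsData); ok {
			fmt.Printf("block accepted, on main chain: %v\n",
				d.OnMainChain)
		}
	case blockchain.NTReorganization:
		if d, ok := n.Data.(*blockchain.ReorganizationNtfnsData); ok {
			fmt.Printf("reorganized from %v (%d) to %v (%d)\n",
				d.OldHash, d.OldHeight, d.NewHash, d.NewHeight)
		}
	case blockchain.NTSpentAndMissedTickets, blockchain.NTNewTickets:
		if d, ok := n.Data.(*blockchain.TicketNotificationsData); ok {
			fmt.Printf("ticket update at height %d\n", d.Height)
		}
	}
}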
var notificationTypeStrings = map[NotificationType]string{ - NTBlockAccepted: "NTBlockAccepted", - NTBlockConnected: "NTBlockConnected", - NTBlockDisconnected: "NTBlockDisconnected", + NTBlockAccepted: "NTBlockAccepted", + NTBlockConnected: "NTBlockConnected", + NTBlockDisconnected: "NTBlockDisconnected", + NTReorganization: "NTReorganization", + NTSpentAndMissedTickets: "NTSpentAndMissedTickets", + NTNewTickets: "NTNewTickets", } // String returns the NotificationType in human-readable form. @@ -47,12 +67,40 @@ func (n NotificationType) String() string { return fmt.Sprintf("Unknown Notification Type (%d)", int(n)) } +// BlockAcceptedNtfnsData is the structure for data indicating information +// about a block being accepted. +type BlockAcceptedNtfnsData struct { + OnMainChain bool + Block *dcrutil.Block +} + +// ReorganizationNtfnsData is the structure for data indicating information +// about a reorganization. +type ReorganizationNtfnsData struct { + OldHash chainhash.Hash + OldHeight int64 + NewHash chainhash.Hash + NewHeight int64 +} + +// TicketNotificationsData is the structure for new/spent/missed ticket +// notifications at blockchain HEAD that are outgoing from chain. +type TicketNotificationsData struct { + Hash chainhash.Hash + Height int64 + StakeDifficulty int64 + TicketMap stake.SStxMemMap +} + // Notification defines notification that is sent to the caller via the callback // function provided during the call to New and consists of a notification type // as well as associated data that depends on the type as follows: -// - NTBlockAccepted: *btcutil.Block -// - NTBlockConnected: *btcutil.Block -// - NTBlockDisconnected: *btcutil.Block +// - NTBlockAccepted: *BlockAcceptedNtfnsData +// - NTBlockConnected: []*dcrutil.Block of len 2 +// - NTBlockDisconnected: []*dcrutil.Block of len 2 +// - NTReorganization: *ReorganizationNtfnsData +// - NTSpentAndMissedTickets: *TicketNotificationsData +// - NTNewTickets: *TicketNotificationsData type Notification struct { Type NotificationType Data interface{} diff --git a/blockchain/process.go b/blockchain/process.go index b161dd03..9a9666dd 100644 --- a/blockchain/process.go +++ b/blockchain/process.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -7,8 +8,8 @@ package blockchain import ( "fmt" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrutil" ) // BehaviorFlags is a bitmask defining tweaks to the normal behavior when @@ -38,7 +39,7 @@ const ( // blockExists determines whether a block with the given hash exists either in // the main chain or any side chains. -func (b *BlockChain) blockExists(hash *wire.ShaHash) (bool, error) { +func (b *BlockChain) blockExists(hash *chainhash.Hash) (bool, error) { // Check memory chain first (could be main chain or side chain blocks). if _, ok := b.index[*hash]; ok { return true, nil @@ -55,11 +56,11 @@ func (b *BlockChain) blockExists(hash *wire.ShaHash) (bool, error) { // // The flags do not modify the behavior of this function directly, however they // are needed to pass along to maybeAcceptBlock. -func (b *BlockChain) processOrphans(hash *wire.ShaHash, flags BehaviorFlags) error { +func (b *BlockChain) processOrphans(hash *chainhash.Hash, flags BehaviorFlags) error { // Start with processing at least the passed hash. 
Leave a little room // for additional orphan blocks that need to be processed without // needing to grow the array in the common case. - processHashes := make([]*wire.ShaHash, 0, 10) + processHashes := make([]*chainhash.Hash, 0, 10) processHashes = append(processHashes, hash) for len(processHashes) > 0 { // Pop the first hash to process from the slice. @@ -90,7 +91,7 @@ func (b *BlockChain) processOrphans(hash *wire.ShaHash, flags BehaviorFlags) err i-- // Potentially accept the block into the block chain. - err := b.maybeAcceptBlock(orphan.block, flags) + _, err := b.maybeAcceptBlock(orphan.block, flags) if err != nil { return err } @@ -109,10 +110,14 @@ func (b *BlockChain) processOrphans(hash *wire.ShaHash, flags BehaviorFlags) err // blocks, ensuring blocks follow all rules, orphan handling, and insertion into // the block chain along with best chain selection and reorganization. // -// It returns a bool which indicates whether or not the block is an orphan and -// any errors that occurred during processing. The returned bool is only valid -// when the error is nil. -func (b *BlockChain) ProcessBlock(block *btcutil.Block, timeSource MedianTimeSource, flags BehaviorFlags) (bool, error) { +// It returns a first bool which specifies whether or not the block ended up on +// the main chain (true) rather than on a side chain or fork (false). +// +// It returns a second bool which indicates whether or not the block is an orphan, +// along with any errors that occurred during processing. The returned bools are +// only valid when the error is nil. +func (b *BlockChain) ProcessBlock(block *dcrutil.Block, + timeSource MedianTimeSource, flags BehaviorFlags) (bool, bool, error) { fastAdd := flags&BFFastAdd == BFFastAdd dryRun := flags&BFDryRun == BFDryRun @@ -122,23 +127,23 @@ func (b *BlockChain) ProcessBlock(block *btcutil.Block, timeSource MedianTimeSou // The block must not already exist in the main chain or side chains. exists, err := b.blockExists(blockHash) if err != nil { - return false, err + return false, false, err } if exists { str := fmt.Sprintf("already have block %v", blockHash) - return false, ruleError(ErrDuplicateBlock, str) + return false, false, ruleError(ErrDuplicateBlock, str) } // The block must not already exist as an orphan. if _, exists := b.orphans[*blockHash]; exists { str := fmt.Sprintf("already have block (orphan) %v", blockHash) - return false, ruleError(ErrDuplicateBlock, str) + return false, false, ruleError(ErrDuplicateBlock, str) } // Perform preliminary sanity checks on the block and its transactions. - err = checkBlockSanity(block, b.chainParams.PowLimit, timeSource, flags) + err = checkBlockSanity(block, timeSource, flags, b.chainParams) if err != nil { - return false, err + return false, false, err } // Find the previous checkpoint and perform some additional checks based @@ -150,7 +155,7 @@ func (b *BlockChain) ProcessBlock(block *btcutil.Block, timeSource MedianTimeSou blockHeader := &block.MsgBlock().Header checkpointBlock, err := b.findPreviousCheckpoint() if err != nil { - return false, err + return false, false, err } if checkpointBlock != nil { // Ensure the block timestamp is after the checkpoint timestamp.
@@ -160,7 +165,7 @@ func (b *BlockChain) ProcessBlock(block *btcutil.Block, timeSource MedianTimeSou str := fmt.Sprintf("block %v has timestamp %v before "+ "last checkpoint timestamp %v", blockHash, blockHeader.Timestamp, checkpointTime) - return false, ruleError(ErrCheckpointTimeTooOld, str) + return false, false, ruleError(ErrCheckpointTimeTooOld, str) } if !fastAdd { // Even though the checks prior to now have already ensured the @@ -177,7 +182,7 @@ func (b *BlockChain) ProcessBlock(block *btcutil.Block, timeSource MedianTimeSou str := fmt.Sprintf("block target difficulty of %064x "+ "is too low when compared to the previous "+ "checkpoint", currentTarget) - return false, ruleError(ErrDifficultyTooLow, str) + return false, false, ruleError(ErrDifficultyTooLow, str) } } } @@ -187,7 +192,7 @@ func (b *BlockChain) ProcessBlock(block *btcutil.Block, timeSource MedianTimeSou if !prevHash.IsEqual(zeroHash) { prevHashExists, err := b.blockExists(prevHash) if err != nil { - return false, err + return false, false, err } if !prevHashExists { if !dryRun { @@ -196,15 +201,16 @@ func (b *BlockChain) ProcessBlock(block *btcutil.Block, timeSource MedianTimeSou b.addOrphanBlock(block) } - return true, nil + return false, true, err } } // The block has passed all context independent checks and appears sane // enough to potentially accept it into the block chain. - err = b.maybeAcceptBlock(block, flags) + var onMainChain bool + onMainChain, err = b.maybeAcceptBlock(block, flags) if err != nil { - return false, err + return false, false, err } // Don't process any orphans or log when the dry run flag is set. @@ -214,11 +220,11 @@ func (b *BlockChain) ProcessBlock(block *btcutil.Block, timeSource MedianTimeSou // there are no more. err := b.processOrphans(blockHash, flags) if err != nil { - return false, err + return false, false, err } log.Debugf("Accepted block %v", blockHash) } - return false, nil + return onMainChain, false, err } diff --git a/blockchain/reorganization_test.go b/blockchain/reorganization_test.go index 619feed4..aad74b50 100644 --- a/blockchain/reorganization_test.go +++ b/blockchain/reorganization_test.go @@ -1,134 +1,135 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package blockchain_test import ( + "bytes" "compress/bzip2" - "encoding/binary" - "io" + "encoding/gob" "os" "path/filepath" - "strings" "testing" - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrutil" ) // TestReorganization loads a set of test blocks which force a chain // reorganization to test the block chain handling code. -// The test blocks were originally from a post on the bitcoin talk forums: -// https://bitcointalk.org/index.php?topic=46370.msg577556#msg577556 func TestReorganization(t *testing.T) { - // Intentionally load the side chain blocks out of order to ensure - // orphans are handled properly along with chain reorganization. 
- testFiles := []string{ - "blk_0_to_4.dat.bz2", - "blk_4A.dat.bz2", - "blk_5A.dat.bz2", - "blk_3A.dat.bz2", - } - - var blocks []*btcutil.Block - for _, file := range testFiles { - blockTmp, err := loadBlocks(file) - if err != nil { - t.Errorf("Error loading file: %v\n", err) - } - for _, block := range blockTmp { - blocks = append(blocks, block) - } - } - - t.Logf("Number of blocks: %v\n", len(blocks)) - // Create a new database and chain instance to run tests against. - chain, teardownFunc, err := chainSetup("reorg") + chain, teardownFunc, err := chainSetup("reorgunittest", + simNetParams) if err != nil { t.Errorf("Failed to setup chain instance: %v", err) return } defer teardownFunc() - // Since we're not dealing with the real block chain, disable - // checkpoints and set the coinbase maturity to 1. - chain.DisableCheckpoints(true) - blockchain.TstSetCoinbaseMaturity(1) - - timeSource := blockchain.NewMedianTime() - expectedOrphans := map[int]struct{}{5: struct{}{}, 6: struct{}{}} - for i := 1; i < len(blocks); i++ { - isOrphan, err := chain.ProcessBlock(blocks[i], timeSource, blockchain.BFNone) - if err != nil { - t.Errorf("ProcessBlock fail on block %v: %v\n", i, err) - return - } - if _, ok := expectedOrphans[i]; !ok && isOrphan { - t.Errorf("ProcessBlock incorrectly returned block %v "+ - "is an orphan\n", i) - } - } - - return -} - -// loadBlocks reads files containing bitcoin block data (gzipped but otherwise -// in the format bitcoind writes) from disk and returns them as an array of -// btcutil.Block. This is largely borrowed from the test code in btcdb. -func loadBlocks(filename string) (blocks []*btcutil.Block, err error) { - filename = filepath.Join("testdata/", filename) - - var network = wire.MainNet - var dr io.Reader - var fi io.ReadCloser - - fi, err = os.Open(filename) + err = chain.GenerateInitialIndex() if err != nil { - return + t.Errorf("GenerateInitialIndex: %v", err) } - if strings.HasSuffix(filename, ".bz2") { - dr = bzip2.NewReader(fi) - } else { - dr = fi + // The genesis block should fail to connect since it's already + // inserted. + genesisBlock := simNetParams.GenesisBlock + err = chain.CheckConnectBlock(dcrutil.NewBlock(genesisBlock)) + if err == nil { + t.Errorf("CheckConnectBlock: Did not receive expected error") } + + // Load up the rest of the blocks up to HEAD. 
+ filename := filepath.Join("testdata/", "reorgto179.bz2") + fi, err := os.Open(filename) + bcStream := bzip2.NewReader(fi) defer fi.Close() - var block *btcutil.Block + // Create a buffer of the read file + bcBuf := new(bytes.Buffer) + bcBuf.ReadFrom(bcStream) - err = nil - for height := int64(1); err == nil; height++ { - var rintbuf uint32 - err = binary.Read(dr, binary.LittleEndian, &rintbuf) - if err == io.EOF { - // hit end of file at expected offset: no warning - height-- - err = nil - break - } - if err != nil { - break - } - if rintbuf != uint32(network) { - break - } - err = binary.Read(dr, binary.LittleEndian, &rintbuf) - blocklen := rintbuf + // Create decoder from the buffer and a map to store the data + bcDecoder := gob.NewDecoder(bcBuf) + blockChain := make(map[int64][]byte) - rbytes := make([]byte, blocklen) - - // read block - dr.Read(rbytes) - - block, err = btcutil.NewBlockFromBytes(rbytes) - if err != nil { - return - } - blocks = append(blocks, block) + // Decode the blockchain into the map + if err := bcDecoder.Decode(&blockChain); err != nil { + t.Errorf("error decoding test blockchain: %v", err.Error()) } + // Load up the short chain + timeSource := blockchain.NewMedianTime() + finalIdx1 := 179 + for i := 1; i < finalIdx1+1; i++ { + bl, err := dcrutil.NewBlockFromBytes(blockChain[int64(i)]) + if err != nil { + t.Errorf("NewBlockFromBytes error: %v", err.Error()) + } + bl.SetHeight(int64(i)) + + _, _, err = chain.ProcessBlock(bl, timeSource, blockchain.BFNone) + if err != nil { + t.Errorf("ProcessBlock error: %v", err.Error()) + } + } + + // Load the long chain and begin loading blocks from that too, + // forcing a reorganization + // Load up the rest of the blocks up to HEAD. + filename = filepath.Join("testdata/", "reorgto180.bz2") + fi, err = os.Open(filename) + bcStream = bzip2.NewReader(fi) + defer fi.Close() + + // Create a buffer of the read file + bcBuf = new(bytes.Buffer) + bcBuf.ReadFrom(bcStream) + + // Create decoder from the buffer and a map to store the data + bcDecoder = gob.NewDecoder(bcBuf) + blockChain = make(map[int64][]byte) + + // Decode the blockchain into the map + if err := bcDecoder.Decode(&blockChain); err != nil { + t.Errorf("error decoding test blockchain: %v", err.Error()) + } + + forkPoint := 131 + finalIdx2 := 180 + for i := forkPoint; i < finalIdx2+1; i++ { + bl, err := dcrutil.NewBlockFromBytes(blockChain[int64(i)]) + if err != nil { + t.Errorf("NewBlockFromBytes error: %v", err.Error()) + } + bl.SetHeight(int64(i)) + + _, _, err = chain.ProcessBlock(bl, timeSource, blockchain.BFNone) + if err != nil { + t.Errorf("ProcessBlock error: %v", err.Error()) + } + } + + // Ensure our blockchain is at the correct best tip + topBlock, _ := chain.GetTopBlock() + tipHash := topBlock.Sha() + expected, _ := chainhash.NewHashFromStr("5ab969d0afd8295b6cd1506f2a310d" + + "259322015c8bd5633f283a163ce0e50594") + if *tipHash != *expected { + t.Errorf("Failed to correctly reorg; expected tip %v, got tip %v", + expected, tipHash) + } + have, err := chain.HaveBlock(expected) + if !have { + t.Errorf("missing tip block after reorganization test") + } + if err != nil { + t.Errorf("unexpected error testing for presence of new tip block "+ + "after reorg test: %v", err) + } return } diff --git a/blockchain/scriptval.go b/blockchain/scriptval.go index ebfea08f..0f2e3545 100644 --- a/blockchain/scriptval.go +++ b/blockchain/scriptval.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this 
source code is governed by an ISC // license that can be found in the LICENSE file. @@ -9,16 +10,16 @@ import ( "math" "runtime" - "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) // txValidateItem holds a transaction along with which input to validate. type txValidateItem struct { txInIndex int txIn *wire.TxIn - tx *btcutil.Tx + tx *dcrutil.Tx } // txValidator provides a type which asynchronously validates transaction @@ -83,8 +84,10 @@ out: // Create a new script engine for the script pair. sigScript := txIn.SignatureScript pkScript := originMsgTx.TxOut[originTxIndex].PkScript + version := originMsgTx.TxOut[originTxIndex].Version + vm, err := txscript.NewEngine(pkScript, txVI.tx.MsgTx(), - txVI.txInIndex, v.flags) + txVI.txInIndex, v.flags, version) if err != nil { str := fmt.Sprintf("failed to parse input "+ "%s:%d which references output %s:%d - "+ @@ -191,7 +194,8 @@ func newTxValidator(txStore TxStore, flags txscript.ScriptFlags) *txValidator { // ValidateTransactionScripts validates the scripts for the passed transaction // using multiple goroutines. -func ValidateTransactionScripts(tx *btcutil.Tx, txStore TxStore, flags txscript.ScriptFlags) error { +func ValidateTransactionScripts(tx *dcrutil.Tx, txStore TxStore, + flags txscript.ScriptFlags) error { // Collect all of the transaction inputs and required information for // validation. txIns := tx.MsgTx().TxIn @@ -217,21 +221,32 @@ func ValidateTransactionScripts(tx *btcutil.Tx, txStore TxStore, flags txscript. } return nil + } // checkBlockScripts executes and validates the scripts for all transactions in // the passed block. -func checkBlockScripts(block *btcutil.Block, txStore TxStore, +// txTree = true is TxTreeRegular, txTree = false is TxTreeStake. +func checkBlockScripts(block *dcrutil.Block, txStore TxStore, txTree bool, scriptFlags txscript.ScriptFlags) error { // Collect all of the transaction inputs and required information for // validation for all transactions in the block into a single slice. numInputs := 0 - for _, tx := range block.Transactions() { + var txs []*dcrutil.Tx + + // TxTreeRegular handling. + if txTree { + txs = block.Transactions() + } else { // TxTreeStake + txs = block.STransactions() + } + + for _, tx := range txs { numInputs += len(tx.MsgTx().TxIn) } txValItems := make([]*txValidateItem, 0, numInputs) - for _, tx := range block.Transactions() { + for _, tx := range txs { for txInIdx, txIn := range tx.MsgTx().TxIn { // Skip coinbases. if txIn.PreviousOutPoint.Index == math.MaxUint32 { diff --git a/blockchain/scriptval_test.go b/blockchain/scriptval_test.go index 0c0e26a4..4b21f8f0 100644 --- a/blockchain/scriptval_test.go +++ b/blockchain/scriptval_test.go @@ -1,46 +1,18 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package blockchain_test import ( - "fmt" - "runtime" "testing" - - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/txscript" ) // TestCheckBlockScripts ensures that validating the all of the scripts in a // known-good block doesn't return an error. 
func TestCheckBlockScripts(t *testing.T) { - runtime.GOMAXPROCS(runtime.NumCPU()) - - testBlockNum := 277647 - blockDataFile := fmt.Sprintf("%d.dat.bz2", testBlockNum) - blocks, err := loadBlocks(blockDataFile) - if err != nil { - t.Errorf("Error loading file: %v\n", err) - return - } - if len(blocks) > 1 { - t.Errorf("The test block file must only have one block in it") - } - - txStoreDataFile := fmt.Sprintf("%d.txstore.bz2", testBlockNum) - txStore, err := loadTxStore(txStoreDataFile) - if err != nil { - t.Errorf("Error loading txstore: %v\n", err) - return - } - - scriptFlags := txscript.ScriptBip16 - err = blockchain.TstCheckBlockScripts(blocks[0], txStore, scriptFlags) - if err != nil { - t.Errorf("Transaction script validation failed: %v\n", - err) - return - } + // TODO In the future, add a block here with a lot of tx to validate. + // The blockchain tests already validate a ton of scripts with signatures, + // so we don't really need to make a new test for this immediately. } diff --git a/blockchain/stake/error.go b/blockchain/stake/error.go new file mode 100644 index 00000000..b1071442 --- /dev/null +++ b/blockchain/stake/error.go @@ -0,0 +1,219 @@ +// Copyright (c) 2014 Conformal Systems LLC. +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package stake + +import ( + "fmt" +) + +// ErrorCode identifies a kind of error. +type ErrorCode int + +// These constants are used to identify a specific RuleError. +const ( + // ErrSStxTooManyInputs indicates that a given SStx contains too many + // inputs. + ErrSStxTooManyInputs = iota + + // ErrSStxTooManyOutputs indicates that a given SStx contains too many + // outputs. + ErrSStxTooManyOutputs + + // ErrSStxNoOutputs indicates that a given SStx has no outputs. + ErrSStxNoOutputs + + // ErrSStxInvalidInput indicates that an invalid output has been used as + // an input for a SStx; only non-SStx tagged outputs may be used to + // purchase stake tickets. + // TODO: Add this into validate + // Ensure that all inputs are not tagged SStx outputs of some sort, + // along with checks to make sure they exist and are available. + ErrSStxInvalidInputs + + // ErrSStxInvalidOutput indicates that the output for an SStx tx is + // invalid; in particular, either the output was not tagged SStx or the + // OP_RETURNs were missing or contained invalid addresses. + ErrSStxInvalidOutputs + + // ErrSStxInOutProportions indicates the the number of inputs in an SStx + // was not equal to the number of output minus one. + ErrSStxInOutProportions + + // ErrSStxBadCommitAmount indicates that a ticket tried to commit 0 or + // a negative value as the commitment amount. + ErrSStxBadCommitAmount + + // ErrSStxBadChangeAmts indicates that the change amount for some SStx + // was invalid, for instance spending more than its inputs. + ErrSStxBadChangeAmts + + // ErrSStxVerifyCalcAmts indicates that passed calculated amounts failed + // to conform to the amounts found in the ticket. + ErrSStxVerifyCalcAmts + + // ErrSSGenWrongNumInputs indicates that a given SSGen tx contains an + // invalid number of inputs. + ErrSSGenWrongNumInputs + + // ErrSSGenTooManyOutputs indicates that a given SSGen tx contains too + // many outputs. + ErrSSGenTooManyOutputs + + // ErrSSGenNoOutputs indicates that a given SSGen has no outputs. + ErrSSGenNoOutputs + + // ErrSSGenWrongIndex indicates that a given SSGen sstx input was not + // using the correct index. 
+ ErrSSGenWrongIndex + + // ErrSSGenWrongTxTree indicates that a given SSGen tx input was not found in + // the stake tx tree. + ErrSSGenWrongTxTree + + // ErrSSGenNoStakebase indicates that the SSGen tx did not contain a + // valid StakeBase in the zeroeth position of inputs. + ErrSSGenNoStakebase + + // ErrSSGenNoReference indicates that there is no reference OP_RETURN + // included as the first output. + ErrSSGenNoReference + + // ErrSSGenNoReference indicates that the OP_RETURN included as the + // first output was corrupted in some way. + ErrSSGenBadReference + + // ErrSSGenNoVotePush indicates that there is no vote bits OP_RETURN + // included as the second output. + ErrSSGenNoVotePush + + // ErrSSGenBadVotePush indicates that the OP_RETURN included as the + // second output was corrupted in some way. + ErrSSGenBadVotePush + + // ErrSSGenBadGenOuts indicates that the something was wrong with the + // stake generation outputs that were present after the first two + // OP_RETURN pushes in an SSGen tx. + ErrSSGenBadGenOuts + + // ErrSSRtxWrongNumInputs indicates that a given SSRtx contains an + // invalid number of inputs. + ErrSSRtxWrongNumInputs + + // ErrSSRtxTooManyOutputs indicates that a given SSRtx contains too many + // outputs. + ErrSSRtxTooManyOutputs + + // ErrSSRtxNoOutputs indicates that a given SSRtx has no outputs. + ErrSSRtxNoOutputs + + // ErrSSRtxWrongTxTree indicates that a given SSRtx input was not found in + // the stake tx tree. + ErrSSRtxWrongTxTree + + // ErrSSRtxBadGenOuts indicates that there was a non-SSRtx tagged output + // present in an SSRtx. + ErrSSRtxBadOuts + + // ErrVerSStxAmts indicates there was an error verifying the calculated + // SStx out amounts and the actual SStx out amounts. + ErrVerSStxAmts + + // ErrVerifyInput indicates that there was an error in verification + // function input. + ErrVerifyInput + + // ErrVerifyOutType indicates that there was a non-equivalence in the + // output type. + ErrVerifyOutType + + // ErrVerifyTooMuchFees indicates that a transaction's output gave + // too much in fees after taking into accounts the limits imposed + // by the SStx output's version field. + ErrVerifyTooMuchFees + + // ErrVerifySpendTooMuch indicates that a transaction's output spent more + // than it was allowed to spend based on the calculated subsidy or return + // for a vote or revocation. + ErrVerifySpendTooMuch + + // ErrVerifyOutputAmt indicates that for a vote/revocation spend output, + // the rule was given that it must exactly match the calculated maximum, + // however the amount in the output did not (e.g. it gave fees). + ErrVerifyOutputAmt + + // ErrVerifyOutPkhs indicates that the recipient of the P2PKH or P2SH + // script was different from that indicated in the SStx input. + ErrVerifyOutPkhs +) + +// Map of ErrorCode values back to their constant names for pretty printing. 
+var errorCodeStrings = map[ErrorCode]string{ + ErrSStxTooManyInputs: "ErrSStxTooManyInputs", + ErrSStxTooManyOutputs: "ErrSStxTooManyOutputs", + ErrSStxNoOutputs: "ErrSStxNoOutputs", + ErrSStxInvalidInputs: "ErrSStxInvalidInputs", + ErrSStxInvalidOutputs: "ErrSStxInvalidOutputs", + ErrSStxInOutProportions: "ErrSStxInOutProportions", + ErrSStxBadCommitAmount: "ErrSStxBadCommitAmount", + ErrSStxBadChangeAmts: "ErrSStxBadChangeAmts", + ErrSStxVerifyCalcAmts: "ErrSStxVerifyCalcAmts", + ErrSSGenWrongNumInputs: "ErrSSGenWrongNumInputs", + ErrSSGenTooManyOutputs: "ErrSSGenTooManyOutputs", + ErrSSGenNoOutputs: "ErrSSGenNoOutputs", + ErrSSGenWrongIndex: "ErrSSGenWrongIndex", + ErrSSGenWrongTxTree: "ErrSSGenWrongTxTree", + ErrSSGenNoStakebase: "ErrSSGenNoStakebase", + ErrSSGenNoReference: "ErrSSGenNoReference", + ErrSSGenBadReference: "ErrSSGenBadReference", + ErrSSGenNoVotePush: "ErrSSGenNoVotePush", + ErrSSGenBadVotePush: "ErrSSGenBadVotePush", + ErrSSGenBadGenOuts: "ErrSSGenBadGenOuts", + ErrSSRtxWrongNumInputs: "ErrSSRtxWrongNumInputs", + ErrSSRtxTooManyOutputs: "ErrSSRtxTooManyOutputs", + ErrSSRtxNoOutputs: "ErrSSRtxNoOutputs", + ErrSSRtxWrongTxTree: "ErrSSRtxWrongTxTree", + ErrSSRtxBadOuts: "ErrSSRtxBadOuts", + ErrVerSStxAmts: "ErrVerSStxAmts", + ErrVerifyInput: "ErrVerifyInput", + ErrVerifyOutType: "ErrVerifyOutType", + ErrVerifyTooMuchFees: "ErrVerifyTooMuchFees", + ErrVerifySpendTooMuch: "ErrVerifySpendTooMuch", + ErrVerifyOutputAmt: "ErrVerifyOutputAmt", + ErrVerifyOutPkhs: "ErrVerifyOutPkhs", +} + +// String returns the ErrorCode as a human-readable name. +func (e ErrorCode) String() string { + if s := errorCodeStrings[e]; s != "" { + return s + } + return fmt.Sprintf("Unknown ErrorCode (%d)", int(e)) +} + +// RuleError identifies a rule violation. It is used to indicate that +// processing of a block or transaction failed due to one of the many validation +// rules. The caller can use type assertions to determine if a failure was +// specifically due to a rule violation and access the ErrorCode field to +// ascertain the specific reason for the rule violation. +type StakeRuleError struct { + ErrorCode ErrorCode // Describes the kind of error + Description string // Human readable description of the issue +} + +// Error satisfies the error interface and prints human-readable errors. +func (e StakeRuleError) Error() string { + return e.Description +} + +// Error satisfies the error interface and prints human-readable errors. +func (e StakeRuleError) GetCode() ErrorCode { + return e.ErrorCode +} + +// ruleError creates an RuleError given a set of arguments. +func stakeRuleError(c ErrorCode, desc string) StakeRuleError { + return StakeRuleError{ErrorCode: c, Description: desc} +} diff --git a/blockchain/stake/error_test.go b/blockchain/stake/error_test.go new file mode 100644 index 00000000..b4a16b71 --- /dev/null +++ b/blockchain/stake/error_test.go @@ -0,0 +1,96 @@ +// Copyright (c) 2014 Conformal Systems LLC. +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package stake_test + +import ( + "testing" + + "github.com/decred/dcrd/blockchain" +) + +// TestErrorCodeStringer tests the stringized output for the ErrorCode type. 
+func TestErrorCodeStringer(t *testing.T) { + tests := []struct { + in blockchain.ErrorCode + want string + }{ + {blockchain.ErrDuplicateBlock, "ErrDuplicateBlock"}, + {blockchain.ErrBlockTooBig, "ErrBlockTooBig"}, + {blockchain.ErrBlockVersionTooOld, "ErrBlockVersionTooOld"}, + {blockchain.ErrInvalidTime, "ErrInvalidTime"}, + {blockchain.ErrTimeTooOld, "ErrTimeTooOld"}, + {blockchain.ErrTimeTooNew, "ErrTimeTooNew"}, + {blockchain.ErrDifficultyTooLow, "ErrDifficultyTooLow"}, + {blockchain.ErrUnexpectedDifficulty, "ErrUnexpectedDifficulty"}, + {blockchain.ErrHighHash, "ErrHighHash"}, + {blockchain.ErrBadMerkleRoot, "ErrBadMerkleRoot"}, + {blockchain.ErrBadCheckpoint, "ErrBadCheckpoint"}, + {blockchain.ErrForkTooOld, "ErrForkTooOld"}, + {blockchain.ErrCheckpointTimeTooOld, "ErrCheckpointTimeTooOld"}, + {blockchain.ErrNoTransactions, "ErrNoTransactions"}, + {blockchain.ErrTooManyTransactions, "ErrTooManyTransactions"}, + {blockchain.ErrNoTxInputs, "ErrNoTxInputs"}, + {blockchain.ErrNoTxOutputs, "ErrNoTxOutputs"}, + {blockchain.ErrTxTooBig, "ErrTxTooBig"}, + {blockchain.ErrBadTxOutValue, "ErrBadTxOutValue"}, + {blockchain.ErrDuplicateTxInputs, "ErrDuplicateTxInputs"}, + {blockchain.ErrBadTxInput, "ErrBadTxInput"}, + {blockchain.ErrBadCheckpoint, "ErrBadCheckpoint"}, + {blockchain.ErrMissingTx, "ErrMissingTx"}, + {blockchain.ErrUnfinalizedTx, "ErrUnfinalizedTx"}, + {blockchain.ErrDuplicateTx, "ErrDuplicateTx"}, + {blockchain.ErrOverwriteTx, "ErrOverwriteTx"}, + {blockchain.ErrImmatureSpend, "ErrImmatureSpend"}, + {blockchain.ErrDoubleSpend, "ErrDoubleSpend"}, + {blockchain.ErrSpendTooHigh, "ErrSpendTooHigh"}, + {blockchain.ErrBadFees, "ErrBadFees"}, + {blockchain.ErrTooManySigOps, "ErrTooManySigOps"}, + {blockchain.ErrFirstTxNotCoinbase, "ErrFirstTxNotCoinbase"}, + {blockchain.ErrMultipleCoinbases, "ErrMultipleCoinbases"}, + {blockchain.ErrBadCoinbaseScriptLen, "ErrBadCoinbaseScriptLen"}, + {blockchain.ErrBadCoinbaseValue, "ErrBadCoinbaseValue"}, + {blockchain.ErrScriptMalformed, "ErrScriptMalformed"}, + {blockchain.ErrScriptValidation, "ErrScriptValidation"}, + {0xffff, "Unknown ErrorCode (65535)"}, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + result := test.in.String() + if result != test.want { + t.Errorf("String #%d\n got: %s want: %s", i, result, + test.want) + continue + } + } +} + +// TestRuleError tests the error output for the RuleError type. +func TestRuleError(t *testing.T) { + tests := []struct { + in blockchain.RuleError + want string + }{ + { + blockchain.RuleError{Description: "duplicate block"}, + "duplicate block", + }, + { + blockchain.RuleError{Description: "human-readable error"}, + "human-readable error", + }, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + result := test.in.Error() + if result != test.want { + t.Errorf("Error #%d\n got: %s want: %s", i, result, + test.want) + continue + } + } +} diff --git a/blockchain/stake/log.go b/blockchain/stake/log.go new file mode 100644 index 00000000..e3c1e3a6 --- /dev/null +++ b/blockchain/stake/log.go @@ -0,0 +1,72 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package stake + +import ( + "errors" + "io" + + "github.com/btcsuite/btclog" +) + +// log is a logger that is initialized with no output filters. This +// means the package will not perform any logging by default until the caller +// requests it. 
+var log btclog.Logger + +// The default amount of logging is none. +func init() { + DisableLog() +} + +// DisableLog disables all library log output. Logging output is disabled +// by default until either UseLogger or SetLogWriter are called. +func DisableLog() { + log = btclog.Disabled +} + +// UseLogger uses a specified Logger to output package logging info. +// This should be used in preference to SetLogWriter if the caller is also +// using btclog. +func UseLogger(logger btclog.Logger) { + log = logger +} + +// SetLogWriter uses a specified io.Writer to output package logging info. +// This allows a caller to direct package logging output without needing a +// dependency on seelog. If the caller is also using btclog, UseLogger should +// be used instead. +func SetLogWriter(w io.Writer, level string) error { + if w == nil { + return errors.New("nil writer") + } + + lvl, ok := btclog.LogLevelFromString(level) + if !ok { + return errors.New("invalid log level") + } + + l, err := btclog.NewLoggerFromWriter(w, lvl) + if err != nil { + return err + } + + UseLogger(l) + return nil +} + +// LogClosure is a closure that can be printed with %v to be used to +// generate expensive-to-create data for a detailed log level and avoid doing +// the work if the data isn't printed. +type logClosure func() string + +func (c logClosure) String() string { + return c() +} + +func newLogClosure(c func() string) logClosure { + return logClosure(c) +} diff --git a/blockchain/stake/lottery.go b/blockchain/stake/lottery.go new file mode 100644 index 00000000..1635d26b --- /dev/null +++ b/blockchain/stake/lottery.go @@ -0,0 +1,152 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +// Contains useful functions for lottery winner and ticket number determination. + +package stake + +import ( + "encoding/binary" + "fmt" + + "github.com/decred/dcrd/chaincfg/chainhash" +) + +// Hash256PRNG is a determinstic pseudorandom number generator that uses a +// 256-bit secure hashing function to generate random uint32s starting from +// an initial seed. +type Hash256PRNG struct { + seed []byte // The seed used to initialize + hashIdx int // Position in the cached hash + idx uint64 // Position in the hash iterator + seedState chainhash.Hash // Hash iterator root hash + lastHash chainhash.Hash // Cached last hash used +} + +// NewHash256PRNG creates a pointer to a newly created hash256PRNG. +func NewHash256PRNG(seed []byte) *Hash256PRNG { + // idx and lastHash are automatically initialized + // as 0. We initialize the seed by appending a constant + // to it and hashing to give 32 bytes. This ensures + // that regardless of the input, the PRNG is always + // doing a short number of rounds because it only + // has to hash < 64 byte messages. The constant is + // derived from the hexadecimal representation of + // pi. + cst := []byte{0x24, 0x3F, 0x6A, 0x88, + 0x85, 0xA3, 0x08, 0xD3} + hp := new(Hash256PRNG) + hp.seed = chainhash.HashFuncB(append(seed, cst...)) + initLH, err := chainhash.NewHash(hp.seed) + if err != nil { + return nil + } + hp.seedState = *initLH + hp.lastHash = *initLH + hp.idx = 0 + return hp +} + +// StateHash returns a hash referencing the current state the deterministic PRNG. 
+func (hp *Hash256PRNG) StateHash() chainhash.Hash { + fHash := hp.lastHash + fIdx := hp.idx + fHashIdx := hp.hashIdx + + finalState := make([]byte, len(fHash)+4+1) + copy(finalState, fHash[:]) + binary.BigEndian.PutUint32(finalState[len(fHash):], uint32(fIdx)) + finalState[len(fHash)+4] = byte(fHashIdx) + + return chainhash.HashFuncH(finalState) +} + +// hash256Rand returns a uint32 random number using the pseudorandom number +// generator and updates the state. +func (hp *Hash256PRNG) Hash256Rand() uint32 { + r := binary.BigEndian.Uint32(hp.lastHash[hp.hashIdx*4 : hp.hashIdx*4+4]) + hp.hashIdx++ + + // 'roll over' the hash index to use and store it. + if hp.hashIdx > 7 { + idxB := make([]byte, 4, 4) + binary.BigEndian.PutUint32(idxB, uint32(hp.idx)) + hp.lastHash = chainhash.HashFuncH(append(hp.seed, idxB...)) + hp.idx++ + hp.hashIdx = 0 + } + + // 'roll over' the PRNG by re-hashing the seed when + // we overflow idx. + if hp.idx > 0xFFFFFFFF { + hp.seedState = chainhash.HashFuncH(hp.seedState[:]) + hp.lastHash = hp.seedState + hp.idx = 0 + } + + return r +} + +// uniformRandom returns a random in the range [0 ... upperBound) while avoiding +// modulo bias, thus giving a normal distribution within the specified range. +// +// Ported from +// https://github.com/conformal/clens/blob/master/src/arc4random_uniform.c +func (hp *Hash256PRNG) uniformRandom(upperBound uint32) uint32 { + var r, min uint32 + if upperBound < 2 { + return 0 + } + + if upperBound > 0x80000000 { + min = 1 + ^upperBound + } else { + // (2**32 - (x * 2)) % x == 2**32 % x when x <= 2**31 + min = ((0xFFFFFFFF - (upperBound * 2)) + 1) % upperBound + } + + for { + r = hp.Hash256Rand() + if r >= min { + break + } + } + + return r % upperBound +} + +// intInSlice returns true if an integer is in the passed slice, false otherwise. +func intInSlice(i int, sl []int) bool { + for _, e := range sl { + if i == e { + return true + } + } + + return false +} + +// FindTicketIdxs finds n many unique index numbers for a list length size. +func FindTicketIdxs(size int64, n int, prng *Hash256PRNG) ([]int, error) { + if size < int64(n) { + return nil, fmt.Errorf("list size too small") + } + + if size > 0xFFFFFFFF { + return nil, fmt.Errorf("list size too big") + } + sz := uint32(size) + + var list []int + listLen := 0 + for listLen < n { + r := int(prng.uniformRandom(sz)) + if !intInSlice(r, list) { + list = append(list, r) + listLen++ + } + } + + return list, nil +} diff --git a/blockchain/stake/lottery_test.go b/blockchain/stake/lottery_test.go new file mode 100644 index 00000000..251ad153 --- /dev/null +++ b/blockchain/stake/lottery_test.go @@ -0,0 +1,196 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
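
Before the tests for the lottery code, here is a small illustrative sketch, not part of the patch, of how `Hash256PRNG` and `FindTicketIdxs` defined above fit together. The helper name, seed bytes, pool size, and votes-per-block count are made-up example values.

```go
package example

import (
	"fmt"

	"github.com/decred/dcrd/blockchain/stake"
	"github.com/decred/dcrd/chaincfg/chainhash"
)

// pickWinners is a hypothetical helper showing the intended use of
// Hash256PRNG with FindTicketIdxs: any node seeding the PRNG with the same
// bytes derives the same ticket indexes, so winner selection is deterministic.
func pickWinners() error {
	// Example seed only; in consensus code the seed would be derived from
	// chain data so that every node agrees on it.
	seed := chainhash.HashFuncB([]byte("example seed"))
	prng := stake.NewHash256PRNG(seed)

	const liveTickets int64 = 40960 // hypothetical live ticket pool size
	const votesPerBlock = 5         // tickets drawn for one block

	idxs, err := stake.FindTicketIdxs(liveTickets, votesPerBlock, prng)
	if err != nil {
		return err
	}

	// The indexes select entries from a sorted view of the live ticket
	// pool; StateHash can be logged to audit the PRNG state afterwards.
	fmt.Println("selected ticket indexes:", idxs)
	fmt.Println("prng state:", prng.StateHash())
	return nil
}
```
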
+ +package stake_test + +import ( + "bytes" + "encoding/binary" + "math/rand" + "reflect" + "sort" + "testing" + + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg/chainhash" +) + +func TestBasicPRNG(t *testing.T) { + seed := chainhash.HashFuncB([]byte{0x01}) + prng := stake.NewHash256PRNG(seed) + for i := 0; i < 100000; i++ { + prng.Hash256Rand() + } + + lastHashExp, _ := chainhash.NewHashFromStr("24f1cd72aefbfc85a9d3e21e2eb" + + "732615688d3634bf94499af5a81e0eb45c4e4") + lastHash := prng.StateHash() + if *lastHashExp != lastHash { + t.Errorf("expected final state of %v, got %v", lastHashExp, lastHash) + } +} + +type TicketData struct { + Prefix uint8 // Bucket prefix + SStxHash chainhash.Hash + SpendHash chainhash.Hash + BlockHeight int64 // Block for where the original sstx was located + TxIndex uint32 // Position within a block, in stake tree + Missed bool // Whether or not the ticket was spent + Expired bool // Whether or not the ticket expired +} + +// SStxMemMap is a memory map of SStx keyed to the txHash. +type SStxMemMap map[chainhash.Hash]*TicketData + +func swap(s []byte) []byte { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } + return s +} + +// TicketDataSlice is a sortable data structure of pointers to TicketData. +type TicketDataSlice []*TicketData + +func NewTicketDataSliceEmpty() TicketDataSlice { + slice := make([]*TicketData, 0) + return TicketDataSlice(slice) +} + +func NewTicketDataSlice(size int) TicketDataSlice { + slice := make([]*TicketData, size) + return TicketDataSlice(slice) +} + +// Less determines which of two *TicketData values is smaller; used for sort. +func (tds TicketDataSlice) Less(i, j int) bool { + cmp := bytes.Compare(tds[i].SStxHash[:], tds[j].SStxHash[:]) + isISmaller := (cmp == -1) + return isISmaller +} + +// Swap swaps two *TicketData values. +func (tds TicketDataSlice) Swap(i, j int) { tds[i], tds[j] = tds[j], tds[i] } + +// Len returns the length of the slice. +func (tds TicketDataSlice) Len() int { return len(tds) } + +func TestLotteryNumSelection(t *testing.T) { + // Test finding ticket indexes. + seed := chainhash.HashFuncB([]byte{0x01}) + prng := stake.NewHash256PRNG(seed) + ticketsInPool := int64(56789) + tooFewTickets := int64(4) + justEnoughTickets := int64(5) + ticketsPerBlock := 5 + + _, err := stake.FindTicketIdxs(tooFewTickets, ticketsPerBlock, prng) + if err == nil { + t.Errorf("got unexpected no error for FindTicketIdxs too few tickets " + + "test") + } + + tickets, err := stake.FindTicketIdxs(ticketsInPool, ticketsPerBlock, prng) + if err != nil { + t.Errorf("got unexpected error for FindTicketIdxs 1 test") + } + ticketsExp := []int{34850, 8346, 27636, 54482, 25482} + if !reflect.DeepEqual(ticketsExp, tickets) { + t.Errorf("Unexpected tickets selected; got %v, want %v", tickets, + ticketsExp) + } + + // Ensure that it can find all suitable ticket numbers in a small + // bucket of tickets. 
+ tickets, err = stake.FindTicketIdxs(justEnoughTickets, ticketsPerBlock, prng) + if err != nil { + t.Errorf("got unexpected error for FindTicketIdxs 2 test") + } + ticketsExp = []int{3, 0, 4, 2, 1} + if !reflect.DeepEqual(ticketsExp, tickets) { + t.Errorf("Unexpected tickets selected; got %v, want %v", tickets, + ticketsExp) + } + + lastHashExp, _ := chainhash.NewHashFromStr("e97ce54aea63a883a82871e752c" + + "6ec3c5731fffc63dafc3767c06861b0b2fa65") + lastHash := prng.StateHash() + if *lastHashExp != lastHash { + t.Errorf("expected final state of %v, got %v", lastHashExp, lastHash) + } +} + +func TestTicketSorting(t *testing.T) { + ticketsPerBlock := 5 + ticketPoolSize := uint16(8192) + totalTickets := uint32(ticketPoolSize) * uint32(5) + bucketsSize := 256 + + randomGen := rand.New(rand.NewSource(12345)) + ticketMap := make([]SStxMemMap, int(bucketsSize), int(bucketsSize)) + + for i := 0; i < bucketsSize; i++ { + ticketMap[i] = make(SStxMemMap) + } + + toMake := int(ticketPoolSize) * ticketsPerBlock + for i := 0; i < toMake; i++ { + td := new(TicketData) + + rint64 := randomGen.Int63n(1 << 62) + randBytes := make([]byte, 8, 8) + binary.LittleEndian.PutUint64(randBytes, uint64(rint64)) + h := chainhash.HashFuncH(randBytes) + td.SStxHash = h + + prefix := byte(h[0]) + + ticketMap[prefix][h] = td + } + + // Pre-sort with buckets (faster). + sortedSlice := make([]*TicketData, 0, totalTickets) + for i := 0; i < bucketsSize; i++ { + tempTdSlice := NewTicketDataSlice(len(ticketMap[i])) + itr := 0 // Iterator + for _, td := range ticketMap[i] { + tempTdSlice[itr] = td + itr++ + } + sort.Sort(tempTdSlice) + sortedSlice = append(sortedSlice, tempTdSlice...) + } + sortedSlice1 := sortedSlice + + // However, it should be the same as a sort without the buckets. + toSortSlice := make([]*TicketData, 0, totalTickets) + for i := 0; i < bucketsSize; i++ { + tempTdSlice := make([]*TicketData, len(ticketMap[i]), + len(ticketMap[i])) + itr := 0 // Iterator + for _, td := range ticketMap[i] { + tempTdSlice[itr] = td + itr++ + } + toSortSlice = append(toSortSlice, tempTdSlice...) + } + sortedSlice = NewTicketDataSlice(int(totalTickets)) + copy(sortedSlice, toSortSlice) + sort.Sort(TicketDataSlice(sortedSlice)) + sortedSlice2 := sortedSlice + + if !reflect.DeepEqual(sortedSlice1, sortedSlice2) { + t.Errorf("bucket sort failed to sort to the same slice as global sort") + } +} + +func BenchmarkHashPRNG(b *testing.B) { + seed := chainhash.HashFuncB([]byte{0x01}) + prng := stake.NewHash256PRNG(seed) + + for n := 0; n < b.N; n++ { + prng.Hash256Rand() + } +} diff --git a/blockchain/stake/staketx.go b/blockchain/stake/staketx.go new file mode 100644 index 00000000..b1069b50 --- /dev/null +++ b/blockchain/stake/staketx.go @@ -0,0 +1,1089 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. +// +// Contains a collection of functions that determine what type of stake tx a +// given tx is and does a cursory check for sanity. + +package stake + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "math" + "math/big" + + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" +) + +// TxType indicates the type of tx (regular or stake type). 
+type TxType int + +const ( + TxTypeRegular = iota + TxTypeSStx + TxTypeSSGen + TxTypeSSRtx +) + +const ( + // MaxInputsPerSStx is the maximum number of inputs allowed in an SStx. + MaxInputsPerSStx = 64 + + // MaxOutputsPerSStx is the maximum number of outputs allowed in an SStx; + // you need +1 for the tagged SStx output. + MaxOutputsPerSStx = MaxInputsPerSStx*2 + 1 + + // NumInputsPerSSGen is the exact number of inputs for an SSGen + // (stakebase) tx. Inputs are a tagged SStx output and a stakebase (null) + // input. + NumInputsPerSSGen = 2 // SStx and stakebase + + // MaxOutputsPerSSgen is the maximum number of outputs in an SSGen tx, + // which are all outputs to the addresses specified in the OP_RETURNs of + // the original SStx referenced as input plus reference and vote + // OP_RETURN outputs in the zeroeth and first position. + MaxOutputsPerSSGen = MaxInputsPerSStx + 2 + + // NumInputsPerSSRtx is the exact number of inputs for an SSRtx (stake + // revocation tx); the only input should be the SStx output. + NumInputsPerSSRtx = 1 + + // MaxOutputsPerSSRtx is the maximum number of outputs in an SSRtx, which + // are all outputs to the addresses specified in the OP_RETURNs of the + // original SStx referenced as input plus a reference to the block header + // hash of the block in which voting was missed. + MaxOutputsPerSSRtx = MaxInputsPerSStx + + // SStxPKHMinOutSize is the minimum size of of an OP_RETURN commitment output + // for an SStx tx. + SStxPKHMinOutSize = 32 + + // SStxPKHMaxOutSize is the maximum size of of an OP_RETURN commitment output + // for an SStx tx. + SStxPKHMaxOutSize = 77 + + // SSGenBlockReferenceOutSize is the size of a block reference OP_RETURN + // output for an SSGen tx. + SSGenBlockReferenceOutSize = 38 + + // SSGenVoteBitsOutputMinSize is the minimum size for a VoteBits push + // in an SSGen. + SSGenVoteBitsOutputMinSize = 4 + + // SSGenVoteBitsOutputMinSize is the maximum size for a VoteBits push + // in an SSGen. + SSGenVoteBitsOutputMaxSize = 77 + + // maxSingleBytePushLength is the largest maximum push for an + // SStx commitment or VoteBits push. + MaxSingleBytePushLength = 75 + + // SStxVoteReturnFractionMask extracts the return fraction from a + // commitment output version. + // If after applying this mask &0x003f is given, the entire amount of + // the output is allowed to be spent as fees if the flag to allow fees + // is set. + SStxVoteReturnFractionMask = 0x003f + + // SStxRevReturnFractionMask extracts the return fraction from a + // commitment output version. + // If after applying this mask &0x3f00 is given, the entire amount of + // the output is allowed to be spent as fees if the flag to allow fees + // is set. + SStxRevReturnFractionMask = 0x3f00 + + // SStxVoteFractionFlag is a bitflag mask specifying whether or not to + // apply a fractional limit to the amount used for fees in a vote. + // 00000000 00000000 = No fees allowed + // 00000000 01000000 = Apply fees rule + SStxVoteFractionFlag = 0x0040 + + // SStxRevFractionFlag is a bitflag mask specifying whether or not to + // apply a fractional limit to the amount used for fees in a vote. + // 00000000 00000000 = No fees allowed + // 01000000 00000000 = Apply fees rule + SStxRevFractionFlag = 0x4000 + + // fractioningAmount is the amount to divide by after multiplying + // to obtain the limit for the output's amount. + fractioningAmount = 64 +) + +var ( + // validSStxAddressOutPrefix is the valid prefix for a 30-byte + // minimum OP_RETURN push for a commitment for an SStx. 
+ // Example SStx address out: + // 0x6a (OP_RETURN) + // 0x1e (OP_DATA_30, push length: 30 bytes) + // + // 0x?? 0x?? 0x?? 0x?? (20 byte public key hash) + // 0x?? 0x?? 0x?? 0x?? + // 0x?? 0x?? 0x?? 0x?? + // 0x?? 0x?? 0x?? 0x?? + // 0x?? 0x?? + // + // 0x?? 0x?? 0x?? 0x?? (8 byte amount) + // 0x?? 0x?? 0x?? 0x?? + // + // 0x?? 0x?? (2 byte range limits) + validSStxAddressOutMinPrefix = []byte{0x6a, 0x1e} + + // validSSGenReferenceOutPrefix is the valid prefix for a block + // reference output for an SSGen tx. + // Example SStx address out: + // 0x6a (OP_RETURN) + // 0x28 (OP_DATA_40, push length: 40 bytes) + // + // 0x?? 0x?? 0x?? 0x?? (32 byte block header hash for the block + // 0x?? 0x?? 0x?? 0x?? you wish to vote on) + // 0x?? 0x?? 0x?? 0x?? + // 0x?? 0x?? 0x?? 0x?? + // 0x?? 0x?? 0x?? 0x?? + // 0x?? 0x?? 0x?? 0x?? + // 0x?? 0x?? 0x?? 0x?? + // 0x?? 0x?? 0x?? 0x?? + // + // 0x?? 0x?? 0x?? 0x?? (4 byte uint32 for the height of the block + // that you wish to vote on) + validSSGenReferenceOutPrefix = []byte{0x6a, 0x24} + + // validSSGenVoteOutMinPrefix is the valid prefix for a vote output for an + // SSGen tx. + // 0x6a (OP_RETURN) + // 0x02 (OP_DATA_2 to OP_DATA_75, push length: 2-75 bytes) + // + // 0x?? 0x?? (VoteBits) ... 0x?? + validSSGenVoteOutMinPrefix = []byte{0x6a, 0x02} + + // zeroHash is the zero value for a wire.ShaHash and is defined as + // a package level variable to avoid the need to create a new instance + // every time a check is needed. + zeroHash = &chainhash.Hash{} + + // rangeLimitMax is the maximum bitshift for a fees limit on an + // sstx commitment output. + rangeLimitMax = uint16(63) +) + +// -------------------------------------------------------------------------------- +// Accessory Stake Functions +// -------------------------------------------------------------------------------- + +// isNullOutpoint determines whether or not a previous transaction output point +// is set. +func isNullOutpoint(tx *dcrutil.Tx) bool { + nullInOP := tx.MsgTx().TxIn[0].PreviousOutPoint + if nullInOP.Index == math.MaxUint32 && nullInOP.Hash.IsEqual(zeroHash) && + nullInOP.Tree == dcrutil.TxTreeRegular { + return true + } + return false +} + +// isNullFraudProof determines whether or not a previous transaction fraud proof +// is set. +func isNullFraudProof(tx *dcrutil.Tx) bool { + txIn := tx.MsgTx().TxIn[0] + switch { + case txIn.BlockHeight != wire.NullBlockHeight: + return false + case txIn.BlockIndex != wire.NullBlockIndex: + return false + } + + return true +} + +// IsStakeBase returns whether or not a tx could be considered as having a +// topically valid stake base present. +func IsStakeBase(tx *dcrutil.Tx) bool { + msgTx := tx.MsgTx() + + // A stake base (SSGen) must only have two transaction inputs. + if len(msgTx.TxIn) != 2 { + return false + } + + // The previous output of a coin base must have a max value index and + // a zero hash, as well as null fraud proofs. + if !isNullOutpoint(tx) { + return false + } + if !isNullFraudProof(tx) { + return false + } + + return true +} + +// GetSStxStakeOutputsInfo takes an SStx as input and scans through its outputs, +// returning the pubkeyhashs and amounts for any NullDataTy's (future commitments +// to stake generation rewards). 
+func GetSStxStakeOutputInfo(tx *dcrutil.Tx) ([]bool, [][]byte, []int64, []int64, + [][]bool, [][]uint16) { + msgTx := tx.MsgTx() + + isP2SH := make([]bool, len(msgTx.TxIn)) + addresses := make([][]byte, len(msgTx.TxIn)) + amounts := make([]int64, len(msgTx.TxIn)) + changeAmounts := make([]int64, len(msgTx.TxIn)) + allSpendRules := make([][]bool, len(msgTx.TxIn)) + allSpendLimits := make([][]uint16, len(msgTx.TxIn)) + + // Cycle through the inputs and pull the proportional amounts + // and commit to PKHs/SHs. + for idx, out := range msgTx.TxOut { + // We only care about the outputs where we get proportional + // amounts and the PKHs/SHs to send rewards to, which is all + // the odd numbered output indexes. + if (idx > 0) && (idx%2 != 0) { + // The MSB (sign), not used ever normally, encodes whether + // or not it is a P2PKH or P2SH for the input. + amtEncoded := make([]byte, 8, 8) + copy(amtEncoded, out.PkScript[22:30]) + isP2SH[idx/2] = !(amtEncoded[7]&(1<<7) == 0) // MSB set? + amtEncoded[7] &= ^uint8(1 << 7) // Clear bit + + addresses[idx/2] = out.PkScript[2:22] + amounts[idx/2] = int64(binary.LittleEndian.Uint64(amtEncoded)) + + // Get flags and restrictions for the outputs to be + // make in either a vote or revocation. + spendRules := make([]bool, 2, 2) + spendLimits := make([]uint16, 2, 2) + + // This bitflag is true/false. + feeLimitUint16 := binary.LittleEndian.Uint16(out.PkScript[30:32]) + spendRules[0] = (feeLimitUint16 & SStxVoteFractionFlag) == + SStxVoteFractionFlag + spendRules[1] = (feeLimitUint16 & SStxRevFractionFlag) == + SStxRevFractionFlag + allSpendRules[idx/2] = spendRules + + // This is the fraction to use out of 64. + spendLimits[0] = feeLimitUint16 & SStxVoteReturnFractionMask + spendLimits[1] = feeLimitUint16 & SStxRevReturnFractionMask + spendLimits[1] >>= 8 + allSpendLimits[idx/2] = spendLimits + } + + // Here we only care about the change amounts, so scan + // the change outputs (even indices) and save their + // amounts. + if (idx > 0) && (idx%2 == 0) { + changeAmounts[(idx/2)-1] = out.Value + } + } + + return isP2SH, addresses, amounts, changeAmounts, allSpendRules, + allSpendLimits +} + +// GetSSGenStakeOutputInfo takes an SSGen tx as input and scans through its +// outputs, returning the amount of the output and the PKH or SH that it was +// sent to. +func GetSSGenStakeOutputInfo(tx *dcrutil.Tx, params *chaincfg.Params) ([]bool, + [][]byte, []int64, error) { + msgTx := tx.MsgTx() + numOutputsInSSGen := len(msgTx.TxOut) + + isP2SH := make([]bool, numOutputsInSSGen-2) + addresses := make([][]byte, numOutputsInSSGen-2) + amounts := make([]int64, numOutputsInSSGen-2) + + // Cycle through the inputs and generate + for idx, out := range msgTx.TxOut { + // We only care about the outputs where we get proportional + // amounts and the PKHs they were sent to. + if (idx > 1) && (idx < numOutputsInSSGen) { + // Get the PKH or SH it's going to, and what type of + // script it is. 
+ class, addr, _, err := + txscript.ExtractPkScriptAddrs(out.Version, out.PkScript, params) + if err != nil { + return nil, nil, nil, err + } + if class != txscript.StakeGenTy { + return nil, nil, nil, fmt.Errorf("ssgen output included non "+ + "ssgen tagged output in idx %v", idx) + } + subClass, err := txscript.GetStakeOutSubclass(out.PkScript) + if !(subClass == txscript.PubKeyHashTy || + subClass == txscript.ScriptHashTy) { + return nil, nil, nil, fmt.Errorf("bad script type") + } + isP2SH[idx-2] = false + if subClass == txscript.ScriptHashTy { + isP2SH[idx-2] = true + } + + // Get the amount that was sent. + amt := out.Value + addresses[idx-2] = addr[0].ScriptAddress() + amounts[idx-2] = amt + } + } + + return isP2SH, addresses, amounts, nil +} + +// GetSSGenBlockVotedOn takes an SSGen tx and returns the block voted on in the +// first OP_RETURN by hash and height. +func GetSSGenBlockVotedOn(tx *dcrutil.Tx) (chainhash.Hash, uint32, error) { + msgTx := tx.MsgTx() + + // Get the block header hash. + blockSha, err := chainhash.NewHash(msgTx.TxOut[0].PkScript[2:34]) + if err != nil { + return chainhash.Hash{}, 0, err + } + + // Get the block height. + height := binary.LittleEndian.Uint32(msgTx.TxOut[0].PkScript[34:38]) + + return *blockSha, height, nil +} + +// GetSSGenVoteBits takes an SSGen tx as input and scans through its +// outputs, returning the VoteBits of the index 1 output. +func GetSSGenVoteBits(tx *dcrutil.Tx) uint16 { + msgTx := tx.MsgTx() + + votebits := binary.LittleEndian.Uint16(msgTx.TxOut[1].PkScript[2:4]) + + return votebits +} + +// GetSSRtxStakeOutputInfo takes an SSRtx tx as input and scans through its +// outputs, returning the amount of the output and the pkh that it was sent to. +func GetSSRtxStakeOutputInfo(tx *dcrutil.Tx, params *chaincfg.Params) ([]bool, + [][]byte, []int64, error) { + msgTx := tx.MsgTx() + numOutputsInSSRtx := len(msgTx.TxOut) + + isP2SH := make([]bool, numOutputsInSSRtx) + addresses := make([][]byte, numOutputsInSSRtx) + amounts := make([]int64, numOutputsInSSRtx) + + // Cycle through the inputs and generate + for idx, out := range msgTx.TxOut { + // Get the PKH or SH it's going to, and what type of + // script it is. + class, addr, _, err := + txscript.ExtractPkScriptAddrs(out.Version, out.PkScript, params) + if err != nil { + return nil, nil, nil, err + } + if class != txscript.StakeRevocationTy { + return nil, nil, nil, fmt.Errorf("ssrtx output included non "+ + "ssrtx tagged output in idx %v", idx) + } + subClass, err := txscript.GetStakeOutSubclass(out.PkScript) + if !(subClass == txscript.PubKeyHashTy || + subClass == txscript.ScriptHashTy) { + return nil, nil, nil, fmt.Errorf("bad script type") + } + isP2SH[idx] = false + if subClass == txscript.ScriptHashTy { + isP2SH[idx] = true + } + + // Get the amount that was sent. + amt := out.Value + + addresses[idx] = addr[0].ScriptAddress() + amounts[idx] = amt + } + + return isP2SH, addresses, amounts, nil +} + +// GetSStxNullOutputAmounts takes an array of input amounts, change amounts, and a +// ticket purchase amount, calculates the adjusted proportion from the purchase +// amount, stores it in an array, then returns the array. That is, for any given +// SStx, this function calculates the proportional outputs that any single user +// should receive. 
+// Returns: (1) Fees (2) Output Amounts (3) Error +func GetSStxNullOutputAmounts(amounts []int64, + changeAmounts []int64, + amountTicket int64) (int64, []int64, error) { + lengthAmounts := len(amounts) + + if lengthAmounts != len(changeAmounts) { + errStr := fmt.Sprintf("amounts was not equal in length " + + "to change amounts!") + return 0, nil, errors.New(errStr) + } + + if amountTicket <= 0 { + errStr := fmt.Sprintf("committed amount was too small!") + return 0, nil, stakeRuleError(ErrSStxBadCommitAmount, errStr) + } + + contribAmounts := make([]int64, lengthAmounts) + sum := int64(0) + + // Now we want to get the adjusted amounts. The algorithm is like this: + // 1 foreach amount + // 2 subtract change from input, store + // 3 add this amount to sum + // 4 check sum against the total committed amount + for i := 0; i < lengthAmounts; i++ { + contribAmounts[i] = amounts[i] - changeAmounts[i] + if contribAmounts[i] < 0 { + errStr := fmt.Sprintf("change at idx %v spent more coins than "+ + "allowed (have: %v, spent: %v)", i, amounts[i], changeAmounts[i]) + return 0, nil, stakeRuleError(ErrSStxBadChangeAmts, errStr) + } + + sum += contribAmounts[i] + } + + fees := sum - amountTicket + + return fees, contribAmounts, nil +} + +// GetStakeRewards takes a list of SStx adjusted output amounts, the amount used +// to purchase that ticket, and the reward for an SSGen tx and subsequently +// generates what the outputs should be in the SSGen tx. If used for calculating +// the outputs for an SSRtx, pass 0 for subsidy. +func GetStakeRewards(amounts []int64, + amountTicket int64, + subsidy int64) []int64 { + + outputsAmounts := make([]int64, len(amounts)) + + // SSGen handling + amountWithStakebase := amountTicket + subsidy + + // Get the sum of the amounts contributed between both fees + // and contributions to the ticket. + totalContrib := int64(0) + for _, amount := range amounts { + totalContrib += amount + } + + // Now we want to get the adjusted amounts including the reward. + // The algorithm is like this: + // 1 foreach amount + // 2 amount *= 2^32 + // 3 amount /= amountTicket + // 4 amount *= amountWithStakebase + // 5 amount /= 2^32 + amountWithStakebaseBig := big.NewInt(amountWithStakebase) + totalContribBig := big.NewInt(totalContrib) + + for idx, amount := range amounts { + amountBig := big.NewInt(amount) // We need > 64 bits + + // mul amountWithStakebase + amountBig.Mul(amountBig, amountWithStakebaseBig) + + // mul 2^32 + amountBig.Lsh(amountBig, 32) + + // div totalContrib + amountBig.Div(amountBig, totalContribBig) + + // div 2^32 + amountBig.Rsh(amountBig, 32) + + // make int64 + amountFinal := int64(amountBig.Uint64()) + + outputsAmounts[idx] = amountFinal + } + + return outputsAmounts +} + +// VerifySStxAmounts compares a list of calculated amounts for ticket commitments +// to the list of commitment amounts from the actual SStx. 
+func VerifySStxAmounts(sstxAmts []int64, sstxCalcAmts []int64) error { + if len(sstxCalcAmts) != len(sstxAmts) { + errStr := fmt.Sprintf("SStx verify error: number of calculated " + + "sstx output values was not equivalent to the number of sstx " + + "commitment outputs") + return stakeRuleError(ErrVerSStxAmts, errStr) + } + + for idx, amt := range sstxCalcAmts { + if !(amt == sstxAmts[idx]) { + errStr := fmt.Sprintf("SStx verify error: at index %v incongruent"+ + "amt %v in SStx calculated reward and amt %v in "+ + "SStx", idx, amt, sstxAmts[idx]) + return stakeRuleError(ErrVerSStxAmts, errStr) + } + } + + return nil +} + +// VerifyStakingPkhsAndAmounts takes the following: +// 1. sstxTypes: A list of types for what the output should be (P2PK or P2SH). +// 2. sstxPkhs: A list of payee PKHs from NullDataTy outputs of an input SStx. +// 3. ssSpendAmts: What the payouts in an SSGen/SSRtx tx actually were. +// 4. ssSpendTypes: A list of types for what the outputs actually were. +// 5. ssSpendPkhs: A list of payee PKHs from OP_SSGEN tagged outputs of the SSGen +// or SSRtx. +// 6. ssSpendCalcAmts: A list of payee amounts that was calculated based on +// the input SStx. These are the maximum possible amounts that can be +// transacted from this output. +// 7. isVote: Whether this is a vote (true) or revocation (false). +// 8. spendRules: Spending rules for each output in terms of fees allowable +// as extracted from the origin output Version. +// 9. spendLimits: Spending limits for each output in terms of fees allowable +// as extracted from the origin output Version. +// +// and determines if the two pairs of slices are congruent or not. +func VerifyStakingPkhsAndAmounts( + sstxTypes []bool, + sstxPkhs [][]byte, + ssSpendAmts []int64, + ssSpendTypes []bool, + ssSpendPkhs [][]byte, + ssSpendCalcAmts []int64, + isVote bool, + spendRules [][]bool, + spendLimits [][]uint16) error { + + if len(sstxTypes) != len(ssSpendTypes) { + errStr := fmt.Sprintf("Staking verify error: number of " + + "sstx type values was not equivalent to the number " + + "of ss*** type values") + return stakeRuleError(ErrVerifyInput, errStr) + } + + if len(ssSpendAmts) != len(ssSpendCalcAmts) { + errStr := fmt.Sprintf("Staking verify error: number of " + + "sstx output values was not equivalent to the number " + + "of ss*** output values") + return stakeRuleError(ErrVerifyInput, errStr) + } + + if len(sstxPkhs) != len(ssSpendPkhs) { + errStr := fmt.Sprintf("Staking verify error: number of " + + "sstx output pks was not equivalent to the number " + + "of ss*** output pkhs") + return stakeRuleError(ErrVerifyInput, errStr) + } + + for idx, typ := range sstxTypes { + if typ != ssSpendTypes[idx] { + errStr := fmt.Sprintf("SStx in/SS*** out verify error: at index %v "+ + "non-equivalent type %x in SStx and type %x in SS***", idx, typ, + ssSpendTypes[idx]) + return stakeRuleError(ErrVerifyOutType, errStr) + } + } + + for idx, amt := range ssSpendAmts { + rule := false + limit := uint16(0) + if isVote { + // Vote. + rule = spendRules[idx][0] + limit = spendLimits[idx][0] + } else { + // Revocation. + rule = spendRules[idx][1] + limit = spendLimits[idx][1] + } + + // Apply the spending rules and see if the transaction is within + // the specified limits if it asks us to. + if rule { + // If 63 is given, the entire amount may be used as a fee. + // Obviously we can't allow shifting 1 63 places because + // we'd get a negative number. 
+ feeAllowance := ssSpendCalcAmts[idx]
+ if limit < rangeLimitMax {
+ if int64(1<<uint64(limit)) < ssSpendCalcAmts[idx] {
+ feeAllowance = int64(1 << uint64(limit))
+ }
+ }
+
+ // The amount may be reduced by at most the fee allowance, and it
+ // may never exceed the calculated amount.
+ amtLimitLow := ssSpendCalcAmts[idx] - feeAllowance
+ amtLimitHigh := ssSpendCalcAmts[idx]
+
+ if amt < amtLimitLow {
+ errStr := fmt.Sprintf("at index %v amt min limit was "+
+ "calculated to be %v yet the actual amount output "+
+ "was %v", idx, amtLimitLow, amt)
+ return stakeRuleError(ErrVerifyTooMuchFees, errStr)
+ }
+
+ if amt > amtLimitHigh {
+ errStr := fmt.Sprintf("at index %v amt max limit was "+
+ "calculated to be %v yet the actual amount output "+
+ "was %v", idx, amtLimitHigh, amt)
+ return stakeRuleError(ErrVerifySpendTooMuch, errStr)
+ }
+ } else {
+ // Fees are disabled.
+ if amt != ssSpendCalcAmts[idx] {
+ errStr := fmt.Sprintf("SStx in/SS*** out verify error: "+
+ "at index %v non-equivalent amt %v in SStx and amt "+
+ "%v in SS***", idx, amt, ssSpendCalcAmts[idx])
+ return stakeRuleError(ErrVerifyOutputAmt, errStr)
+ }
+ }
+ }
+
+ for idx, pkh := range sstxPkhs {
+ if !bytes.Equal(pkh, ssSpendPkhs[idx]) {
+ errStr := fmt.Sprintf("SStx in/SS*** out verify error: at index %v "+
+ "non-equivalent pkh %x in SStx and pkh %x in SS***", idx, pkh,
+ ssSpendPkhs[idx])
+ return stakeRuleError(ErrVerifyOutPkhs, errStr)
+ }
+ }
+
+ return nil
+}
+
+// --------------------------------------------------------------------------------
+// Stake Transaction Identification Functions
+// --------------------------------------------------------------------------------
+
+// IsSStx returns whether or not a transaction is an SStx. It does some
+// simple validation steps to make sure the number of inputs, number of
+// outputs, and the input/output scripts are valid.
+//
+// SStx transactions are specified as below.
+// Inputs:
+// untagged output 1 [index 0]
+// untagged output 2 [index 1]
+// ...
+// untagged output MaxInputsPerSStx [index MaxInputsPerSStx-1]
+//
+// Outputs:
+// OP_SSTX tagged output [index 0]
+// OP_RETURN push of input 1's address for reward receiving [index 1]
+// OP_SSTXCHANGE tagged output for input 1 [index 2]
+// OP_RETURN push of input 2's address for reward receiving [index 3]
+// OP_SSTXCHANGE tagged output for input 2 [index 4]
+// ...
+// OP_RETURN push of input MaxInputsPerSStx's address for reward receiving
+// [index (MaxInputsPerSStx*2)-2]
+// OP_SSTXCHANGE tagged output [index (MaxInputsPerSStx*2)-1]
+//
+// The output OP_RETURN pushes should be of size 20 bytes (standard address).
+//
+// The errors in this function can be ignored if you want to use it to
+// identify SStx from a list of stake tx.
+func IsSStx(tx *dcrutil.Tx) (bool, error) {
+ msgTx := tx.MsgTx()
+
+ // Check to make sure there aren't too many inputs.
+ // CheckTransactionSanity already makes sure that number of inputs is
+ // greater than 0, so no need to check that.
+ if len(msgTx.TxIn) > MaxInputsPerSStx {
+ return false, stakeRuleError(ErrSStxTooManyInputs, "SStx has too many "+
+ "inputs")
+ }
+
+ // Check to make sure there aren't too many outputs.
+ if len(msgTx.TxOut) > MaxOutputsPerSStx {
+ return false, stakeRuleError(ErrSStxTooManyOutputs, "SStx has too many "+
+ "outputs")
+ }
+
+ // Check to make sure there are some outputs.
+ if len(msgTx.TxOut) == 0 {
+ return false, stakeRuleError(ErrSStxNoOutputs, "SStx has no "+
+ "outputs")
+ }
+
+ // Check to make sure that all output scripts are the default version.
+ for idx, txOut := range msgTx.TxOut {
+ if txOut.Version != txscript.DefaultScriptVersion {
+ errStr := fmt.Sprintf("invalid script version found in "+
+ "txOut idx %v", idx)
+ return false, stakeRuleError(ErrSStxInvalidOutputs, errStr)
+ }
+ }
+
+ // Ensure that the first output is tagged OP_SSTX.
+ if txscript.GetScriptClass(msgTx.TxOut[0].Version, msgTx.TxOut[0].PkScript) != + txscript.StakeSubmissionTy { + return false, stakeRuleError(ErrSStxInvalidOutputs, "First SStx output "+ + "should have been OP_SSTX tagged, but it was not") + } + + // Ensure that the number of outputs is equal to the number of inputs + // + 1. + if (len(msgTx.TxIn)*2 + 1) != len(msgTx.TxOut) { + return false, stakeRuleError(ErrSStxInOutProportions, "The number of "+ + "inputs in the SStx tx was not the number of outputs/2 - 1") + } + + // Ensure that the rest of the odd outputs are 28-byte OP_RETURN pushes that + // contain putative pubkeyhashes, and that the rest of the odd outputs are + // OP_SSTXCHANGE tagged. + for outTxIndex := 1; outTxIndex < len(msgTx.TxOut); outTxIndex++ { + scrVersion := msgTx.TxOut[outTxIndex].Version + rawScript := msgTx.TxOut[outTxIndex].PkScript + + // Check change outputs. + if outTxIndex%2 == 0 { + if txscript.GetScriptClass(scrVersion, rawScript) != + txscript.StakeSubChangeTy { + str := fmt.Sprintf("SStx output at output index %d was not "+ + "an sstx change output", outTxIndex) + return false, stakeRuleError(ErrSStxInvalidOutputs, str) + } + continue + } + + // Else (odd) check commitment outputs. The script should be a + // NullDataTy output. + if txscript.GetScriptClass(scrVersion, rawScript) != + txscript.NullDataTy { + str := fmt.Sprintf("SStx output at output index %d was not "+ + "a NullData (OP_RETURN) push", outTxIndex) + return false, stakeRuleError(ErrSStxInvalidOutputs, str) + } + + // The length of the output script should be between 32 and 77 bytes long. + if len(rawScript) < SStxPKHMinOutSize || + len(rawScript) > SStxPKHMaxOutSize { + str := fmt.Sprintf("SStx output at output index %d was a "+ + "NullData (OP_RETURN) push of the wrong size", outTxIndex) + return false, stakeRuleError(ErrSStxInvalidOutputs, str) + } + + // The OP_RETURN output script prefix should conform to the standard. + outputScriptBuffer := bytes.NewBuffer(rawScript) + outputScriptPrefix := outputScriptBuffer.Next(2) + + minPush := uint8(validSStxAddressOutMinPrefix[1]) + maxPush := uint8(validSStxAddressOutMinPrefix[1]) + + (MaxSingleBytePushLength - minPush) + pushLen := uint8(outputScriptPrefix[1]) + pushLengthValid := (pushLen >= minPush) && (pushLen <= maxPush) + // The first byte should be OP_RETURN, while the second byte should be a + // valid push length. + if !(outputScriptPrefix[0] == validSStxAddressOutMinPrefix[0]) || + !pushLengthValid { + errStr := fmt.Sprintf("sstx commitment at output idx %v had "+ + "an invalid prefix", outTxIndex) + return false, stakeRuleError(ErrSStxInvalidOutputs, + errStr) + } + } + + return true, nil +} + +// IsSSGen returns whether or not a transaction is an SSGen tx. It does some +// simple validation steps to make sure the number of inputs, number of +// outputs, and the input/output scripts are valid. +// +// This does NOT check to see if the subsidy is valid or whether or not the +// value of input[0] + subsidy = value of the outputs. +// +// SSGen transactions are specified as below. +// Inputs: +// Stakebase null input [index 0] +// SStx-tagged output [index 1] +// +// Outputs: +// OP_RETURN push of 40 bytes containing: [index 0] +// i. 32-byte block header of block being voted on. +// ii. 8-byte int of this block's height. 
+// OP_RETURN push of 2 bytes containing votebits [index 1] +// SSGen-tagged output to address from SStx-tagged output's tx index output 1 +// [index 2] +// SSGen-tagged output to address from SStx-tagged output's tx index output 2 +// [index 3] +// ... +// SSGen-tagged output to address from SStx-tagged output's tx index output +// MaxInputsPerSStx [index MaxOutputsPerSSgen - 1] +// +// The errors in this function can be ignored if you want to use it in to +// identify SSGen from a list of stake tx. +func IsSSGen(tx *dcrutil.Tx) (bool, error) { + msgTx := tx.MsgTx() + + // Check to make sure there aren't too many inputs. + // CheckTransactionSanity already makes sure that number of inputs is + // greater than 0, so no need to check that. + if len(msgTx.TxIn) != NumInputsPerSSGen { + return false, stakeRuleError(ErrSSGenWrongNumInputs, "SSgen tx has an "+ + "invalid number of inputs") + } + + // Check to make sure there aren't too many outputs. + if len(msgTx.TxOut) > MaxOutputsPerSSGen { + return false, stakeRuleError(ErrSSGenTooManyOutputs, "SSgen tx has too "+ + "many outputs") + } + + // Check to make sure there are some outputs. + if len(msgTx.TxOut) == 0 { + return false, stakeRuleError(ErrSSGenNoOutputs, "SSgen tx no "+ + "many outputs") + } + + // Ensure that the first input is a stake base null input. + // Also checks to make sure that there aren't too many or too few inputs. + if !IsStakeBase(tx) { + return false, stakeRuleError(ErrSSGenNoStakebase, "SSGen tx did not "+ + "include a stakebase in the zeroeth input position") + } + + // Check to make sure that the output used as input came from TxTreeStake. + for i, txin := range msgTx.TxIn { + // Skip the stakebase + if i == 0 { + continue + } + + if txin.PreviousOutPoint.Index != 0 { + errStr := fmt.Sprintf("SSGen used an invalid input idx (got %v, "+ + "want 0)", txin.PreviousOutPoint.Index) + return false, stakeRuleError(ErrSSGenWrongIndex, errStr) + } + + if txin.PreviousOutPoint.Tree != dcrutil.TxTreeStake { + return false, stakeRuleError(ErrSSGenWrongTxTree, "SSGen used "+ + "a non-stake input") + } + } + + // Check to make sure that all output scripts are the default version. + for _, txOut := range msgTx.TxOut { + if txOut.Version != txscript.DefaultScriptVersion { + return false, stakeRuleError(ErrSSGenBadGenOuts, "invalid "+ + "script version found in txOut") + } + } + + // Ensure the number of outputs is equal to the number of inputs found in + // the original SStx + 2. + // TODO: Do this in validate, requires DB and valid chain. + + // Ensure that the second input is an SStx tagged output. + // TODO: Do this in validate, as we don't want to actually lookup + // old tx here. This function is for more general sorting. + + // Ensure that the first output is an OP_RETURN push. + zeroethOutputVersion := msgTx.TxOut[0].Version + zeroethOutputScript := msgTx.TxOut[0].PkScript + if txscript.GetScriptClass(zeroethOutputVersion, zeroethOutputScript) != + txscript.NullDataTy { + return false, stakeRuleError(ErrSSGenNoReference, "First SSGen output "+ + "should have been an OP_RETURN data push, but was not") + } + + // Ensure that the first output is the correct size. + if len(zeroethOutputScript) != SSGenBlockReferenceOutSize { + return false, stakeRuleError(ErrSSGenBadReference, "First SSGen output "+ + "should have been 43 bytes long, but was not") + } + + // The OP_RETURN output script prefix for block referencing should + // conform to the standard. 
+ zeroethOutputScriptBuffer := bytes.NewBuffer(zeroethOutputScript) + + zeroethOutputScriptPrefix := zeroethOutputScriptBuffer.Next(2) + if !bytes.Equal(zeroethOutputScriptPrefix, + validSSGenReferenceOutPrefix) { + return false, stakeRuleError(ErrSSGenBadReference, "First SSGen output "+ + "had an invalid prefix") + } + + // Ensure that the block header hash given in the first 32 bytes of the + // OP_RETURN push is a valid block header and found in the main chain. + // TODO: This is validate level stuff, do this there. + + // Ensure that the second output is an OP_RETURN push. + firstOutputVersion := msgTx.TxOut[1].Version + firstOutputScript := msgTx.TxOut[1].PkScript + if txscript.GetScriptClass(firstOutputVersion, firstOutputScript) != + txscript.NullDataTy { + return false, stakeRuleError(ErrSSGenNoVotePush, "Second SSGen output "+ + "should have been an OP_RETURN data push, but was not") + } + + // The length of the output script should be between 4 and 77 bytes long. + if len(firstOutputScript) < SSGenVoteBitsOutputMinSize || + len(firstOutputScript) > SSGenVoteBitsOutputMaxSize { + str := fmt.Sprintf("SSGen votebits output at output index 1 was a " + + "NullData (OP_RETURN) push of the wrong size") + return false, stakeRuleError(ErrSSGenBadVotePush, str) + } + + // The OP_RETURN output script prefix for voting should conform to the + // standard. + firstOutputScriptBuffer := bytes.NewBuffer(firstOutputScript) + firstOutputScriptPrefix := firstOutputScriptBuffer.Next(2) + + minPush := uint8(validSSGenVoteOutMinPrefix[1]) + maxPush := uint8(validSSGenVoteOutMinPrefix[1]) + + (MaxSingleBytePushLength - minPush) + pushLen := uint8(firstOutputScriptPrefix[1]) + pushLengthValid := (pushLen >= minPush) && (pushLen <= maxPush) + // The first byte should be OP_RETURN, while the second byte should be a + // valid push length. + if !(firstOutputScriptPrefix[0] == validSSGenVoteOutMinPrefix[0]) || + !pushLengthValid { + return false, stakeRuleError(ErrSSGenBadVotePush, "Second SSGen output "+ + "had an invalid prefix") + } + + // Ensure that the tx height given in the last 8 bytes is StakeMaturity + // many blocks ahead of the block in which that SStx appears, otherwise + // this ticket has failed to mature and the SStx must be invalid. + // TODO: This is validate level stuff, do this there. + + // Ensure that the remaining outputs are OP_SSGEN tagged. + for outTxIndex := 2; outTxIndex < len(msgTx.TxOut); outTxIndex++ { + scrVersion := msgTx.TxOut[outTxIndex].Version + rawScript := msgTx.TxOut[outTxIndex].PkScript + + // The script should be a OP_SSGEN tagged output. + if txscript.GetScriptClass(scrVersion, rawScript) != + txscript.StakeGenTy { + str := fmt.Sprintf("SSGen tx output at output index %d was not "+ + "an OP_SSGEN tagged output", outTxIndex) + return false, stakeRuleError(ErrSSGenBadGenOuts, str) + } + } + + return true, nil +} + +// IsSSGen returns whether or not a transaction is an SSRtx. It does some +// simple validation steps to make sure the number of inputs, number of +// outputs, and the input/output scripts are valid. +// +// SSRtx transactions are specified as below. +// Inputs: +// SStx-tagged output [index 0] +// +// Outputs: +// SSGen-tagged output to address from SStx-tagged output's tx index output 1 +// [index 0] +// SSGen-tagged output to address from SStx-tagged output's tx index output 2 +// [index 1] +// ... 
+// SSGen-tagged output to address from SStx-tagged output's tx index output +// MaxInputsPerSStx [index MaxOutputsPerSSRtx - 1] +// +// The errors in this function can be ignored if you want to use it in to +// identify SSRtx from a list of stake tx. +func IsSSRtx(tx *dcrutil.Tx) (bool, error) { + msgTx := tx.MsgTx() + + // Check to make sure there is the correct number of inputs. + // CheckTransactionSanity already makes sure that number of inputs is + // greater than 0, so no need to check that. + if len(msgTx.TxIn) != NumInputsPerSSRtx { + return false, stakeRuleError(ErrSSRtxWrongNumInputs, "SSRtx has an "+ + " invalid number of inputs") + } + + // Check to make sure there aren't too many outputs. + if len(msgTx.TxOut) > MaxOutputsPerSSRtx { + return false, stakeRuleError(ErrSSRtxTooManyOutputs, "SSRtx has too "+ + "many outputs") + } + + // Check to make sure there are some outputs. + if len(msgTx.TxOut) == 0 { + return false, stakeRuleError(ErrSSRtxNoOutputs, "SSRtx has no "+ + "outputs") + } + + // Check to make sure that all output scripts are the default version. + for _, txOut := range msgTx.TxOut { + if txOut.Version != txscript.DefaultScriptVersion { + return false, stakeRuleError(ErrSSRtxBadOuts, "invalid "+ + "script version found in txOut") + } + } + + // Check to make sure that the output used as input came from TxTreeStake. + for _, txin := range msgTx.TxIn { + if txin.PreviousOutPoint.Tree != dcrutil.TxTreeStake { + return false, stakeRuleError(ErrSSRtxWrongTxTree, "SSRtx used "+ + "a non-stake input") + } + } + + // Ensure that the first input is an SStx tagged output. + // TODO: Do this in validate, needs a DB and chain. + + // Ensure that the tx height given in the last 8 bytes is StakeMaturity + // many blocks ahead of the block in which that SStx appear, otherwise + // this ticket has failed to mature and the SStx must be invalid. + // TODO: Do this in validate, needs a DB and chain. + + // Ensure that the outputs are OP_SSRTX tagged. + // Ensure that the tx height given in the last 8 bytes is StakeMaturity + // many blocks ahead of the block in which that SStx appear, otherwise + // this ticket has failed to mature and the SStx must be invalid. + // TODO: This is validate level stuff, do this there. + + // Ensure that the outputs are OP_SSRTX tagged. + for outTxIndex := 0; outTxIndex < len(msgTx.TxOut); outTxIndex++ { + scrVersion := msgTx.TxOut[outTxIndex].Version + rawScript := msgTx.TxOut[outTxIndex].PkScript + + // The script should be a OP_SSRTX tagged output. + if txscript.GetScriptClass(scrVersion, rawScript) != + txscript.StakeRevocationTy { + str := fmt.Sprintf("SSRtx output at output index %d was not "+ + "an OP_SSRTX tagged output", outTxIndex) + return false, stakeRuleError(ErrSSRtxBadOuts, str) + } + } + + // Ensure the number of outputs is equal to the number of inputs found in + // the original SStx. + // TODO: Do this in validate, needs a DB and chain. + + return true, nil +} + +// DetermineTxType determines the type of stake transaction a transaction is; if +// none, it returns that it is an assumed regular tx. +func DetermineTxType(tx *dcrutil.Tx) TxType { + if is, _ := IsSStx(tx); is { + return TxTypeSStx + } + if is, _ := IsSSGen(tx); is { + return TxTypeSSGen + } + if is, _ := IsSSRtx(tx); is { + return TxTypeSSRtx + } + return TxTypeRegular +} + +// SetTxTree analyzes the embedded MsgTx and sets the transaction tree +// accordingly. 
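+// Stake transactions (SStx, SSGen, and SSRtx) are placed in TxTreeStake,
+// while everything else remains in TxTreeRegular.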
+func SetTxTree(tx *dcrutil.Tx) { + txType := DetermineTxType(tx) + + indicatedTree := dcrutil.TxTreeRegular + if txType != TxTypeRegular { + indicatedTree = dcrutil.TxTreeStake + } + + tx.SetTree(indicatedTree) +} diff --git a/blockchain/stake/staketx_test.go b/blockchain/stake/staketx_test.go new file mode 100644 index 00000000..94162479 --- /dev/null +++ b/blockchain/stake/staketx_test.go @@ -0,0 +1,2194 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package stake_test + +import ( + "bytes" + "encoding/hex" + "reflect" + "testing" + + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" +) + +// SSTX TESTING ------------------------------------------------------------------- + +func TestIsSStx(t *testing.T) { + var sstx = dcrutil.NewTx(sstxMsgTx) + sstx.SetTree(dcrutil.TxTreeStake) + sstx.SetIndex(0) + + test, err := stake.IsSStx(sstx) + if test == false || err != nil { + t.Errorf("IsSSTx should have returned true, but instead returned %v"+ + ",%v", test, err) + } + + // --------------------------------------------------------------------------- + // Test for an OP_RETURN commitment push of the maximum size + biggestPush := []byte{ + 0x6a, 0x4b, // OP_RETURN Push 75-bytes + 0x14, 0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, // 75 bytes + 0x3f, 0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, + } + + sstx = dcrutil.NewTxDeep(sstxMsgTx) + sstx.MsgTx().TxOut[1].PkScript = biggestPush + sstx.SetTree(dcrutil.TxTreeStake) + sstx.SetIndex(0) + + test, err = stake.IsSStx(sstx) + if test == false || err != nil { + t.Errorf("IsSSTx should have returned true, but instead returned %v"+ + ",%v", test, err) + } +} + +func TestIsSSTxErrors(t *testing.T) { + // Initialize the buffer for later manipulation + var buf bytes.Buffer + err := sstxMsgTx.Serialize(&buf) + if err != nil { + t.Errorf("Error serializing the reference sstx: %v", err) + } + bufBytes := buf.Bytes() + + // --------------------------------------------------------------------------- + // Test too many inputs with sstxMsgTxExtraInputs + + var sstxExtraInputs = dcrutil.NewTx(sstxMsgTxExtraInput) + sstxExtraInputs.SetTree(dcrutil.TxTreeStake) + sstxExtraInputs.SetIndex(0) + + test, err := stake.IsSStx(sstxExtraInputs) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSStxTooManyInputs { + t.Errorf("IsSSTx should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSStxTooManyInputs, test, err) + } + + // --------------------------------------------------------------------------- + // Test too many outputs with sstxMsgTxExtraOutputs + + var sstxExtraOutputs = dcrutil.NewTx(sstxMsgTxExtraOutputs) + sstxExtraOutputs.SetTree(dcrutil.TxTreeStake) + sstxExtraOutputs.SetIndex(0) + + test, err = stake.IsSStx(sstxExtraOutputs) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSStxTooManyOutputs { + t.Errorf("IsSSTx should have returned false,%v but instead returned %v"+ + ",%v", 
stake.ErrSStxTooManyOutputs, test, err) + } + + // --------------------------------------------------------------------------- + // Check to make sure the first output is OP_SSTX tagged + + var tx wire.MsgTx + testFirstOutTagged := bytes.Replace(bufBytes, + []byte{0x00, 0xe3, 0x23, 0x21, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x1a, 0xba}, + []byte{0x00, 0xe3, 0x23, 0x21, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x19}, + 1) + + // Deserialize the manipulated tx + rbuf := bytes.NewReader(testFirstOutTagged) + err = tx.Deserialize(rbuf) + if err != nil { + t.Errorf("Deserialize error %v", err) + } + + var sstxUntaggedOut = dcrutil.NewTx(&tx) + sstxUntaggedOut.SetTree(dcrutil.TxTreeStake) + sstxUntaggedOut.SetIndex(0) + + test, err = stake.IsSStx(sstxUntaggedOut) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSStxInvalidOutputs { + t.Errorf("IsSSTx should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSStxInvalidOutputs, test, err) + } + + // --------------------------------------------------------------------------- + // Test for mismatched number of inputs versus number of outputs + + var sstxInsOutsMismatched = dcrutil.NewTx(sstxMismatchedInsOuts) + sstxInsOutsMismatched.SetTree(dcrutil.TxTreeStake) + sstxInsOutsMismatched.SetIndex(0) + + test, err = stake.IsSStx(sstxInsOutsMismatched) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSStxInOutProportions { + t.Errorf("IsSSTx should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSStxInOutProportions, test, err) + } + + // --------------------------------------------------------------------------- + // Test for bad version of output. + var sstxBadVerOut = dcrutil.NewTx(sstxBadVersionOut) + sstxBadVerOut.SetTree(dcrutil.TxTreeStake) + sstxBadVerOut.SetIndex(0) + + test, err = stake.IsSStx(sstxBadVerOut) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSStxInvalidOutputs { + t.Errorf("IsSSTx should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSStxInvalidOutputs, test, err) + } + + // --------------------------------------------------------------------------- + // Test for second or more output not being OP_RETURN push + + var sstxNoNullData = dcrutil.NewTx(sstxNullDataMissing) + sstxNoNullData.SetTree(dcrutil.TxTreeStake) + sstxNoNullData.SetIndex(0) + + test, err = stake.IsSStx(sstxNoNullData) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSStxInvalidOutputs { + t.Errorf("IsSSTx should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSStxInvalidOutputs, test, err) + } + + // --------------------------------------------------------------------------- + // Test for change output being in the wrong place + + var sstxNullDataMis = dcrutil.NewTx(sstxNullDataMisplaced) + sstxNullDataMis.SetTree(dcrutil.TxTreeStake) + sstxNullDataMis.SetIndex(0) + + test, err = stake.IsSStx(sstxNullDataMis) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSStxInvalidOutputs { + t.Errorf("IsSSTx should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSStxInvalidOutputs, test, err) + } + + // --------------------------------------------------------------------------- + // Test for too short of a pubkeyhash being given in an OP_RETURN output + + testPKHLength := bytes.Replace(bufBytes, + []byte{ + 0x20, 0x6a, 0x1e, 0x94, 0x8c, 0x76, 0x5a, 0x69, + 0x14, 0xd4, 0x3f, 0x2a, 0x7a, 0xc1, 0x77, 0xda, + 0x2c, 0x2f, 0x6b, 0x52, 0xde, 0x3d, 0x7c, + }, + []byte{ + 0x1f, 
0x6a, 0x1d, 0x94, 0x8c, 0x76, 0x5a, 0x69, + 0x14, 0xd4, 0x3f, 0x2a, 0x7a, 0xc1, 0x77, 0xda, + 0x2c, 0x2f, 0x6b, 0x52, 0xde, 0x3d, + }, + 1) + + // Deserialize the manipulated tx + rbuf = bytes.NewReader(testPKHLength) + err = tx.Deserialize(rbuf) + if err != nil { + t.Errorf("Deserialize error %v", err) + } + + var sstxWrongPKHLength = dcrutil.NewTx(&tx) + sstxWrongPKHLength.SetTree(dcrutil.TxTreeStake) + sstxWrongPKHLength.SetIndex(0) + + test, err = stake.IsSStx(sstxWrongPKHLength) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSStxInvalidOutputs { + t.Errorf("IsSSTx should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSStxInvalidOutputs, test, err) + } + + // --------------------------------------------------------------------------- + // Test for an invalid OP_RETURN prefix with too big of a push + tooBigPush := []byte{ + 0x6a, 0x4c, 0x4c, // OP_RETURN Push 76-bytes + 0x14, 0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, // 76 bytes + 0x3f, 0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, + } + + // Deserialize the manipulated tx + rbuf = bytes.NewReader(bufBytes) + err = tx.Deserialize(rbuf) + if err != nil { + t.Errorf("Deserialize error %v", err) + } + tx.TxOut[1].PkScript = tooBigPush + + var sstxWrongPrefix = dcrutil.NewTx(&tx) + sstxWrongPrefix.SetTree(dcrutil.TxTreeStake) + sstxWrongPrefix.SetIndex(0) + + test, err = stake.IsSStx(sstxWrongPrefix) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSStxInvalidOutputs { + t.Errorf("IsSSTx should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSStxInvalidOutputs, test, err) + } +} + +// SSGEN TESTING ------------------------------------------------------------------ + +func TestIsSSGen(t *testing.T) { + var ssgen = dcrutil.NewTx(ssgenMsgTx) + ssgen.SetTree(dcrutil.TxTreeStake) + ssgen.SetIndex(0) + + test, err := stake.IsSSGen(ssgen) + if test == false || err != nil { + t.Errorf("IsSSGen should have returned true, but instead returned %v"+ + ",%v", test, err) + } + + // Test for an OP_RETURN VoteBits push of the maximum size + biggestPush := []byte{ + 0x6a, 0x4b, // OP_RETURN Push 75-bytes + 0x14, 0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, // 75 bytes + 0x3f, 0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, 0x3d, 0x7c, 0x7c, 0x7c, 0x7c, + 0x6b, 0x52, 0xde, + } + + ssgen = dcrutil.NewTxDeep(ssgenMsgTx) + ssgen.SetTree(dcrutil.TxTreeStake) + ssgen.SetIndex(0) + ssgen.MsgTx().TxOut[1].PkScript = biggestPush + + test, err = stake.IsSSGen(ssgen) + if test == false || err != nil { + t.Errorf("IsSSGen should have returned true, but instead returned %v"+ + ",%v", test, err) + } + +} + +func TestIsSSGenErrors(t *testing.T) { + // Initialize the buffer for later manipulation + var buf bytes.Buffer + err := ssgenMsgTx.Serialize(&buf) + if err != nil { + t.Errorf("Error serializing the reference 
sstx: %v", err) + } + bufBytes := buf.Bytes() + + // --------------------------------------------------------------------------- + // Test too many inputs with ssgenMsgTxExtraInputs + + var ssgenExtraInputs = dcrutil.NewTx(ssgenMsgTxExtraInput) + ssgenExtraInputs.SetTree(dcrutil.TxTreeStake) + ssgenExtraInputs.SetIndex(0) + + test, err := stake.IsSSGen(ssgenExtraInputs) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSSGenWrongNumInputs { + t.Errorf("IsSSGen should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSGenWrongNumInputs, test, err) + } + + // --------------------------------------------------------------------------- + // Test too many outputs with sstxMsgTxExtraOutputs + + var ssgenExtraOutputs = dcrutil.NewTx(ssgenMsgTxExtraOutputs) + ssgenExtraOutputs.SetTree(dcrutil.TxTreeStake) + ssgenExtraOutputs.SetIndex(0) + + test, err = stake.IsSSGen(ssgenExtraOutputs) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSSGenTooManyOutputs { + t.Errorf("IsSSGen should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSGenTooManyOutputs, test, err) + } + + // --------------------------------------------------------------------------- + // Test 0th input not being stakebase error + + var ssgenStakeBaseWrong = dcrutil.NewTx(ssgenMsgTxStakeBaseWrong) + ssgenStakeBaseWrong.SetTree(dcrutil.TxTreeStake) + ssgenStakeBaseWrong.SetIndex(0) + + test, err = stake.IsSSGen(ssgenStakeBaseWrong) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSSGenNoStakebase { + t.Errorf("IsSSGen should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSGenNoStakebase, test, err) + } + + // --------------------------------------------------------------------------- + // Wrong tree for inputs test + + // Replace TxTreeStake with TxTreeRegular + testWrongTreeInputs := bytes.Replace(bufBytes, + []byte{0x79, 0xac, 0x88, 0xfd, 0xf3, 0x57, 0xa1, 0x87, 0x00, + 0x00, 0x00, 0x00, 0x01}, + []byte{0x79, 0xac, 0x88, 0xfd, 0xf3, 0x57, 0xa1, 0x87, 0x00, + 0x00, 0x00, 0x00, 0x00}, + 1) + + // Deserialize the manipulated tx + var tx wire.MsgTx + rbuf := bytes.NewReader(testWrongTreeInputs) + err = tx.Deserialize(rbuf) + if err != nil { + t.Errorf("Deserialize error %v", err) + } + + var ssgenWrongTreeIns = dcrutil.NewTx(&tx) + ssgenWrongTreeIns.SetTree(dcrutil.TxTreeStake) + ssgenWrongTreeIns.SetIndex(0) + + test, err = stake.IsSSGen(ssgenWrongTreeIns) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSSGenWrongTxTree { + t.Errorf("IsSSGen should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSGenWrongTxTree, test, err) + } + + // --------------------------------------------------------------------------- + // Test for bad version of output. 
+ var ssgenTxBadVerOut = dcrutil.NewTx(ssgenMsgTxBadVerOut) + ssgenTxBadVerOut.SetTree(dcrutil.TxTreeStake) + ssgenTxBadVerOut.SetIndex(0) + + test, err = stake.IsSSGen(ssgenTxBadVerOut) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSSGenBadGenOuts { + t.Errorf("IsSSGen should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSGenBadGenOuts, test, err) + } + + // --------------------------------------------------------------------------- + // Test 0th output not being OP_RETURN push + + var ssgenWrongZeroethOut = dcrutil.NewTx(ssgenMsgTxWrongZeroethOut) + ssgenWrongZeroethOut.SetTree(dcrutil.TxTreeStake) + ssgenWrongZeroethOut.SetIndex(0) + + test, err = stake.IsSSGen(ssgenWrongZeroethOut) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSSGenNoReference { + t.Errorf("IsSSGen should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSGenNoReference, test, err) + } + // --------------------------------------------------------------------------- + // Test for too short of an OP_RETURN push being given in the 0th tx out + + testDataPush0Length := bytes.Replace(bufBytes, + []byte{ + 0x26, 0x6a, 0x24, + 0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, 0x3f, + 0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, 0x52, 0xde, 0x3d, 0x7c, + 0x00, 0xe3, 0x23, 0x21, + }, + []byte{ + 0x25, 0x6a, 0x23, + 0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, 0x3f, + 0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, 0x52, 0xde, 0x3d, 0x7c, + 0x00, 0xe3, 0x23, + }, + 1) + + // Deserialize the manipulated tx + rbuf = bytes.NewReader(testDataPush0Length) + err = tx.Deserialize(rbuf) + if err != nil { + t.Errorf("Deserialize error %v", err) + } + + var ssgenWrongDataPush0Length = dcrutil.NewTx(&tx) + ssgenWrongDataPush0Length.SetTree(dcrutil.TxTreeStake) + ssgenWrongDataPush0Length.SetIndex(0) + + test, err = stake.IsSSGen(ssgenWrongDataPush0Length) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSSGenBadReference { + t.Errorf("IsSSGen should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSGenBadReference, test, err) + } + + // --------------------------------------------------------------------------- + // Test for an invalid OP_RETURN prefix + + testNullData0Prefix := bytes.Replace(bufBytes, + []byte{ + 0x26, 0x6a, 0x24, + 0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, 0x3f, + 0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, 0x52, 0xde, 0x3d, 0x7c, + 0x00, 0xe3, 0x23, 0x21, + }, + []byte{ // This uses an OP_PUSHDATA_1 35-byte push to achieve 36 bytes + 0x26, 0x6a, 0x4c, 0x23, + 0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, 0x3f, + 0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, 0x52, 0xde, 0x3d, 0x7c, + 0x00, 0xe3, 0x23, + }, + 1) + + // Deserialize the manipulated tx + rbuf = bytes.NewReader(testNullData0Prefix) + err = tx.Deserialize(rbuf) + if err != nil { + t.Errorf("Deserialize error %v", err) + } + + var ssgenWrongNullData0Prefix = dcrutil.NewTx(&tx) + ssgenWrongNullData0Prefix.SetTree(dcrutil.TxTreeStake) + ssgenWrongNullData0Prefix.SetIndex(0) + + test, err = stake.IsSSGen(ssgenWrongNullData0Prefix) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSSGenBadReference { + t.Errorf("IsSSGen 
should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSGenBadReference, test, err) + } + + // --------------------------------------------------------------------------- + // Test 1st output not being OP_RETURN push + + var ssgenWrongFirstOut = dcrutil.NewTx(ssgenMsgTxWrongFirstOut) + ssgenWrongFirstOut.SetTree(dcrutil.TxTreeStake) + ssgenWrongFirstOut.SetIndex(0) + + test, err = stake.IsSSGen(ssgenWrongFirstOut) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSSGenNoVotePush { + t.Errorf("IsSSGen should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSGenNoVotePush, test, err) + } + // --------------------------------------------------------------------------- + // Test for too short of an OP_RETURN push being given in the 1st tx out + testDataPush1Length := bytes.Replace(bufBytes, + []byte{ + 0x04, 0x6a, 0x02, 0x94, 0x8c, + }, + []byte{ + 0x03, 0x6a, 0x01, 0x94, + }, + 1) + + // Deserialize the manipulated tx + rbuf = bytes.NewReader(testDataPush1Length) + err = tx.Deserialize(rbuf) + if err != nil { + t.Errorf("Deserialize error %v", err) + } + + var ssgenWrongDataPush1Length = dcrutil.NewTx(&tx) + ssgenWrongDataPush1Length.SetTree(dcrutil.TxTreeStake) + ssgenWrongDataPush1Length.SetIndex(0) + + test, err = stake.IsSSGen(ssgenWrongDataPush1Length) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSSGenBadVotePush { + t.Errorf("IsSSGen should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSGenBadVotePush, test, err) + } + + // --------------------------------------------------------------------------- + // Test for longer OP_RETURN push being given in the 1st tx out + testDataPush1Length = bytes.Replace(bufBytes, + []byte{ + 0x04, 0x6a, 0x02, 0x94, 0x8c, + }, + []byte{ + 0x06, 0x6a, 0x04, 0x94, 0x8c, 0x8c, 0x8c, + }, + 1) + + // Deserialize the manipulated tx + rbuf = bytes.NewReader(testDataPush1Length) + err = tx.Deserialize(rbuf) + if err != nil { + t.Errorf("Deserialize error %v", err) + } + + var ssgenLongDataPush1Length = dcrutil.NewTx(&tx) + ssgenLongDataPush1Length.SetTree(dcrutil.TxTreeStake) + ssgenLongDataPush1Length.SetIndex(0) + + test, err = stake.IsSSGen(ssgenLongDataPush1Length) + if test == false || err != nil { + t.Errorf("IsSSGen should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSGenBadVotePush, test, err) + } + + // --------------------------------------------------------------------------- + // Test for an invalid OP_RETURN prefix + + testNullData1Prefix := bytes.Replace(bufBytes, + []byte{ + 0x04, 0x6a, 0x02, 0x94, 0x8c, + }, + []byte{ // This uses an OP_PUSHDATA_1 2-byte push to do the push in 5 bytes + 0x05, 0x6a, 0x4c, 0x02, 0x00, 0x00, + }, + 1) + + // Deserialize the manipulated tx + rbuf = bytes.NewReader(testNullData1Prefix) + err = tx.Deserialize(rbuf) + if err != nil { + t.Errorf("Deserialize error %v", err) + } + + var ssgenWrongNullData1Prefix = dcrutil.NewTx(&tx) + ssgenWrongNullData1Prefix.SetTree(dcrutil.TxTreeStake) + ssgenWrongNullData1Prefix.SetIndex(0) + + test, err = stake.IsSSGen(ssgenWrongNullData1Prefix) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSSGenBadVotePush { + t.Errorf("IsSSGen should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSGenBadVotePush, test, err) + } + + // --------------------------------------------------------------------------- + // Test for an index 2+ output being not OP_SSGEN tagged + + testGenOutputUntagged := bytes.Replace(bufBytes, + 
[]byte{ + 0x1a, 0xbb, 0x76, 0xa9, 0x14, 0xc3, 0x98, + }, + []byte{ + 0x19, 0x76, 0xa9, 0x14, 0xc3, 0x98, + }, + 1) + + // Deserialize the manipulated tx + rbuf = bytes.NewReader(testGenOutputUntagged) + err = tx.Deserialize(rbuf) + if err != nil { + t.Errorf("Deserialize error %v", err) + } + + var ssgentestGenOutputUntagged = dcrutil.NewTx(&tx) + ssgentestGenOutputUntagged.SetTree(dcrutil.TxTreeStake) + ssgentestGenOutputUntagged.SetIndex(0) + + test, err = stake.IsSSGen(ssgentestGenOutputUntagged) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSSGenBadGenOuts { + t.Errorf("IsSSGen should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSGenBadGenOuts, test, err) + } +} + +// SSRTX TESTING ------------------------------------------------------------------ + +func TestIsSSRtx(t *testing.T) { + var ssrtx = dcrutil.NewTx(ssrtxMsgTx) + ssrtx.SetTree(dcrutil.TxTreeStake) + ssrtx.SetIndex(0) + + test, err := stake.IsSSRtx(ssrtx) + if test == false || err != nil { + t.Errorf("IsSSRtx should have returned true, but instead returned %v"+ + ",%v", test, err) + } +} + +func TestIsSSRtxErrors(t *testing.T) { + // Initialize the buffer for later manipulation + var buf bytes.Buffer + err := ssrtxMsgTx.Serialize(&buf) + if err != nil { + t.Errorf("Error serializing the reference sstx: %v", err) + } + bufBytes := buf.Bytes() + + // --------------------------------------------------------------------------- + // Test too many inputs with ssrtxMsgTxTooManyInputs + + var ssrtxTooManyInputs = dcrutil.NewTx(ssrtxMsgTxTooManyInputs) + ssrtxTooManyInputs.SetTree(dcrutil.TxTreeStake) + ssrtxTooManyInputs.SetIndex(0) + + test, err := stake.IsSSRtx(ssrtxTooManyInputs) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSSRtxWrongNumInputs { + t.Errorf("IsSSRtx should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSRtxWrongNumInputs, test, err) + } + + // --------------------------------------------------------------------------- + // Test too many outputs with ssrtxMsgTxTooManyOutputs + + var ssrtxTooManyOutputs = dcrutil.NewTx(ssrtxMsgTxTooManyOutputs) + ssrtxTooManyOutputs.SetTree(dcrutil.TxTreeStake) + ssrtxTooManyOutputs.SetIndex(0) + + test, err = stake.IsSSRtx(ssrtxTooManyOutputs) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSSRtxTooManyOutputs { + t.Errorf("IsSSRtx should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSRtxTooManyOutputs, test, err) + } + + // --------------------------------------------------------------------------- + // Test for bad version of output. 
+ var ssrtxTxBadVerOut = dcrutil.NewTx(ssrtxMsgTxBadVerOut) + ssrtxTxBadVerOut.SetTree(dcrutil.TxTreeStake) + ssrtxTxBadVerOut.SetIndex(0) + + test, err = stake.IsSSRtx(ssrtxTxBadVerOut) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSSRtxBadOuts { + t.Errorf("IsSSRtx should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSRtxBadOuts, test, err) + } + + // --------------------------------------------------------------------------- + // Test for an index 0+ output being not OP_SSRTX tagged + testRevocOutputUntagged := bytes.Replace(bufBytes, + []byte{ + 0x1a, 0xbc, 0x76, 0xa9, 0x14, 0xc3, 0x98, + }, + []byte{ + 0x19, 0x76, 0xa9, 0x14, 0xc3, 0x98, + }, + 1) + + // Deserialize the manipulated tx + var tx wire.MsgTx + rbuf := bytes.NewReader(testRevocOutputUntagged) + err = tx.Deserialize(rbuf) + if err != nil { + t.Errorf("Deserialize error %v", err) + } + + var ssrtxTestRevocOutputUntagged = dcrutil.NewTx(&tx) + ssrtxTestRevocOutputUntagged.SetTree(dcrutil.TxTreeStake) + ssrtxTestRevocOutputUntagged.SetIndex(0) + + test, err = stake.IsSSRtx(ssrtxTestRevocOutputUntagged) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSSRtxBadOuts { + t.Errorf("IsSSGen should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSRtxBadOuts, test, err) + } + + // --------------------------------------------------------------------------- + // Wrong tree for inputs test + + // Replace TxTreeStake with TxTreeRegular + testWrongTreeInputs := bytes.Replace(bufBytes, + []byte{0x79, 0xac, 0x88, 0xfd, 0xf3, 0x57, 0xa1, 0x87, 0x00, + 0x00, 0x00, 0x00, 0x01}, + []byte{0x79, 0xac, 0x88, 0xfd, 0xf3, 0x57, 0xa1, 0x87, 0x00, + 0x00, 0x00, 0x00, 0x00}, + 1) + + // Deserialize the manipulated tx + rbuf = bytes.NewReader(testWrongTreeInputs) + err = tx.Deserialize(rbuf) + if err != nil { + t.Errorf("Deserialize error %v", err) + } + + var ssrtxWrongTreeIns = dcrutil.NewTx(&tx) + ssrtxWrongTreeIns.SetTree(dcrutil.TxTreeStake) + ssrtxWrongTreeIns.SetIndex(0) + + test, err = stake.IsSSRtx(ssrtxWrongTreeIns) + if test == true || err.(stake.StakeRuleError).GetCode() != + stake.ErrSSRtxWrongTxTree { + t.Errorf("IsSSRtx should have returned false,%v but instead returned %v"+ + ",%v", stake.ErrSSGenWrongTxTree, test, err) + } +} + +// -------------------------------------------------------------------------------- +// Minor function testing +func TestGetSSGenBlockVotedOn(t *testing.T) { + var ssgen = dcrutil.NewTx(ssgenMsgTx) + ssgen.SetTree(dcrutil.TxTreeStake) + ssgen.SetIndex(0) + + blocksha, height, err := stake.GetSSGenBlockVotedOn(ssgen) + + correctblocksha, _ := chainhash.NewHash( + []byte{ + 0x94, 0x8c, 0x76, 0x5a, // 32 byte hash + 0x69, 0x14, 0xd4, 0x3f, + 0x2a, 0x7a, 0xc1, 0x77, + 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, + 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, + 0x52, 0xde, 0x3d, 0x7c, + }) + + correctheight := uint32(0x2123e300) + + if err != nil { + t.Errorf("Error thrown on TestGetSSGenBlockVotedOn: %v", err) + } + + if !reflect.DeepEqual(blocksha, *correctblocksha) { + t.Errorf("Error thrown on TestGetSSGenBlockVotedOn: Looking for "+ + "hash %v, got hash %v", *correctblocksha, blocksha, err) + } + + if height != correctheight { + t.Errorf("Error thrown on TestGetSSGenBlockVotedOn: Looking for "+ + "height %v, got height %v", correctheight, height, err) + } +} + +func TestGetSStxStakeOutputInfo(t *testing.T) { + var sstx = dcrutil.NewTx(sstxMsgTx) + sstx.SetTree(dcrutil.TxTreeStake) + sstx.SetIndex(0) + + correctTyp 
:= true + + correctPkh := []byte{0x94, 0x8c, 0x76, 0x5a, // 20 byte address + 0x69, 0x14, 0xd4, 0x3f, + 0x2a, 0x7a, 0xc1, 0x77, + 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, + } + + correctAmt := int64(0x2123e300) + + correctChange := int64(0x2223e300) + + correctRule := true + + correctLimit := uint16(4) + + typs, pkhs, amts, changeAmts, rules, limits := + stake.GetSStxStakeOutputInfo(sstx) + + if typs[2] != correctTyp { + t.Errorf("Error thrown on TestGetSStxStakeOutputInfo: Looking for "+ + "type %v, got type %v", correctTyp, typs[1]) + } + + if !reflect.DeepEqual(pkhs[1], correctPkh) { + t.Errorf("Error thrown on TestGetSStxStakeOutputInfo: Looking for "+ + "pkh %v, got pkh %v", correctPkh, pkhs[1]) + } + + if amts[1] != correctAmt { + t.Errorf("Error thrown on TestGetSStxStakeOutputInfo: Looking for "+ + "amount %v, got amount %v", correctAmt, amts[1]) + } + + if changeAmts[1] != correctChange { + t.Errorf("Error thrown on TestGetSStxStakeOutputInfo: Looking for "+ + "amount %v, got amount %v", correctChange, changeAmts[1]) + } + + if rules[1][0] != correctRule { + t.Errorf("Error thrown on TestGetSStxStakeOutputInfo: Looking for "+ + "rule %v, got rule %v", correctRule, rules[1][0]) + } + + if limits[1][0] != correctLimit { + t.Errorf("Error thrown on TestGetSStxStakeOutputInfo: Looking for "+ + "limit %v, got limit %v", correctLimit, rules[1][0]) + } +} + +func TestGetSSGenStakeOutputInfo(t *testing.T) { + var ssgen = dcrutil.NewTx(ssgenMsgTx) + ssgen.SetTree(dcrutil.TxTreeStake) + ssgen.SetIndex(0) + + correctTyp := false + + correctpkh := []byte{0xc3, 0x98, 0xef, 0xa9, + 0xc3, 0x92, 0xba, 0x60, + 0x13, 0xc5, 0xe0, 0x4e, + 0xe7, 0x29, 0x75, 0x5e, + 0xf7, 0xf5, 0x8b, 0x32, + } + + correctamt := int64(0x2123e300) + + typs, pkhs, amts, err := stake.GetSSGenStakeOutputInfo(ssgen, + &chaincfg.SimNetParams) + if err != nil { + t.Errorf("Got unexpected error: %v", err.Error()) + } + + if typs[0] != correctTyp { + t.Errorf("Error thrown on TestGetSSGenStakeOutputInfo: Looking for "+ + "type %v, got type %v", correctamt, amts[0]) + } + + if !reflect.DeepEqual(pkhs[0], correctpkh) { + t.Errorf("Error thrown on TestGetSSGenStakeOutputInfo: Looking for "+ + "pkh %v, got pkh %v", correctpkh, pkhs[0]) + } + + if amts[0] != correctamt { + t.Errorf("Error thrown on TestGetSSGenStakeOutputInfo: Looking for "+ + "amount %v, got amount %v", correctamt, amts[0]) + } +} + +func TestGetSSGenVoteBits(t *testing.T) { + var ssgen = dcrutil.NewTx(ssgenMsgTx) + ssgen.SetTree(dcrutil.TxTreeStake) + ssgen.SetIndex(0) + + correctvbs := uint16(0x8c94) + + votebits := stake.GetSSGenVoteBits(ssgen) + + if correctvbs != votebits { + t.Errorf("Error thrown on TestGetSSGenVoteBits: Looking for "+ + "amount % x, got amount % x", correctvbs, votebits) + } +} + +func TestGetSSRtxStakeOutputInfo(t *testing.T) { + var ssrtx = dcrutil.NewTx(ssrtxMsgTx) + ssrtx.SetTree(dcrutil.TxTreeStake) + ssrtx.SetIndex(0) + + correctTyp := false + + correctPkh := []byte{0xc3, 0x98, 0xef, 0xa9, + 0xc3, 0x92, 0xba, 0x60, + 0x13, 0xc5, 0xe0, 0x4e, + 0xe7, 0x29, 0x75, 0x5e, + 0xf7, 0xf5, 0x8b, 0x33, + } + + correctAmt := int64(0x2122e300) + + typs, pkhs, amts, err := stake.GetSSRtxStakeOutputInfo(ssrtx, + &chaincfg.SimNetParams) + if err != nil { + t.Errorf("Got unexpected error: %v", err.Error()) + } + + if typs[0] != correctTyp { + t.Errorf("Error thrown on TestGetSStxStakeOutputInfo: Looking for "+ + "type %v, got type %v", correctTyp, typs[0]) + } + + if !reflect.DeepEqual(pkhs[0], correctPkh) { + t.Errorf("Error thrown on 
TestGetSStxStakeOutputInfo: Looking for "+ + "pkh %v, got pkh %v", correctPkh, pkhs[0]) + } + + if amts[0] != correctAmt { + t.Errorf("Error thrown on TestGetSStxStakeOutputInfo: Looking for "+ + "amount %v, got amount %v", correctAmt, amts[0]) + } +} + +func TestGetSStxNullOutputAmounts(t *testing.T) { + commitAmts := []int64{int64(0x2122e300), + int64(0x12000000), + int64(0x12300000)} + changeAmts := []int64{int64(0x0122e300), + int64(0x02000000), + int64(0x02300000)} + amtTicket := int64(0x9122e300) + + _, _, err := stake.GetSStxNullOutputAmounts( + []int64{ + int64(0x12000000), + int64(0x12300000), + }, + changeAmts, + amtTicket) + + // len commit to amts != len change amts + lenErrStr := "amounts was not equal in length " + + "to change amounts!" + if err == nil || err.Error() != lenErrStr { + t.Errorf("TestGetSStxNullOutputAmounts unexpected error: %v", err) + } + + // too small amount to commit + _, _, err = stake.GetSStxNullOutputAmounts( + commitAmts, + changeAmts, + int64(0x00000000)) + tooSmallErrStr := "committed amount was too small!" + if err == nil || err.Error() != tooSmallErrStr { + t.Errorf("TestGetSStxNullOutputAmounts unexpected error: %v", err) + } + + // overspending error + tooMuchChangeAmts := []int64{int64(0x0122e300), + int64(0x02000000), + int64(0x12300001)} + + _, _, err = stake.GetSStxNullOutputAmounts( + commitAmts, + tooMuchChangeAmts, + int64(0x00000020)) + if err == nil || err.(stake.StakeRuleError).GetCode() != + stake.ErrSStxBadChangeAmts { + t.Errorf("TestGetSStxNullOutputAmounts unexpected error: %v", err) + } + + fees, amts, err := stake.GetSStxNullOutputAmounts(commitAmts, + changeAmts, + amtTicket) + + if err != nil { + t.Errorf("TestGetSStxNullOutputAmounts unexpected error: %v", err) + } + + expectedFees := int64(-1361240832) + + if expectedFees != fees { + t.Errorf("TestGetSStxNullOutputAmounts error, wanted %v, "+ + "but got %v", expectedFees, fees) + } + + expectedAmts := []int64{int64(0x20000000), + int64(0x10000000), + int64(0x10000000), + } + + if !reflect.DeepEqual(expectedAmts, amts) { + t.Errorf("TestGetSStxNullOutputAmounts error, wanted %v, "+ + "but got %v", expectedAmts, amts) + } +} + +func TestGetStakeRewards(t *testing.T) { + // SSGen example with >0 subsidy + amounts := []int64{int64(21000000), + int64(11000000), + int64(10000000), + } + amountTicket := int64(42000000) + subsidy := int64(400000) + + outAmts := stake.GetStakeRewards(amounts, amountTicket, subsidy) + + // SSRtx example with 0 subsidy + expectedAmts := []int64{int64(21200000), + int64(11104761), + int64(10095238), + } + + if !reflect.DeepEqual(expectedAmts, outAmts) { + t.Errorf("TestGetStakeRewards error, wanted %v, "+ + "but got %v", expectedAmts, outAmts) + } +} + +func TestVerifySStxAmounts(t *testing.T) { + amounts := []int64{int64(21000000), + int64(11000000), + int64(10000000), + } + calcAmounts := []int64{int64(21000000), + int64(11000000), + int64(10000000), + } + + // len error for slices + calcAmountsBad := []int64{int64(11000000), + int64(10000000), + } + err := stake.VerifySStxAmounts(amounts, + calcAmountsBad) + if err == nil || err.(stake.StakeRuleError).GetCode() != + stake.ErrVerSStxAmts { + t.Errorf("TestVerifySStxAmounts unexpected error: %v", err) + } + + // non-congruent slices error + calcAmountsBad = []int64{int64(21000000), + int64(11000000), + int64(10000001), + } + err = stake.VerifySStxAmounts(amounts, + calcAmountsBad) + if err == nil || err.(stake.StakeRuleError).GetCode() != + stake.ErrVerSStxAmts { + t.Errorf("TestVerifySStxAmounts 
unexpected error: %v", err) + } + + err = stake.VerifySStxAmounts(amounts, + calcAmounts) + if err != nil { + t.Errorf("TestVerifySStxAmounts unexpected error: %v", err) + } +} + +func TestVerifyStakingPkhsAndAmounts(t *testing.T) { + types := []bool{false, false} + amounts := []int64{int64(21000000), + int64(11000000), + } + pkhs := [][]byte{ + []byte{0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00}, + []byte{0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x04, 0x00, + 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x03}} + spendTypes := []bool{false, false} + spendAmounts := []int64{int64(21000000), + int64(11000000), + } + spendPkhs := [][]byte{ + []byte{0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00}, + []byte{0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x04, 0x00, + 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x03}} + spendRules := [][]bool{ + []bool{false, false}, + []bool{false, false}} + spendLimits := [][]uint16{ + []uint16{16, 20}, + []uint16{16, 20}} + + // bad types len + spendTypesBad := []bool{false} + err := stake.VerifyStakingPkhsAndAmounts(types, + pkhs, + amounts, + spendTypesBad, + spendPkhs, + spendAmounts, + true, // Vote + spendRules, + spendLimits) + if err == nil || err.(stake.StakeRuleError).GetCode() != + stake.ErrVerifyInput { + t.Errorf("TestVerifyStakingPkhsAndAmounts unexpected error: %v", err) + } + + // bad types + spendTypesBad = []bool{false, true} + err = stake.VerifyStakingPkhsAndAmounts(types, + pkhs, + amounts, + spendTypesBad, + spendPkhs, + spendAmounts, + true, // Vote + spendRules, + spendLimits) + if err == nil || err.(stake.StakeRuleError).GetCode() != + stake.ErrVerifyOutType { + t.Errorf("TestVerifyStakingPkhsAndAmounts unexpected error: %v", err) + } + + // len error for amt slices + spendAmountsBad := []int64{int64(11000111)} + err = stake.VerifyStakingPkhsAndAmounts(types, + pkhs, + amounts, + spendTypes, + spendPkhs, + spendAmountsBad, + true, // Vote + spendRules, + spendLimits) + if err == nil || err.(stake.StakeRuleError).GetCode() != + stake.ErrVerifyInput { + t.Errorf("TestVerifyStakingPkhsAndAmounts unexpected error: %v", err) + } + + // len error for pks slices + spendPkhsBad := [][]byte{ + []byte{0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00}, + } + err = stake.VerifyStakingPkhsAndAmounts(types, + pkhs, + amounts, + spendTypes, + spendPkhsBad, + spendAmounts, + true, // Vote + spendRules, + spendLimits) + if err == nil || err.(stake.StakeRuleError).GetCode() != + stake.ErrVerifyInput { + t.Errorf("TestVerifyStakingPkhsAndAmounts unexpected error: %v", err) + } + + // amount non-equivalence in position 1 + spendAmountsNonequiv := []int64{int64(21000000), + int64(11000000)} + spendAmountsNonequiv[1]-- + + err = stake.VerifyStakingPkhsAndAmounts(types, + pkhs, + amounts, + spendTypes, + spendPkhs, + spendAmountsNonequiv, + true, // Vote + spendRules, + spendLimits) + if err == nil || err.(stake.StakeRuleError).GetCode() != + stake.ErrVerifyOutputAmt { + t.Errorf("TestVerifyStakingPkhsAndAmounts unexpected error: %v", err) + } + + // pkh non-equivalence in position 1 + spendPkhsNonequiv := [][]byte{ + []byte{0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x00}, + []byte{0x00, 0x01, 0x02, 0x00, 
+ 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x04, 0x00, + 0x00, 0x01, 0x02, 0x00, + 0x00, 0x01, 0x02, 0x04}} + + err = stake.VerifyStakingPkhsAndAmounts(types, + pkhs, + amounts, + spendTypes, + spendPkhsNonequiv, + spendAmounts, + true, // Vote + spendRules, + spendLimits) + if err == nil || err.(stake.StakeRuleError).GetCode() != + stake.ErrVerifyOutPkhs { + t.Errorf("TestVerifyStakingPkhsAndAmounts unexpected error: %v", err) + } + + // rule non-equivalence in position 1 + spendRulesNonequivV := [][]bool{ + []bool{false, false}, + []bool{true, false}} + spendAmountsNonequivV := []int64{int64(21000000), + int64(10934463)} + spendAmountsNonequivVTooBig := []int64{int64(21000000), + int64(11000001)} + + spendRulesNonequivR := [][]bool{ + []bool{false, false}, + []bool{false, true}} + spendAmountsNonequivR := []int64{int64(21000000), + int64(9951423)} + + // vote + // original amount: 11000000 + // with the flag enabled, the minimum allowed to be spent is + // 11000000 - 1 << 16 = 10934464 + // So, 10934464 should pass while 10934463 should fail. + err = stake.VerifyStakingPkhsAndAmounts(types, + pkhs, + spendAmountsNonequivV, + spendTypes, + spendPkhs, + amounts, + true, // Vote + spendRulesNonequivV, + spendLimits) + if err == nil || err.(stake.StakeRuleError).GetCode() != + stake.ErrVerifyTooMuchFees { + t.Errorf("TestVerifyStakingPkhsAndAmounts unexpected error: %v", err) + } + + // original amount: 11000000 + // the maximum allows to be spent is 11000000 + err = stake.VerifyStakingPkhsAndAmounts(types, + pkhs, + spendAmountsNonequivVTooBig, + spendTypes, + spendPkhs, + amounts, + true, // Vote + spendRulesNonequivV, + spendLimits) + if err == nil || err.(stake.StakeRuleError).GetCode() != + stake.ErrVerifySpendTooMuch { + t.Errorf("TestVerifyStakingPkhsAndAmounts unexpected error: %v", err) + } + + // revocation + // original amount: 11000000 + // with the flag enabled, the minimum allowed to be spent is + // 11000000 - 1 << 20 = 9951424 + // So, 9951424 should pass while 9951423 should fail. + err = stake.VerifyStakingPkhsAndAmounts(types, + pkhs, + spendAmountsNonequivR, + spendTypes, + spendPkhs, + amounts, + false, // Revocation + spendRulesNonequivR, + spendLimits) + if err == nil || err.(stake.StakeRuleError).GetCode() != + stake.ErrVerifyTooMuchFees { + t.Errorf("TestVerifyStakingPkhsAndAmounts unexpected error: %v", err) + } + + // correct verification + err = stake.VerifyStakingPkhsAndAmounts(types, + pkhs, + amounts, + spendTypes, + spendPkhs, + spendAmounts, + true, // Vote + spendRules, + spendLimits) + if err != nil { + t.Errorf("TestVerifySStxAmounts unexpected error: %v", err) + } +} + +func TestVerifyRealTxs(t *testing.T) { + // Load an SStx and the SSRtx that spends to test some real fee situation + // and confirm the functionality of the functions used. 
+ hexSstx, _ := hex.DecodeString("010000000267cfaa9ce3a50977dcd1015f4f" + + "ce330071a3a9b855210e6646f6434caebda5a60200000001fffffffff6e6004" + + "fd4a0a8d5823c99be0a66a5f9a89c3dd4f7cbf76880098b8ca9d80b0e020000" + + "0001ffffffff05a42df60c0000000000001aba76a914c96206f8a3976057b2e" + + "b846d46d4a909972fc7c788ac00000000000000000000206a1ec96206f8a397" + + "6057b2eb846d46d4a909972fc7c780fe210a000000000054000000000000000" + + "000001abd76a914c96206f8a3976057b2eb846d46d4a909972fc7c788ac0000" + + "0000000000000000206a1ec96206f8a3976057b2eb846d46d4a909972fc7c70" + + "c33d40200000000005474cb4d070000000000001abd76a914c96206f8a39760" + + "57b2eb846d46d4a909972fc7c788ac00000000000000000280fe210a0000000" + + "013030000000000006a47304402200dbc873e69571a4516c4ef869d856386f9" + + "86c8543c0bc9f372ecd22c8606ccb102200f87a8f1b316b7675dfd1706eb22f" + + "331cea14d2e2d5f2c1d88173881a0cd4a04012102716f806d1156d20b9b2482" + + "2bff88549b510f400473536d3ea8d188b9fbe3835680fe210a0000000002030" + + "000040000006b483045022100bc7d0b7aa2c6610b7639f492fa556954ebc52a" + + "9dca5a417be4705ab424255ccd02200a0ccba2e2b7391b93b927f35150c1253" + + "2bdc6f27a8e9eb0bd0bfbc8b9ab13a5012102716f806d1156d20b9b24822bff" + + "88549b510f400473536d3ea8d188b9fbe38356") + sstxMtx := new(wire.MsgTx) + sstxMtx.FromBytes(hexSstx) + sstxTx := dcrutil.NewTx(sstxMtx) + sstxTypes, sstxAddrs, sstxAmts, _, sstxRules, sstxLimits := + stake.GetSStxStakeOutputInfo(sstxTx) + + hexSsrtx, _ := hex.DecodeString("010000000147f4453f244f2589551aea7c714d" + + "771053b667c6612616e9c8fc0e68960a9a100000000001ffffffff0270d7210a00" + + "00000000001abc76a914c96206f8a3976057b2eb846d46d4a909972fc7c788ac0c" + + "33d4020000000000001abc76a914c96206f8a3976057b2eb846d46d4a909972fc7" + + "c788ac000000000000000001ffffffffffffffff00000000ffffffff6b48304502" + + "2100d01c52c3f0c27166e3633d93b5ba821365a73f761e23bb04cc8061a28ab1bd" + + "7d02202bd65a6d16aaefe8b7f56378d58da6650f2e4b20bd5cb659dc9e842ce2d9" + + "15e6012102716f806d1156d20b9b24822bff88549b510f400473536d3ea8d188b9" + + "fbe38356") + ssrtxMtx := new(wire.MsgTx) + ssrtxMtx.FromBytes(hexSsrtx) + ssrtxTx := dcrutil.NewTx(ssrtxMtx) + + ssrtxTypes, ssrtxAddrs, ssrtxAmts, err := + stake.GetSSRtxStakeOutputInfo(ssrtxTx, &chaincfg.TestNetParams) + if err != nil { + t.Errorf("Unexpected GetSSRtxStakeOutputInfo error: %v", err.Error()) + } + + ssrtxCalcAmts := stake.GetStakeRewards(sstxAmts, sstxMtx.TxOut[0].Value, + int64(0)) + + // Here an error is thrown because the second output spends too much. + // Ticket price: 217460132 + // 1: 170000000 - 170000000. 169999218 allowed back (-782 atoms) + // 2: 170000000 - 47461132. 122538868 Change. Paid 1000 fees total. + // 47460913 allowed back (-219 atoms for fee). + // In this test the second output spends 47461132, which is more than + // allowed. + err = stake.VerifyStakingPkhsAndAmounts(sstxTypes, + sstxAddrs, + ssrtxAmts, + ssrtxTypes, + ssrtxAddrs, + ssrtxCalcAmts, + false, // Revocation + sstxRules, + sstxLimits) + if err == nil || err.(stake.StakeRuleError).GetCode() != + stake.ErrVerifySpendTooMuch { + t.Errorf("No or unexpected VerifyStakingPkhsAndAmounts error: %v", + err.Error()) + } + + // Correct this and make sure it passes. 
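The corrected value set just below (47460913) follows from a proportional split of the ticket price across the two commitments, which matches the -782 and -219 atom figures in the comments above. A minimal sketch of that split, assuming GetStakeRewards divides the ticket price in proportion to each contribution; the helper name proportionalRewards is illustrative, not part of the package:

package main

import "fmt"

// proportionalRewards gives each commitment floor(contrib * (ticketPrice +
// subsidy) / totalContrib) atoms, so the integer truncation accounts for the
// small per-output losses noted in the comments above.
func proportionalRewards(contribs []int64, ticketPrice, subsidy int64) []int64 {
	var total int64
	for _, c := range contribs {
		total += c
	}
	out := make([]int64, len(contribs))
	for i, c := range contribs {
		out[i] = c * (ticketPrice + subsidy) / total
	}
	return out
}

func main() {
	// Commitments of 170000000 and 47461132 atoms, ticket price 217460132,
	// zero subsidy, as in the transactions decoded above.
	fmt.Println(proportionalRewards([]int64{170000000, 47461132}, 217460132, 0))
	// Expected: [169999218 47460913]
}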
+ ssrtxTx.MsgTx().TxOut[1].Value = 47460913 + sstxTypes, sstxAddrs, sstxAmts, _, sstxRules, sstxLimits = + stake.GetSStxStakeOutputInfo(sstxTx) + ssrtxTypes, ssrtxAddrs, ssrtxAmts, err = + stake.GetSSRtxStakeOutputInfo(ssrtxTx, &chaincfg.TestNetParams) + if err != nil { + t.Errorf("Unexpected GetSSRtxStakeOutputInfo error: %v", err.Error()) + } + ssrtxCalcAmts = stake.GetStakeRewards(sstxAmts, sstxMtx.TxOut[0].Value, + int64(0)) + err = stake.VerifyStakingPkhsAndAmounts(sstxTypes, + sstxAddrs, + ssrtxAmts, + ssrtxTypes, + ssrtxAddrs, + ssrtxCalcAmts, + false, // Revocation + sstxRules, + sstxLimits) + if err != nil { + t.Errorf("Unexpected VerifyStakingPkhsAndAmounts error: %v", + err) + } + + // Spend too much fees for the limit in the first output and + // make sure it fails. + ssrtxTx.MsgTx().TxOut[0].Value = 0 + sstxTypes, sstxAddrs, sstxAmts, _, sstxRules, sstxLimits = + stake.GetSStxStakeOutputInfo(sstxTx) + ssrtxTypes, ssrtxAddrs, ssrtxAmts, err = + stake.GetSSRtxStakeOutputInfo(ssrtxTx, &chaincfg.TestNetParams) + if err != nil { + t.Errorf("Unexpected GetSSRtxStakeOutputInfo error: %v", err.Error()) + } + ssrtxCalcAmts = stake.GetStakeRewards(sstxAmts, sstxMtx.TxOut[0].Value, + int64(0)) + err = stake.VerifyStakingPkhsAndAmounts(sstxTypes, + sstxAddrs, + ssrtxAmts, + ssrtxTypes, + ssrtxAddrs, + ssrtxCalcAmts, + false, // Revocation + sstxRules, + sstxLimits) + if err == nil || err.(stake.StakeRuleError).GetCode() != + stake.ErrVerifyTooMuchFees { + t.Errorf("No or unexpected VerifyStakingPkhsAndAmounts error: %v", + err.Error()) + } + + // Use everything as fees and make sure both participants are paid + // equally for their contibutions. Both inputs to the SStx are the + // same size, so this is possible. + copy(sstxTx.MsgTx().TxOut[3].PkScript, sstxTx.MsgTx().TxOut[1].PkScript) + sstxTx.MsgTx().TxOut[4].Value = 0 + ssrtxTx.MsgTx().TxOut[0].Value = 108730066 + ssrtxTx.MsgTx().TxOut[1].Value = 108730066 + sstxTypes, sstxAddrs, sstxAmts, _, sstxRules, sstxLimits = + stake.GetSStxStakeOutputInfo(sstxTx) + ssrtxTypes, ssrtxAddrs, ssrtxAmts, err = + stake.GetSSRtxStakeOutputInfo(ssrtxTx, &chaincfg.TestNetParams) + if err != nil { + t.Errorf("Unexpected GetSSRtxStakeOutputInfo error: %v", err.Error()) + } + ssrtxCalcAmts = stake.GetStakeRewards(sstxAmts, sstxMtx.TxOut[0].Value, + int64(0)) + err = stake.VerifyStakingPkhsAndAmounts(sstxTypes, + sstxAddrs, + ssrtxAmts, + ssrtxTypes, + ssrtxAddrs, + ssrtxCalcAmts, + false, // Revocation + sstxRules, + sstxLimits) + if err != nil { + t.Errorf("Unexpected VerifyStakingPkhsAndAmounts error: %v", + err.Error()) + } + if ssrtxCalcAmts[0] != ssrtxCalcAmts[1] { + t.Errorf("Unexpected ssrtxCalcAmts; both values should be same but "+ + "got %v and %v", ssrtxCalcAmts[0], ssrtxCalcAmts[1]) + } +} + +// -------------------------------------------------------------------------------- +// TESTING VARIABLES BEGIN HERE + +// sstxTxIn is the first input in the reference valid sstx +var sstxTxIn = wire.TxIn{ + PreviousOutPoint: wire.OutPoint{ + Hash: chainhash.Hash([32]byte{ // Make go vet happy. 
+ 0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60, + 0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac, + 0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07, + 0x79, 0xac, 0x88, 0xfd, 0xf3, 0x57, 0xa1, 0x87, + }), // 87a157f3fd88ac7907c05fc55e271dc4acdc5605d187d646604ca8c0e9382e03 + Index: 0, + Tree: dcrutil.TxTreeRegular, + }, + SignatureScript: []byte{ + 0x49, // OP_DATA_73 + 0x30, 0x46, 0x02, 0x21, 0x00, 0xc3, 0x52, 0xd3, + 0xdd, 0x99, 0x3a, 0x98, 0x1b, 0xeb, 0xa4, 0xa6, + 0x3a, 0xd1, 0x5c, 0x20, 0x92, 0x75, 0xca, 0x94, + 0x70, 0xab, 0xfc, 0xd5, 0x7d, 0xa9, 0x3b, 0x58, + 0xe4, 0xeb, 0x5d, 0xce, 0x82, 0x02, 0x21, 0x00, + 0x84, 0x07, 0x92, 0xbc, 0x1f, 0x45, 0x60, 0x62, + 0x81, 0x9f, 0x15, 0xd3, 0x3e, 0xe7, 0x05, 0x5c, + 0xf7, 0xb5, 0xee, 0x1a, 0xf1, 0xeb, 0xcc, 0x60, + 0x28, 0xd9, 0xcd, 0xb1, 0xc3, 0xaf, 0x77, 0x48, + 0x01, // 73-byte signature + 0x41, // OP_DATA_65 + 0x04, 0xf4, 0x6d, 0xb5, 0xe9, 0xd6, 0x1a, 0x9d, + 0xc2, 0x7b, 0x8d, 0x64, 0xad, 0x23, 0xe7, 0x38, + 0x3a, 0x4e, 0x6c, 0xa1, 0x64, 0x59, 0x3c, 0x25, + 0x27, 0xc0, 0x38, 0xc0, 0x85, 0x7e, 0xb6, 0x7e, + 0xe8, 0xe8, 0x25, 0xdc, 0xa6, 0x50, 0x46, 0xb8, + 0x2c, 0x93, 0x31, 0x58, 0x6c, 0x82, 0xe0, 0xfd, + 0x1f, 0x63, 0x3f, 0x25, 0xf8, 0x7c, 0x16, 0x1b, + 0xc6, 0xf8, 0xa6, 0x30, 0x12, 0x1d, 0xf2, 0xb3, + 0xd3, // 65-byte pubkey + }, + Sequence: 0xffffffff, +} + +// sstxTxOut0 is the first output in the reference valid sstx +var sstxTxOut0 = wire.TxOut{ + Value: 0x2123e300, // 556000000 + Version: 0x0000, + PkScript: []byte{ + 0xba, // OP_SSTX + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xc3, 0x98, 0xef, 0xa9, + 0xc3, 0x92, 0xba, 0x60, + 0x13, 0xc5, 0xe0, 0x4e, + 0xe7, 0x29, 0x75, 0x5e, + 0xf7, 0xf5, 0x8b, 0x32, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, +} + +// sstxTxOut1 is the second output in the reference valid sstx +var sstxTxOut1 = wire.TxOut{ + Value: 0x00000000, // 0 + Version: 0x0000, + PkScript: []byte{ + 0x6a, // OP_RETURN + 0x1e, // 30 bytes to be pushed + 0x94, 0x8c, 0x76, 0x5a, // 20 byte address + 0x69, 0x14, 0xd4, 0x3f, + 0x2a, 0x7a, 0xc1, 0x77, + 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, + 0x00, 0xe3, 0x23, 0x21, // Transaction amount + 0x00, 0x00, 0x00, 0x00, + 0x44, 0x3f, // Fee limits + }, +} + +// sstxTxOut2 is the third output in the reference valid sstx +var sstxTxOut2 = wire.TxOut{ + Value: 0x2223e300, + Version: 0x0000, + PkScript: []byte{ + 0xbd, // OP_SSTXCHANGE + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xc3, 0x98, 0xef, 0xa9, + 0xc3, 0x92, 0xba, 0x60, + 0x13, 0xc5, 0xe0, 0x4e, + 0xe7, 0x29, 0x75, 0x5e, + 0xf7, 0xf5, 0x8b, 0x32, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, +} + +// sstxTxOut3 is another output in an SStx, this time instruction to pay to +// a P2SH output +var sstxTxOut3 = wire.TxOut{ + Value: 0x00000000, // 0 + Version: 0x0000, + PkScript: []byte{ + 0x6a, // OP_RETURN + 0x1e, // 30 bytes to be pushed + 0x94, 0x8c, 0x76, 0x5a, // 20 byte address + 0x69, 0x14, 0xd4, 0x3f, + 0x2a, 0x7a, 0xc1, 0x77, + 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, + 0x00, 0xe3, 0x23, 0x21, // Transaction amount + 0x00, 0x00, 0x00, 0x80, // Last byte flagged + 0x44, 0x3f, // Fee limits + }, +} + +// sstxTxOut4 is the another output in the reference valid sstx, and pays change +// to a P2SH address +var sstxTxOut4 = wire.TxOut{ + Value: 0x2223e300, + Version: 0x0000, + PkScript: []byte{ + 0xbd, // OP_SSTXCHANGE + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xc3, 0x98, 0xef, 0xa9, + 0xc3, 0x92, 0xba, 0x60, + 0x13, 0xc5, 0xe0, 0x4e, + 0xe7, 0x29, 
0x75, 0x5e, + 0xf7, 0xf5, 0x8b, 0x32, + 0x87, // OP_EQUAL + }, +} + +// sstxTxOut4VerBad is the third output in the reference valid sstx, with a +// bad version. +var sstxTxOut4VerBad = wire.TxOut{ + Value: 0x2223e300, + Version: 0x1234, + PkScript: []byte{ + 0xbd, // OP_SSTXCHANGE + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xc3, 0x98, 0xef, 0xa9, + 0xc3, 0x92, 0xba, 0x60, + 0x13, 0xc5, 0xe0, 0x4e, + 0xe7, 0x29, 0x75, 0x5e, + 0xf7, 0xf5, 0x8b, 0x32, + 0x87, // OP_EQUAL + }, +} + +// sstxMsgTx is a valid SStx MsgTx with an input and outputs and is used in various +// tests +var sstxMsgTx = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &sstxTxIn, + &sstxTxIn, + &sstxTxIn, + }, + TxOut: []*wire.TxOut{ + &sstxTxOut0, + &sstxTxOut1, + &sstxTxOut2, // emulate change address + &sstxTxOut1, + &sstxTxOut2, // emulate change address + &sstxTxOut3, // P2SH + &sstxTxOut4, // P2SH change + }, + LockTime: 0, + Expiry: 0, +} + +// sstxMsgTxExtraInputs is an invalid SStx MsgTx with too many inputs +var sstxMsgTxExtraInput = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, + &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, + &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, + &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, + &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, + &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, + &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, + &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, + &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, + &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, + &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, + &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, &sstxTxIn, + }, + TxOut: []*wire.TxOut{ + &sstxTxOut0, + &sstxTxOut1, + }, + LockTime: 0, + Expiry: 0, +} + +// sstxMsgTxExtraOutputs is an invalid SStx MsgTx with too many outputs +var sstxMsgTxExtraOutputs = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &sstxTxIn, + }, + TxOut: []*wire.TxOut{ + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, 
&sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + &sstxTxOut0, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, &sstxTxOut1, + }, + LockTime: 0, + Expiry: 0, +} + +// sstxMismatchedInsOuts is an invalid SStx MsgTx with too many outputs for the +// number of inputs it has +var sstxMismatchedInsOuts = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &sstxTxIn, + }, + TxOut: []*wire.TxOut{ + &sstxTxOut0, &sstxTxOut1, &sstxTxOut2, &sstxTxOut1, &sstxTxOut2, + }, + LockTime: 0, + Expiry: 0, +} + +// sstxBadVersionOut is an invalid SStx MsgTx with an output containing a bad +// version. +var sstxBadVersionOut = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &sstxTxIn, + &sstxTxIn, + &sstxTxIn, + }, + TxOut: []*wire.TxOut{ + &sstxTxOut0, + &sstxTxOut1, + &sstxTxOut2, // emulate change address + &sstxTxOut1, // 3 + &sstxTxOut2, // 4 + &sstxTxOut3, // 5 P2SH + &sstxTxOut4VerBad, // 6 P2SH change + }, + LockTime: 0, + Expiry: 0, +} + +// sstxNullDataMissing is an invalid SStx MsgTx with no address push in the second +// output +var sstxNullDataMissing = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &sstxTxIn, + }, + TxOut: []*wire.TxOut{ + &sstxTxOut0, &sstxTxOut0, &sstxTxOut2, + }, + LockTime: 0, + Expiry: 0, +} + +// sstxNullDataMisplaced is an invalid SStx MsgTx that has the commitment and +// change outputs swapped +var sstxNullDataMisplaced = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &sstxTxIn, + }, + TxOut: []*wire.TxOut{ + &sstxTxOut0, &sstxTxOut2, &sstxTxOut1, + }, + LockTime: 0, + Expiry: 0, +} + +// ssgenTxIn0 is the 0th position input in a valid SSGen tx used to test out the +// IsSSGen function +var ssgenTxIn0 = wire.TxIn{ + PreviousOutPoint: wire.OutPoint{ + Hash: chainhash.Hash{}, + Index: 0xffffffff, + Tree: dcrutil.TxTreeRegular, + }, + SignatureScript: []byte{ + 0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, + }, + BlockHeight: wire.NullBlockHeight, + BlockIndex: wire.NullBlockIndex, + Sequence: 0xffffffff, +} + +// ssgenTxIn1 is the 1st position input in a valid SSGen tx used to test out the +// IsSSGen function +var ssgenTxIn1 = wire.TxIn{ + PreviousOutPoint: wire.OutPoint{ + Hash: chainhash.Hash([32]byte{ // Make go vet happy. 
+ 0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60, + 0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac, + 0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07, + 0x79, 0xac, 0x88, 0xfd, 0xf3, 0x57, 0xa1, 0x87, + }), // 87a157f3fd88ac7907c05fc55e271dc4acdc5605d187d646604ca8c0e9382e03 + Index: 0, + Tree: dcrutil.TxTreeStake, + }, + SignatureScript: []byte{ + 0x49, // OP_DATA_73 + 0x30, 0x46, 0x02, 0x21, 0x00, 0xc3, 0x52, 0xd3, + 0xdd, 0x99, 0x3a, 0x98, 0x1b, 0xeb, 0xa4, 0xa6, + 0x3a, 0xd1, 0x5c, 0x20, 0x92, 0x75, 0xca, 0x94, + 0x70, 0xab, 0xfc, 0xd5, 0x7d, 0xa9, 0x3b, 0x58, + 0xe4, 0xeb, 0x5d, 0xce, 0x82, 0x02, 0x21, 0x00, + 0x84, 0x07, 0x92, 0xbc, 0x1f, 0x45, 0x60, 0x62, + 0x81, 0x9f, 0x15, 0xd3, 0x3e, 0xe7, 0x05, 0x5c, + 0xf7, 0xb5, 0xee, 0x1a, 0xf1, 0xeb, 0xcc, 0x60, + 0x28, 0xd9, 0xcd, 0xb1, 0xc3, 0xaf, 0x77, 0x48, + 0x01, // 73-byte signature + 0x41, // OP_DATA_65 + 0x04, 0xf4, 0x6d, 0xb5, 0xe9, 0xd6, 0x1a, 0x9d, + 0xc2, 0x7b, 0x8d, 0x64, 0xad, 0x23, 0xe7, 0x38, + 0x3a, 0x4e, 0x6c, 0xa1, 0x64, 0x59, 0x3c, 0x25, + 0x27, 0xc0, 0x38, 0xc0, 0x85, 0x7e, 0xb6, 0x7e, + 0xe8, 0xe8, 0x25, 0xdc, 0xa6, 0x50, 0x46, 0xb8, + 0x2c, 0x93, 0x31, 0x58, 0x6c, 0x82, 0xe0, 0xfd, + 0x1f, 0x63, 0x3f, 0x25, 0xf8, 0x7c, 0x16, 0x1b, + 0xc6, 0xf8, 0xa6, 0x30, 0x12, 0x1d, 0xf2, 0xb3, + 0xd3, // 65-byte pubkey + }, + Sequence: 0xffffffff, +} + +// ssgenTxOut0 is the 0th position output in a valid SSGen tx used to test out the +// IsSSGen function +var ssgenTxOut0 = wire.TxOut{ + Value: 0x00000000, // 0 + Version: 0x0000, + PkScript: []byte{ + 0x6a, // OP_RETURN + 0x24, // 36 bytes to be pushed + 0x94, 0x8c, 0x76, 0x5a, // 32 byte hash + 0x69, 0x14, 0xd4, 0x3f, + 0x2a, 0x7a, 0xc1, 0x77, + 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, + 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, + 0x52, 0xde, 0x3d, 0x7c, + 0x00, 0xe3, 0x23, 0x21, // 4 byte height + }, +} + +// ssgenTxOut1 is the 1st position output in a valid SSGen tx used to test out the +// IsSSGen function +var ssgenTxOut1 = wire.TxOut{ + Value: 0x00000000, // 0 + Version: 0x0000, + PkScript: []byte{ + 0x6a, // OP_RETURN + 0x02, // 2 bytes to be pushed + 0x94, 0x8c, // Vote bits + }, +} + +// ssgenTxOut2 is the 2nd position output in a valid SSGen tx used to test out the +// IsSSGen function +var ssgenTxOut2 = wire.TxOut{ + Value: 0x2123e300, // 556000000 + Version: 0x0000, + PkScript: []byte{ + 0xbb, // OP_SSGEN + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xc3, 0x98, 0xef, 0xa9, + 0xc3, 0x92, 0xba, 0x60, + 0x13, 0xc5, 0xe0, 0x4e, + 0xe7, 0x29, 0x75, 0x5e, + 0xf7, 0xf5, 0x8b, 0x32, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, +} + +// ssgenTxOut3 is a P2SH output +var ssgenTxOut3 = wire.TxOut{ + Value: 0x2123e300, // 556000000 + Version: 0x0000, + PkScript: []byte{ + 0xbb, // OP_SSGEN + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xc3, 0x98, 0xef, 0xa9, + 0xc3, 0x92, 0xba, 0x60, + 0x13, 0xc5, 0xe0, 0x4e, + 0xe7, 0x29, 0x75, 0x5e, + 0xf7, 0xf5, 0x8b, 0x32, + 0x87, // OP_EQUAL + }, +} + +// ssgenTxOut3BadVer is a P2SH output with a bad version. 
+var ssgenTxOut3BadVer = wire.TxOut{ + Value: 0x2123e300, // 556000000 + Version: 0x0100, + PkScript: []byte{ + 0xbb, // OP_SSGEN + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xc3, 0x98, 0xef, 0xa9, + 0xc3, 0x92, 0xba, 0x60, + 0x13, 0xc5, 0xe0, 0x4e, + 0xe7, 0x29, 0x75, 0x5e, + 0xf7, 0xf5, 0x8b, 0x32, + 0x87, // OP_EQUAL + }, +} + +// ssgenMsgTx is a valid SSGen MsgTx with an input and outputs and is used in +// various testing scenarios +var ssgenMsgTx = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &ssgenTxIn0, + &ssgenTxIn1, + }, + TxOut: []*wire.TxOut{ + &ssgenTxOut0, + &ssgenTxOut1, + &ssgenTxOut2, + &ssgenTxOut3, + }, + LockTime: 0, + Expiry: 0, +} + +// ssgenMsgTxExtraInput is an invalid SSGen MsgTx with too many inputs +var ssgenMsgTxExtraInput = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &ssgenTxIn0, + &ssgenTxIn1, + &ssgenTxIn1, + }, + TxOut: []*wire.TxOut{ + &ssgenTxOut0, + &ssgenTxOut1, + &ssgenTxOut2, + }, + LockTime: 0, + Expiry: 0, +} + +// ssgenMsgTxExtraOutputs is an invalid SSGen MsgTx with too many outputs +var ssgenMsgTxExtraOutputs = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &ssgenTxIn0, + &ssgenTxIn1, + }, + TxOut: []*wire.TxOut{ + &ssgenTxOut0, + &ssgenTxOut1, + &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, + &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, + &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, + &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, + &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, + &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, + &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, + &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, + &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, + &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, + &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, + &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, + &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, + &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, + &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, + &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, &ssgenTxOut2, + }, + LockTime: 0, + Expiry: 0, +} + +// ssgenMsgTxStakeBaseWrong is an invalid SSGen tx with the stakebase in the wrong +// position +var ssgenMsgTxStakeBaseWrong = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &ssgenTxIn1, + &ssgenTxIn0, + }, + TxOut: []*wire.TxOut{ + &ssgenTxOut0, + &ssgenTxOut1, + &ssgenTxOut2, + }, + LockTime: 0, + Expiry: 0, +} + +// ssgenMsgTxBadVerOut is an invalid SSGen tx that contains an output with a bad +// version +var ssgenMsgTxBadVerOut = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &ssgenTxIn0, + &ssgenTxIn1, + }, + TxOut: []*wire.TxOut{ + &ssgenTxOut0, + &ssgenTxOut1, + &ssgenTxOut2, + &ssgenTxOut3BadVer, + }, + LockTime: 0, + Expiry: 0, +} + +// ssgenMsgTxWrongZeroethOut is an invalid SSGen tx with the first output being not +// an OP_RETURN push +var ssgenMsgTxWrongZeroethOut = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &ssgenTxIn0, + &ssgenTxIn1, + }, + TxOut: []*wire.TxOut{ + &ssgenTxOut2, + &ssgenTxOut1, + &ssgenTxOut0, + }, + LockTime: 0, + Expiry: 0, +} + +// ssgenMsgTxWrongFirstOut is an invalid SSGen tx with the second output being not +// an OP_RETURN push +var ssgenMsgTxWrongFirstOut = 
&wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &ssgenTxIn0, + &ssgenTxIn1, + }, + TxOut: []*wire.TxOut{ + &ssgenTxOut0, + &ssgenTxOut2, + &ssgenTxOut1, + }, + LockTime: 0, + Expiry: 0, +} + +// ssrtxTxIn is the 0th position input in a valid SSRtx tx used to test out the +// IsSSRtx function +var ssrtxTxIn = wire.TxIn{ + PreviousOutPoint: wire.OutPoint{ + Hash: chainhash.Hash([32]byte{ // Make go vet happy. + 0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60, + 0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac, + 0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07, + 0x79, 0xac, 0x88, 0xfd, 0xf3, 0x57, 0xa1, 0x87, + }), // 87a157f3fd88ac7907c05fc55e271dc4acdc5605d187d646604ca8c0e9382e03 + Index: 0, + Tree: dcrutil.TxTreeStake, + }, + SignatureScript: []byte{ + 0x49, // OP_DATA_73 + 0x30, 0x46, 0x02, 0x21, 0x00, 0xc3, 0x52, 0xd3, + 0xdd, 0x99, 0x3a, 0x98, 0x1b, 0xeb, 0xa4, 0xa6, + 0x3a, 0xd1, 0x5c, 0x20, 0x92, 0x75, 0xca, 0x94, + 0x70, 0xab, 0xfc, 0xd5, 0x7d, 0xa9, 0x3b, 0x58, + 0xe4, 0xeb, 0x5d, 0xce, 0x82, 0x02, 0x21, 0x00, + 0x84, 0x07, 0x92, 0xbc, 0x1f, 0x45, 0x60, 0x62, + 0x81, 0x9f, 0x15, 0xd3, 0x3e, 0xe7, 0x05, 0x5c, + 0xf7, 0xb5, 0xee, 0x1a, 0xf1, 0xeb, 0xcc, 0x60, + 0x28, 0xd9, 0xcd, 0xb1, 0xc3, 0xaf, 0x77, 0x48, + 0x01, // 73-byte signature + 0x41, // OP_DATA_65 + 0x04, 0xf4, 0x6d, 0xb5, 0xe9, 0xd6, 0x1a, 0x9d, + 0xc2, 0x7b, 0x8d, 0x64, 0xad, 0x23, 0xe7, 0x38, + 0x3a, 0x4e, 0x6c, 0xa1, 0x64, 0x59, 0x3c, 0x25, + 0x27, 0xc0, 0x38, 0xc0, 0x85, 0x7e, 0xb6, 0x7e, + 0xe8, 0xe8, 0x25, 0xdc, 0xa6, 0x50, 0x46, 0xb8, + 0x2c, 0x93, 0x31, 0x58, 0x6c, 0x82, 0xe0, 0xfd, + 0x1f, 0x63, 0x3f, 0x25, 0xf8, 0x7c, 0x16, 0x1b, + 0xc6, 0xf8, 0xa6, 0x30, 0x12, 0x1d, 0xf2, 0xb3, + 0xd3, // 65-byte pubkey + }, + Sequence: 0xffffffff, +} + +// ssrtxTxOut is the 0th position output in a valid SSRtx tx used to test out the +// IsSSRtx function +var ssrtxTxOut = wire.TxOut{ + Value: 0x2122e300, + Version: 0x0000, + PkScript: []byte{ + 0xbc, // OP_SSGEN + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xc3, 0x98, 0xef, 0xa9, + 0xc3, 0x92, 0xba, 0x60, + 0x13, 0xc5, 0xe0, 0x4e, + 0xe7, 0x29, 0x75, 0x5e, + 0xf7, 0xf5, 0x8b, 0x33, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, +} + +// ssrtxTxOut2 is a P2SH output +var ssrtxTxOut2 = wire.TxOut{ + Value: 0x2123e300, // 556000000 + Version: 0x0000, + PkScript: []byte{ + 0xbc, // OP_SSRTX + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xc3, 0x98, 0xef, 0xa9, + 0xc3, 0x92, 0xba, 0x60, + 0x13, 0xc5, 0xe0, 0x4e, + 0xe7, 0x29, 0x75, 0x5e, + 0xf7, 0xf5, 0x8b, 0x32, + 0x87, // OP_EQUAL + }, +} + +// ssrtxTxOut2BadVer is a P2SH output with a non-default script version +var ssrtxTxOut2BadVer = wire.TxOut{ + Value: 0x2123e300, // 556000000 + Version: 0x0100, + PkScript: []byte{ + 0xbc, // OP_SSRTX + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xc3, 0x98, 0xef, 0xa9, + 0xc3, 0x92, 0xba, 0x60, + 0x13, 0xc5, 0xe0, 0x4e, + 0xe7, 0x29, 0x75, 0x5e, + 0xf7, 0xf5, 0x8b, 0x32, + 0x87, // OP_EQUAL + }, +} + +// ssrtxMsgTx is a valid SSRtx MsgTx with an input and outputs and is used in +// various testing scenarios +var ssrtxMsgTx = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &ssrtxTxIn, + }, + TxOut: []*wire.TxOut{ + &ssrtxTxOut, + &ssrtxTxOut2, + }, + LockTime: 0, + Expiry: 0, +} + +// ssrtxMsgTx is a valid SSRtx MsgTx with an input and outputs and is used in +// various testing scenarios +var ssrtxMsgTxTooManyInputs = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &ssrtxTxIn, + &ssrtxTxIn, + }, + TxOut: []*wire.TxOut{ + &ssrtxTxOut, + }, + LockTime: 0, + 
Expiry: 0, +} + +// ssrtxMsgTx is a valid SSRtx MsgTx with an input and outputs and is used in +// various testing scenarios +var ssrtxMsgTxTooManyOutputs = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &ssrtxTxIn, + }, + TxOut: []*wire.TxOut{ + &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, + &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, + &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, + &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, + &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, + &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, + &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, + &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, + &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, + &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, + &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, + &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, + &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, + &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, + &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, &ssrtxTxOut, + }, + LockTime: 0, + Expiry: 0, +} + +var ssrtxMsgTxBadVerOut = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + &ssrtxTxIn, + }, + TxOut: []*wire.TxOut{ + &ssrtxTxOut, + &ssrtxTxOut2BadVer, + }, + LockTime: 0, + Expiry: 0, +} diff --git a/blockchain/stake/ticketdb.go b/blockchain/stake/ticketdb.go new file mode 100644 index 00000000..0f3bb5f4 --- /dev/null +++ b/blockchain/stake/ticketdb.go @@ -0,0 +1,1641 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +// +// TODO: Consider adding the height where an SStx was missed for use as SSgen in +// SSRtx output[0] OP_RETURN? +// +// This file contains an in-memory database for storing information about tickets. +// +// There should be four major datasets: +// ticketMap = Ticket map keyed for mature, available tickets by number. +// spentTicketMap = Ticket map keyed for tickets that are mature but invalid or +// spent, keyed by the block in which they were invalidated. +// missedTicketMap = Ticket map keyed for SStx hash for tickets which had the +// opportunity to be spent but were not. +// revokedTicketMap = Ticket map keyed for SStx hash for tickets which had the +// opportunity to be spent but were not, and then were revoked. + +package stake + +import ( + "bytes" + "encoding/gob" + "errors" + "fmt" + "io/ioutil" + "math" + "path/filepath" + "sort" + "sync" + + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrutil" +) + +// bucketSize is the number of pre-sort buckets for the in memory database of +// live tickets. This allows an approximately 1.5x-2.5x increase in sorting speed +// and easier/more efficient handling of new block insertion and evaluation +// of reorgs. +// TODO Storing the size of the buckets somewhere would make evaluation of +// blocks being added to HEAD extremely fast and should eventually be implemented. 
+// For example, when finding the tickets to use you can cycle through a struct +// for each bucket where the struct stores the number of tickets in the bucket, +// so you can easily find the index of the ticket you need without creating +// a giant slice and sorting it. +// Optimizations for reorganize are possible. +const BucketsSize = math.MaxUint8 + 1 + +// TickerData contains contextual information about tickets as indicated +// below. +// TODO Replace Missed/Expired bool with single byte bitflags. +type TicketData struct { + SStxHash chainhash.Hash + Prefix uint8 // Ticket hash prefix for pre-sort + SpendHash chainhash.Hash + BlockHeight int64 // Block for where the original sstx was located + Missed bool // Whether or not the ticket was spent + Expired bool // Whether or not the ticket expired +} + +func NewTicketData(sstxHash chainhash.Hash, + prefix uint8, + spendHash chainhash.Hash, + blockHeight int64, + missed bool, + expired bool) *TicketData { + return &TicketData{sstxHash, + prefix, + spendHash, + blockHeight, + missed, + expired} +} + +// td.GobEncode serializes the TicketData struct into a gob for use in storage. +// +// This function is safe for concurrent access. +func (td *TicketData) GobEncode() ([]byte, error) { + w := new(bytes.Buffer) + encoder := gob.NewEncoder(w) + + err := encoder.Encode(td.SStxHash) + if err != nil { + return nil, err + } + err = encoder.Encode(td.Prefix) + if err != nil { + return nil, err + } + err = encoder.Encode(td.SpendHash) + if err != nil { + return nil, err + } + err = encoder.Encode(td.BlockHeight) + if err != nil { + return nil, err + } + err = encoder.Encode(td.Missed) + if err != nil { + return nil, err + } + err = encoder.Encode(td.Expired) + if err != nil { + return nil, err + } + return w.Bytes(), nil +} + +// td.GobDecode deserializes the TicketData struct into a gob for use in retrieval +// from storage. +func (td *TicketData) GobDecode(buf []byte) error { + r := bytes.NewBuffer(buf) + decoder := gob.NewDecoder(r) + + err := decoder.Decode(&td.SStxHash) + if err != nil { + return err + } + err = decoder.Decode(&td.Prefix) + if err != nil { + return err + } + err = decoder.Decode(&td.SpendHash) + if err != nil { + return err + } + err = decoder.Decode(&td.BlockHeight) + if err != nil { + return err + } + err = decoder.Decode(&td.Missed) + if err != nil { + return err + } + return decoder.Decode(&td.Expired) +} + +// TicketDataSlice is a sortable data structure of pointers to TicketData. +type TicketDataSlice []*TicketData + +func NewTicketDataSliceEmpty() TicketDataSlice { + slice := make([]*TicketData, 0) + return TicketDataSlice(slice) +} + +func NewTicketDataSlice(size int) TicketDataSlice { + slice := make([]*TicketData, size) + return TicketDataSlice(slice) +} + +// Less determines which of two *TicketData values is smaller; used for sort. +func (tds TicketDataSlice) Less(i, j int) bool { + cmp := bytes.Compare(tds[i].SStxHash[:], tds[j].SStxHash[:]) + isISmaller := (cmp == -1) + return isISmaller +} + +// Swap swaps two *TicketData values. +func (tds TicketDataSlice) Swap(i, j int) { tds[i], tds[j] = tds[j], tds[i] } + +// Len returns the length of the slice. +func (tds TicketDataSlice) Len() int { return len(tds) } + +// SStxMemMap is a memory map of SStx keyed to the txHash. +type SStxMemMap map[chainhash.Hash]*TicketData + +// TicketMaps is a struct of maps that encompass the four major buckets of the +// ticket in-memory database. 
+type TicketMaps struct { + ticketMap []SStxMemMap + spentTicketMap map[int64]SStxMemMap + missedTicketMap SStxMemMap + revokedTicketMap SStxMemMap +} + +// tm.GobEncode serializes the TicketMaps struct into a gob for use in storage. +func (tm *TicketMaps) GobEncode() ([]byte, error) { + w := new(bytes.Buffer) + encoder := gob.NewEncoder(w) + + err := encoder.Encode(tm.ticketMap) + if err != nil { + return nil, err + } + err = encoder.Encode(tm.spentTicketMap) + if err != nil { + return nil, err + } + err = encoder.Encode(tm.missedTicketMap) + if err != nil { + return nil, err + } + err = encoder.Encode(tm.revokedTicketMap) + if err != nil { + return nil, err + } + return w.Bytes(), nil +} + +// tm.GobDecode deserializes the TicketMaps struct into a gob for use in retrieval +// from storage. +func (tm *TicketMaps) GobDecode(buf []byte) error { + r := bytes.NewBuffer(buf) + decoder := gob.NewDecoder(r) + + err := decoder.Decode(&tm.ticketMap) + if err != nil { + return err + } + err = decoder.Decode(&tm.spentTicketMap) + if err != nil { + return err + } + err = decoder.Decode(&tm.missedTicketMap) + if err != nil { + return err + } + return decoder.Decode(&tm.revokedTicketMap) +} + +// TicketDB is the ticket in-memory database. +type TicketDB struct { + mtx sync.Mutex + maps TicketMaps + database database.Db + chainParams *chaincfg.Params + StakeEnabledHeight int64 +} + +// initialize allocates buckets for each ticket number in ticketMap and buckets for +// each height up to the declared height from 0. This should be called only when no +// suitable files exist to load the TicketDB from or when rescanTicketDB() is +// called. +// WARNING: Height should be 0 for all non-debug uses. +// +// This function is safe for concurrent access. +func (tmdb *TicketDB) Initialize(np *chaincfg.Params, db database.Db) { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + tmdb.chainParams = np + tmdb.database = db + tmdb.maps.ticketMap = make([]SStxMemMap, BucketsSize, BucketsSize) + tmdb.maps.spentTicketMap = make(map[int64]SStxMemMap) + tmdb.maps.missedTicketMap = make(SStxMemMap) + tmdb.maps.revokedTicketMap = make(SStxMemMap) + + tmdb.StakeEnabledHeight = np.StakeEnabledHeight + + // Fill in live ticket buckets + for i := 0; i < BucketsSize; i++ { + tmdb.maps.ticketMap[uint8(i)] = make(SStxMemMap) + } +} + +// maybeInsertBlock creates a new bucket in the spentTicketMap; this should be +// called this whenever we try to alter the spentTicketMap; this results in +// a lot of redundant calls, but I don't think they're expensive. +// +// This function MUST be called with the tmdb lock held (for writes). +func (tmdb *TicketDB) maybeInsertBlock(height int64) { + // Check if the bucket exists for the given height. + if tmdb.maps.spentTicketMap[height] != nil { + return + } + + // If it doesn't exist, make it. + tmdb.maps.spentTicketMap[height] = make(SStxMemMap) + return +} + +// getTopBlock is the internal function which implements the public +// GetTopBlock. See the comment for GetTopBlock for more details. +// +// This function MUST be called with the tmdb lock held (for writes). +func (tmdb *TicketDB) getTopBlock() int64 { + // Discover the current height. + topHeight := tmdb.StakeEnabledHeight + for { + if tmdb.maps.spentTicketMap[topHeight] == nil { + topHeight-- + break + } + topHeight++ + } + + // If we aren't yet at a stake mature blockchain. + if topHeight == (tmdb.StakeEnabledHeight - 1) { + return int64(-1) + } + return topHeight +} + +// GetTopBlock returns the top (current) block from a TicketDB. 
+// +// This function is safe for concurrent access. +func (tmdb *TicketDB) GetTopBlock() int64 { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + return tmdb.getTopBlock() +} + +// LoadTicketDBs fetches the stored TicketDB and UsedTicketDB from the disk and +// stores them. +// Call this after the blockchain has been loaded into the daemon. +// TODO: Make a function that checks to see if the files exist before attempting +// to load them? Or do that elsewhere. +// +// This function is safe for concurrent access. +func (tmdb *TicketDB) LoadTicketDBs(tmsPath, tmsLoc string, np *chaincfg.Params, + db database.Db) error { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + tmdb.chainParams = np + tmdb.database = db + + tmdb.StakeEnabledHeight = np.StakeEnabledHeight + + filename := filepath.Join(tmsPath, tmsLoc) + + // Load the maps from disk. + diskTicketMaps, errDiskTM := ioutil.ReadFile(filename) + if errDiskTM != nil { + return fmt.Errorf("TicketDB err @ loadTicketDBs: could not load " + + "serialized ticketMaps from disk") + } + + // Create buffer for maps and load the raw file into it. + var loadedTicketMaps TicketMaps + + // Decode the maps from the buffer. + errDeserialize := loadedTicketMaps.GobDecode(diskTicketMaps) + if errDeserialize != nil { + return fmt.Errorf("could not deserialize stored ticketMaps") + } + tmdb.maps = loadedTicketMaps + + // Get the latest block height from the database. + _, curHeight, err := tmdb.database.NewestSha() + if err != nil { + return err + } + + // Check and see if the height of spentTicketMap is the same as the current + // height of the blockchain. If it isn't, spin up next DB for both from the + // blockchain itself (rescanTicketDB). + stmHeight := tmdb.getTopBlock() + + // The database chain is shorter than the ticket db chain, abort. + if stmHeight > curHeight { + return fmt.Errorf("Ticket DB err @ loadTicketDbs: there were more "+ + "blocks in the ticketDb (%v) than there were in the "+ + "main db (%v); try deleting the ticket database file", + stmHeight, curHeight) + } + + // The ticket db chain is shorter than the database chain, resync. + if stmHeight < curHeight { + log.Debugf("current height: %v, stm height %v", curHeight, stmHeight) + log.Errorf("Accessory ticket database is desynced, " + + "resyncing now") + + err := tmdb.rescanTicketDB() + if err != nil { + return err + } + } + + // If maps are empty pre-serializing, they'll be nil upon loading. + // Check to make sure that no maps are nil; if they are, generate + // them. + if tmdb.maps.ticketMap == nil { + tmdb.maps.ticketMap = make([]SStxMemMap, BucketsSize, BucketsSize) + + // Fill in live ticket buckets + for i := uint8(0); ; i++ { + tmdb.maps.ticketMap[i] = make(SStxMemMap) + + if i == math.MaxUint8 { + break + } + } + } + + if tmdb.maps.spentTicketMap == nil { + tmdb.maps.spentTicketMap = make(map[int64]SStxMemMap) + } + + if tmdb.maps.missedTicketMap == nil { + tmdb.maps.missedTicketMap = make(SStxMemMap) + } + + if tmdb.maps.revokedTicketMap == nil { + tmdb.maps.revokedTicketMap = make(SStxMemMap) + } + + return nil +} + +// Store serializes and stores a TicketDB. Only intended to be called when shut +// down has been initiated and daemon network activity has ceased. +// TODO: Serialize in a way that is cross-platform instead of gob encoding. +// +// This function is safe for concurrent access. 
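Before the Store implementation below, a minimal round-trip sketch of the gob serialization it and LoadTicketDBs rely on, using the GobEncode/GobDecode methods defined above on a single TicketData value. The import path assumes the blockchain/stake location this patch introduces, and the hash and height values are made up:

package main

import (
	"fmt"

	"github.com/decred/dcrd/blockchain/stake"
	"github.com/decred/dcrd/chaincfg/chainhash"
)

func main() {
	// Note the prefix matches the first byte of the SStx hash, per the
	// bucketing convention used by the ticket maps.
	orig := stake.NewTicketData(chainhash.Hash{0x6b}, 0x6b, chainhash.Hash{},
		45000, false, false)

	// Serialize with the same gob scheme Store applies to the full map set.
	blob, err := orig.GobEncode()
	if err != nil {
		panic(err)
	}

	var restored stake.TicketData
	if err := restored.GobDecode(blob); err != nil {
		panic(err)
	}
	fmt.Println(restored.SStxHash == orig.SStxHash, restored.BlockHeight)
}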
+func (tmdb *TicketDB) Store(tmsPath string, tmsLoc string) error { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + log.Infof("Storing the ticket database to disk") + + ticketMapsBytes, err := tmdb.maps.GobEncode() + if err != nil { + return fmt.Errorf("could not serialize ticketMaps: %v", err.Error()) + } + + filename := filepath.Join(tmsPath, tmsLoc) + + // Write the encoded ticketMap and spentTicketMap to disk + if err := ioutil.WriteFile(filename, ticketMapsBytes, 0644); err != nil { + return fmt.Errorf("could not write serialized "+ + "ticketMaps to disk: %v", err.Error()) + } + + return nil +} + +// Close deletes a TicketDB and its contents. Intended to be called only when +// store() has first been called. +// Decred: In the daemon this is never called because it causes some problems +// with storage. As everything is a native Go structure in the first place, +// we don't really need this as far as I can tell, but I'll leave this in here +// in case a usage is found. +// +// This function is safe for concurrent access. +func (tmdb *TicketDB) Close() { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + return +} + +// -------------------------------------------------------------------------------- +// ! WARNING +// THESE ARE DIRECT MANIPULATION FUNCTIONS THAT SHOULD MAINLY BE USED INTERNALLY +// OR FOR FOR DEBUGGING PURPOSES ONLY. + +// pushLiveTicket pushes a mature ticket into the ticketMap. +// +// This function MUST be called with the tmdb lock held (for writes). +func (tmdb *TicketDB) pushLiveTicket(ticket *TicketData) error { + // Make sure the ticket bucket exists; if it doesn't something has gone wrong + // with the initialization + if tmdb.maps.ticketMap[ticket.Prefix] == nil { + return fmt.Errorf("TicketDB err @ pushLiveTicket: bucket for tickets "+ + "numbered %v missing", ticket.Prefix) + } + + // Make sure the ticket isn't already in the map + if tmdb.maps.ticketMap[ticket.Prefix][ticket.SStxHash] != nil { + return fmt.Errorf("TicketDB err @ pushLiveTicket: ticket with hash %v "+ + "already exists", ticket.SStxHash) + } + + // Always false going into live ticket map + ticket.Missed = false + + // Put the ticket in its respective bucket in the map + tmdb.maps.ticketMap[ticket.Prefix][ticket.SStxHash] = ticket + + return nil +} + +// pushSpentTicket pushes a used ticket into the spentTicketMap. +// +// This function MUST be called with the tmdb lock held (for writes). +func (tmdb *TicketDB) pushSpentTicket(spendHeight int64, ticket *TicketData) error { + // Make sure there's a bucket in the map for used tickets + tmdb.maybeInsertBlock(spendHeight) + + // Make sure the ticket isn't already in the map + if tmdb.maps.spentTicketMap[spendHeight][ticket.SStxHash] != nil { + return fmt.Errorf("TicketDB err @ pushSpentTicket: ticket with hash "+ + "%v already exists", ticket.SStxHash) + } + + tmdb.maps.spentTicketMap[spendHeight][ticket.SStxHash] = ticket + + return nil +} + +// pushMissedTicket pushes a used ticket into the spentTicketMap. +// +// This function MUST be called with the tmdb lock held (for writes). +func (tmdb *TicketDB) pushMissedTicket(ticket *TicketData) error { + // Make sure the map exists. + if tmdb.maps.missedTicketMap == nil { + return fmt.Errorf("TicketDB err @ pushMissedTicket: map missing") + } + + // Make sure the ticket isn't already in the map. 
+ if tmdb.maps.missedTicketMap[ticket.SStxHash] != nil { + return fmt.Errorf("TicketDB err @ pushMissedTicket: ticket with "+ + "hash %v already exists", ticket.SStxHash) + } + + // Always true going into missedTicketMap. + ticket.Missed = true + + tmdb.maps.missedTicketMap[ticket.SStxHash] = ticket + + return nil +} + +// pushRevokedTicket pushes a used ticket into the spentTicketMap. +// +// This function MUST be called with the tmdb lock held (for writes). +func (tmdb *TicketDB) pushRevokedTicket(ticket *TicketData) error { + // Make sure the map exists. + if tmdb.maps.revokedTicketMap == nil { + return fmt.Errorf("TicketDB err @ pushRevokedTicket: map missing") + } + + // Make sure the ticket isn't already in the map. + if tmdb.maps.revokedTicketMap[ticket.SStxHash] != nil { + return fmt.Errorf("TicketDB err @ pushRevokedTicket: ticket with "+ + "hash %v already exists", ticket.SStxHash) + } + + // Always true going into revokedTicketMap. + ticket.Missed = true + + tmdb.maps.revokedTicketMap[ticket.SStxHash] = ticket + + return nil +} + +// removeLiveTicket removes live tickets that were added to the ticketMap from +// tickets maturing. +// +// This function MUST be called with the tmdb lock held (for writes). +func (tmdb *TicketDB) removeLiveTicket(ticket *TicketData) error { + // Make sure the ticket bucket exists; if it doesn't something has gone wrong + // with the initialization + if tmdb.maps.ticketMap[ticket.Prefix] == nil { + return fmt.Errorf("TicketDB err @ removeLiveTicket: bucket for "+ + "tickets numbered %v missing", ticket.Prefix) + } + + // Make sure the ticket itself exists + if tmdb.maps.ticketMap[ticket.Prefix][ticket.SStxHash] == nil { + return fmt.Errorf("TicketDB err @ removeLiveTicket: ticket %v to "+ + "delete does not exist!", ticket.SStxHash) + } + + // Make sure that the tickets are indentical in the unlikely case of a hash + // collision + if *tmdb.maps.ticketMap[ticket.Prefix][ticket.SStxHash] != *ticket { + return fmt.Errorf("TicketDB err @ removeLiveTicket: ticket " + + "hash duplicate, but non-identical data") + } + + delete(tmdb.maps.ticketMap[ticket.Prefix], ticket.SStxHash) + return nil +} + +// removeSpentTicket removes spent tickets that were added to the spentTicketMap. +// +// This function MUST be called with the tmdb lock held (for writes). +func (tmdb *TicketDB) removeSpentTicket(spendHeight int64, ticket *TicketData) error { + // Make sure the height bucket exists; if it doesn't something has gone wrong + // with the initialization + if tmdb.maps.spentTicketMap[spendHeight] == nil { + return fmt.Errorf("TicketDB err @ removeSpentTicket: bucket for "+ + "BlockHeight numbered %v missing", ticket.BlockHeight) + } + + // Make sure the ticket itself exists + if tmdb.maps.spentTicketMap[spendHeight][ticket.SStxHash] == nil { + return fmt.Errorf("TicketDB err @ removeSpentTicket: ticket to "+ + "delete does not exist!", ticket.SStxHash) + } + + // Make sure that the tickets are indentical in the unlikely case of a hash + // collision + if *tmdb.maps.spentTicketMap[spendHeight][ticket.SStxHash] != *ticket { + return fmt.Errorf("TicketDB err @ removeSpentTicket: ticket hash " + + "duplicate, but non-identical data") + } + + delete(tmdb.maps.spentTicketMap[spendHeight], ticket.SStxHash) + return nil +} + +// removeMissedTicket removes missed tickets that were added to the spentTicketMap. +// +// This function MUST be called with the tmdb lock held (for writes). 
+func (tmdb *TicketDB) removeMissedTicket(ticket *TicketData) error { + // Make sure the map exists. + if tmdb.maps.missedTicketMap == nil { + return fmt.Errorf("TicketDB err @ removeMissedTicket: map missing") + } + + // Make sure the ticket exists + if tmdb.maps.missedTicketMap[ticket.SStxHash] == nil { + return fmt.Errorf("TicketDB err @ removeMissedTicket: ticket to "+ + "delete does not exist!", ticket.SStxHash) + } + + // Make sure that the tickets are indentical in the unlikely case of a hash + // collision + if *tmdb.maps.missedTicketMap[ticket.SStxHash] != *ticket { + return fmt.Errorf("TicketDB err @ removeMissedTicket: ticket hash " + + "duplicate, but non-identical data") + } + + delete(tmdb.maps.missedTicketMap, ticket.SStxHash) + return nil +} + +// removeRevokedTicket removes missed tickets that were added to the +// revoked ticket map. +// +// This function MUST be called with the tmdb lock held (for writes). +func (tmdb *TicketDB) removeRevokedTicket(ticket *TicketData) error { + // Make sure the map exists. + if tmdb.maps.revokedTicketMap == nil { + return fmt.Errorf("TicketDB err @ removeRevokedTicket: map missing") + } + + // Make sure the ticket exists. + if tmdb.maps.revokedTicketMap[ticket.SStxHash] == nil { + return fmt.Errorf("TicketDB err @ removeRevokedTicket: ticket to "+ + "delete does not exist!", ticket.SStxHash) + } + + // Make sure that the tickets are indentical in the unlikely case of a hash + // collision. + if *tmdb.maps.revokedTicketMap[ticket.SStxHash] != *ticket { + return fmt.Errorf("TicketDB err @ removeRevokedTicket: ticket hash " + + "duplicate, but non-identical data") + } + + delete(tmdb.maps.revokedTicketMap, ticket.SStxHash) + return nil +} + +// removeSpentHeight removes a height bucket from the SpentTicketMap. +// +// This function MUST be called with the tmdb lock held (for writes). +func (tmdb *TicketDB) removeSpentHeight(height int64) error { + // Make sure the height exists + if tmdb.maps.spentTicketMap[height] == nil { + return fmt.Errorf("TicketDB err @ removeSpentHeight: height to "+ + "delete does not exist!", height) + } + + delete(tmdb.maps.spentTicketMap, height) + return nil +} + +// DumpMapsPointer is a testing function that returns a pointer to +// the internally held maps. Used for testing. +// +// This function is safe for concurrent access. +func (tmdb *TicketDB) DumpMapsPointer() TicketMaps { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + return tmdb.maps +} + +// END DIRECT ACCESS/DEBUG TOOLS. +// -------------------------------------------------------------------------------- + +// cloneSStxMemMap is a helper function to clone mem maps to avoid races. +func cloneSStxMemMap(mapToCopy SStxMemMap) SStxMemMap { + newMemMap := make(SStxMemMap) + + for hash, ticket := range mapToCopy { + newMemMap[hash] = NewTicketData(ticket.SStxHash, + ticket.Prefix, + ticket.SpendHash, + ticket.BlockHeight, + ticket.Missed, + ticket.Expired) + } + + return newMemMap +} + +// CheckLiveTicket checks for the existence of a live ticket in ticketMap. +// +// This function is safe for concurrent access. 
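CheckLiveTicket below keys into the live-ticket maps by the first byte of the ticket hash, which is the same pre-sort bucketing (BucketsSize = 256) used throughout this file. A minimal sketch of that scheme; bucketFor is an illustrative helper, not a package function:

package main

import (
	"fmt"

	"github.com/decred/dcrd/chaincfg/chainhash"
)

const bucketsSize = 256 // mirrors BucketsSize = math.MaxUint8 + 1

// bucketFor returns the pre-sort bucket for a ticket: simply the leading
// byte of its SStx hash, stored as the Prefix field on TicketData.
func bucketFor(h chainhash.Hash) uint8 {
	return h[0]
}

func main() {
	var h chainhash.Hash
	h[0] = 0xc3
	fmt.Printf("ticket %x lands in bucket %d of %d\n", h[:4], bucketFor(h), bucketsSize)
}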
+func (tmdb *TicketDB) CheckLiveTicket(txHash chainhash.Hash) (bool, error) { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + prefix := txHash[0] + + // Make sure the ticket bucket exists; if it doesn't something has gone wrong + // with the initialization + if tmdb.maps.ticketMap[prefix] == nil { + return false, fmt.Errorf("TicketDB err @ checkLiveTicket: bucket for "+ + "tickets numbered %v missing", prefix) + } + + if tmdb.maps.ticketMap[prefix][txHash] != nil { + return true, nil + } + return false, nil +} + +// CheckMissedTicket checks for the existence of a missed ticket in the missed +// ticket map. Assumes missedTicketMap is intialized. +// +// This function is safe for concurrent access. +func (tmdb *TicketDB) CheckMissedTicket(txHash chainhash.Hash) bool { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + if _, exists := tmdb.maps.missedTicketMap[txHash]; exists { + return true + } + return false +} + +// CheckRevokedTicket checks for the existence of a revoked ticket in the +// revoked ticket map. Assumes missedTicketMap is intialized. +// +// This function is safe for concurrent access. +func (tmdb *TicketDB) CheckRevokedTicket(txHash chainhash.Hash) bool { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + if _, exists := tmdb.maps.revokedTicketMap[txHash]; exists { + return true + } + return false +} + +// DumpLiveTickets duplicates the contents of a ticket bucket from the databases's +// ticketMap and returns them to the user. +// +// This function is safe for concurrent access. +func (tmdb *TicketDB) DumpLiveTickets(bucket uint8) (SStxMemMap, error) { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + tickets := make(SStxMemMap) + + // Make sure the ticket bucket exists; if it doesn't something has gone wrong + // with the initialization + if tmdb.maps.ticketMap[bucket] == nil { + return nil, fmt.Errorf("TicketDB err @ DumpLiveTickets: bucket for "+ + "tickets numbered %v missing", bucket) + } + + for _, ticket := range tmdb.maps.ticketMap[bucket] { + tickets[ticket.SStxHash] = NewTicketData(ticket.SStxHash, + ticket.Prefix, + ticket.SpendHash, + ticket.BlockHeight, + ticket.Missed, + ticket.Expired) + } + return tickets, nil +} + +// DumpSpentTickets duplicates the contents of a ticket bucket from the databases's +// spentTicketMap and returns them to the user. +// +// This function is safe for concurrent access. +func (tmdb *TicketDB) DumpSpentTickets(height int64) (SStxMemMap, error) { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + tickets := make(SStxMemMap) + + // Make sure the ticket bucket exists; if it doesn't something has gone wrong + // with the initialization + if tmdb.maps.spentTicketMap[height] == nil { + return nil, fmt.Errorf("TicketDB err @ dumpSpentTickets: bucket for "+ + "tickets numbered %v missing", height) + } + + for _, ticket := range tmdb.maps.spentTicketMap[height] { + tickets[ticket.SStxHash] = NewTicketData(ticket.SStxHash, + ticket.Prefix, + ticket.SpendHash, + ticket.BlockHeight, + ticket.Missed, + ticket.Expired) + } + return tickets, nil +} + +// DumpMissedTickets duplicates the contents of a ticket bucket from the +// databases's missedTicketMap and returns them to the user. +// +// This function is safe for concurrent access. +func (tmdb *TicketDB) DumpMissedTickets() (SStxMemMap, error) { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + tickets := make(SStxMemMap) + + // Make sure the map is actually initialized. 
+ if tmdb.maps.missedTicketMap == nil { + return nil, fmt.Errorf("TicketDB err @ missedTicketMap: map for " + + "missed tickets uninitialized") + } + + for _, ticket := range tmdb.maps.missedTicketMap { + tickets[ticket.SStxHash] = NewTicketData(ticket.SStxHash, + ticket.Prefix, + ticket.SpendHash, + ticket.BlockHeight, + ticket.Missed, + ticket.Expired) + } + return tickets, nil +} + +// DumpRevokedTickets duplicates the contents of a ticket bucket from the +// databases's missedTicketMap and returns them to the user. +// +// This function is safe for concurrent access. +func (tmdb *TicketDB) DumpRevokedTickets() (SStxMemMap, error) { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + tickets := make(SStxMemMap) + + // Make sure the map is actually initialized. + if tmdb.maps.revokedTicketMap == nil { + return nil, fmt.Errorf("TicketDB err @ revokedTicketMap: map for " + + "revoked tickets uninitialized") + } + + for _, ticket := range tmdb.maps.revokedTicketMap { + tickets[ticket.SStxHash] = NewTicketData(ticket.SStxHash, + ticket.Prefix, + ticket.SpendHash, + ticket.BlockHeight, + ticket.Missed, + ticket.Expired) + } + return tickets, nil +} + +// GetMissedTicket locates a missed ticket in the missed ticket database, +// duplicates the ticket data, and returns it. +// +// This function is safe for concurrent access. +func (tmdb *TicketDB) GetMissedTicket(hash chainhash.Hash) *TicketData { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + if ticket, exists := tmdb.maps.missedTicketMap[hash]; exists { + return NewTicketData(ticket.SStxHash, + ticket.Prefix, + ticket.SpendHash, + ticket.BlockHeight, + ticket.Missed, + ticket.Expired) + } + return nil +} + +// GetRevokedTicket locates a revoked ticket in the revoked ticket database, +// duplicates the ticket data, and returns it. +// +// This function is safe for concurrent access. +func (tmdb *TicketDB) GetRevokedTicket(hash chainhash.Hash) *TicketData { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + if ticket, exists := tmdb.maps.revokedTicketMap[hash]; exists { + return NewTicketData(ticket.SStxHash, + ticket.Prefix, + ticket.SpendHash, + ticket.BlockHeight, + ticket.Missed, + ticket.Expired) + } + return nil +} + +// GetLiveTicketBucketData creates a map of [int]int indicating the number +// of tickets in each bucket. Used for an RPC call. +func (tmdb *TicketDB) GetLiveTicketBucketData() map[int]int { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + ltbd := make(map[int]int) + for i := 0; i < BucketsSize; i++ { + ltbd[int(i)] = len(tmdb.maps.ticketMap[i]) + } + + return ltbd +} + +// GetLiveTicketsInBucketData creates a map indicating the ticket hash and the +// owner's address for each bucket. Used for an RPC call. +func (tmdb *TicketDB) GetLiveTicketsInBucketData( + bucket uint8) (map[chainhash.Hash]dcrutil.Address, error) { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + ltbd := make(map[chainhash.Hash]dcrutil.Address) + tickets := tmdb.maps.ticketMap[bucket] + for _, ticket := range tickets { + // Load the ticket from the database and find the address that it's + // going to. + txReply, err := tmdb.database.FetchTxBySha(&ticket.SStxHash) + if err != nil { + return nil, err + } + + _, addr, _, err := + txscript.ExtractPkScriptAddrs(txReply[0].Tx.TxOut[0].Version, + txReply[0].Tx.TxOut[0].PkScript, tmdb.chainParams) + if err != nil { + return nil, err + } + ltbd[ticket.SStxHash] = addr[0] + } + + return ltbd, nil +} + +// GetLiveTicketsForAddress gets all currently active tickets for a given +// address. 
+func (tmdb *TicketDB) GetLiveTicketsForAddress( + address dcrutil.Address) ([]chainhash.Hash, error) { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + var ltfa []chainhash.Hash + for i := 0; i < BucketsSize; i++ { + for _, ticket := range tmdb.maps.ticketMap[i] { + // Load the ticket from the database and find the address that it's + // going to. + txReply, err := tmdb.database.FetchTxBySha(&ticket.SStxHash) + if err != nil { + return nil, err + } + + _, addr, _, err := + txscript.ExtractPkScriptAddrs(txReply[0].Tx.TxOut[0].Version, + txReply[0].Tx.TxOut[0].PkScript, tmdb.chainParams) + if err != nil { + return nil, err + } + + // Compare the HASH160 result and see if it's equal. + if bytes.Equal(addr[0].ScriptAddress(), address.ScriptAddress()) { + ltfa = append(ltfa, ticket.SStxHash) + } + } + } + + return ltfa, nil +} + +// spendTickets transfers tickets from the ticketMap to the spentTicketMap. Useful +// when connecting blocks. Also pushes missed tickets to the missed ticket map. +// usedtickets is a map that contains all tickets that were actually used in SSGen +// votes; all other tickets are considered missed. +// +// This function MUST be called with the tmdb lock held (for writes). +func (tmdb *TicketDB) spendTickets(parentBlock *dcrutil.Block, + usedTickets map[chainhash.Hash]struct{}, + spendingHashes map[chainhash.Hash]chainhash.Hash) (SStxMemMap, error) { + + // If there is nothing being spent, break. + if len(spendingHashes) < 1 { + return nil, nil + } + + // Make sure there's a bucket in the map for used tickets + height := parentBlock.Height() + 1 + tmdb.maybeInsertBlock(height) + + tempTickets := make(SStxMemMap) + + // Sort the entire list of tickets lexicographically by sorting + // each bucket and then appending it to the list. + totalTickets := 0 + sortedSlice := make([]*TicketData, 0) + for i := 0; i < BucketsSize; i++ { + mapLen := len(tmdb.maps.ticketMap[i]) + totalTickets += mapLen + tempTdSlice := NewTicketDataSlice(mapLen) + itr := 0 // Iterator + for _, td := range tmdb.maps.ticketMap[i] { + tempTdSlice[itr] = td + itr++ + } + sort.Sort(tempTdSlice) + sortedSlice = append(sortedSlice, tempTdSlice...) + } + + // Use the parent block's header to seed a PRNG that picks the lottery winners. + ticketsPerBlock := int(tmdb.chainParams.TicketsPerBlock) + pbhB, err := parentBlock.MsgBlock().Header.Bytes() + if err != nil { + return nil, err + } + prng := NewHash256PRNG(pbhB) + ts, err := FindTicketIdxs(int64(totalTickets), ticketsPerBlock, prng) + if err != nil { + return nil, err + } + ticketsToSpendOrMiss := make([]*TicketData, ticketsPerBlock, ticketsPerBlock) + for i, idx := range ts { + ticketsToSpendOrMiss[i] = sortedSlice[idx] + } + + // Spend or miss these tickets by checking for their existence in the + // passed usedtickets map. + tixSpent := 0 + tixMissed := 0 + for _, ticket := range ticketsToSpendOrMiss { + // Move the ticket from active tickets map into the used tickets map + // if the ticket was spent. 
+		_, wasSpent := usedTickets[ticket.SStxHash]
+
+		if wasSpent {
+			ticket.Missed = false
+			ticket.SpendHash = spendingHashes[ticket.SStxHash]
+			err := tmdb.pushSpentTicket(height, ticket)
+			if err != nil {
+				return nil, err
+			}
+			err = tmdb.removeLiveTicket(ticket)
+			if err != nil {
+				return nil, err
+			}
+			tixSpent++
+		} else { // Ticket was not in usedTickets, so it was missed.
+			ticket.Missed = true // TODO fix test failure @ L150 due to this
+			err := tmdb.pushSpentTicket(height, ticket)
+			if err != nil {
+				return nil, err
+			}
+			err = tmdb.pushMissedTicket(ticket)
+			if err != nil {
+				return nil, err
+			}
+			err = tmdb.removeLiveTicket(ticket)
+			if err != nil {
+				return nil, err
+			}
+			tixMissed++
+		}
+
+		// Report on the spent and missed tickets for the block in debug.
+		if ticket.Missed {
+			log.Debugf("Ticket %v has been missed and expired from "+
+				"the lottery pool as a missed ticket", ticket.SStxHash)
+		} else {
+			log.Debugf("Ticket %v was spent and removed from "+
+				"the lottery pool", ticket.SStxHash)
+		}
+
+		// Add the ticket to the temporary tickets buffer for later use in
+		// map restoration if needed.
+		tempTickets[ticket.SStxHash] = ticket
+	}
+
+	// Some sanity checks.
+	if tixSpent != len(usedTickets) {
+		errStr := fmt.Sprintf("spendTickets error: %v tickets were spent, "+
+			"but %v tickets should have been spent", tixSpent,
+			len(usedTickets))
+		return nil, errors.New(errStr)
+	}
+
+	if tixMissed != (ticketsPerBlock - len(usedTickets)) {
+		errStr := fmt.Sprintf("spendTickets error: %v tickets were missed, "+
+			"but %v tickets should have been missed", tixMissed,
+			ticketsPerBlock-len(usedTickets))
+		return nil, errors.New(errStr)
+	}
+
+	if (tixSpent + tixMissed) != ticketsPerBlock {
+		errStr := fmt.Sprintf("spendTickets error: %v tickets were spent and "+
+			"%v were missed, but TicketsPerBlock is %v", tixSpent, tixMissed,
+			ticketsPerBlock)
+		return nil, errors.New(errStr)
+	}
+
+	return tempTickets, nil
+}
+
+// expireTickets finds all live tickets that went live TicketExpiry many blocks
+// before the given height and moves these now expired tickets to the missed
+// tickets map.
+//
+// This function MUST be called with the tmdb lock held (for writes).
+func (tmdb *TicketDB) expireTickets(height int64) (SStxMemMap, error) {
+	toExpireHeight := height - int64(tmdb.chainParams.TicketExpiry)
+	if toExpireHeight < int64(tmdb.StakeEnabledHeight) {
+		return nil, nil
+	}
+
+	expiredTickets := make(SStxMemMap)
+
+	for i := 0; i < BucketsSize; i++ {
+		for _, ticket := range tmdb.maps.ticketMap[i] {
+			if ticket.BlockHeight == toExpireHeight {
+				err := tmdb.pushSpentTicket(height, ticket)
+				if err != nil {
+					return nil, err
+				}
+				err = tmdb.pushMissedTicket(ticket)
+				if err != nil {
+					return nil, err
+				}
+				err = tmdb.removeLiveTicket(ticket)
+				if err != nil {
+					return nil, err
+				}
+
+				ticket.Expired = true
+				expiredTickets[ticket.SStxHash] = ticket
+			}
+		}
+	}
+
+	return expiredTickets, nil
+}
+
+// revokeTickets takes a list of revoked tickets from SSRtx and removes them
+// from the missedTicketMap, then returns all the killed tickets in a map.
+//
+// This function MUST be called with the tmdb lock held (for writes).
+func (tmdb *TicketDB) revokeTickets( + revocations map[chainhash.Hash]struct{}) (SStxMemMap, error) { + + revokedTickets := make(SStxMemMap) + + for hash, _ := range revocations { + ticket := tmdb.maps.missedTicketMap[hash] + + if ticket == nil { + return nil, errors.New("revokeTickets attempted to revoke ticket " + + "not found in missedTicketsMap!") + } + revokedTickets[hash] = ticket + + err := tmdb.pushRevokedTicket(ticket) + if err != nil { + return nil, err + } + + err = tmdb.removeMissedTicket(ticket) + if err != nil { + return nil, err + } + } + + return revokedTickets, nil +} + +// unrevokeTickets takes a list of revoked tickets from SSRtx and moves them to +// the missedTicketMap, then returns all these tickets in a map. +// +// This function MUST be called with the tmdb lock held (for writes). +func (tmdb *TicketDB) unrevokeTickets(height int64) (SStxMemMap, error) { + // Get the block of interest. + var hash, errHash = tmdb.database.FetchBlockShaByHeight(height) + if errHash != nil { + return nil, errHash + } + + var block, errBlock = tmdb.database.FetchBlockBySha(hash) + if errBlock != nil { + return nil, errBlock + } + + revocations := make(map[chainhash.Hash]bool) + + for _, staketx := range block.STransactions() { + if is, _ := IsSSRtx(staketx); is { + msgTx := staketx.MsgTx() + sstxIn := msgTx.TxIn[0] // sstx input + sstxHash := sstxIn.PreviousOutPoint.Hash + + revocations[sstxHash] = true + } + } + + unrevokedTickets := make(SStxMemMap) + + for hash, _ := range revocations { + ticket := tmdb.maps.revokedTicketMap[hash] + + if ticket == nil { + return nil, errors.New("unrevokeTickets attempted to unrevoke " + + "ticket not found in revokedTicketsMap!") + } + + unrevokedTickets[hash] = ticket + + err := tmdb.pushMissedTicket(ticket) + if err != nil { + return nil, err + } + + err = tmdb.removeRevokedTicket(ticket) + if err != nil { + return nil, err + } + } + + return unrevokedTickets, nil +} + +// unspendTickets unspends all tickets that were previously spent at some height to +// the ticketMap; used for rolling back changes from the old main chain blocks when +// encountering and validating a fork. +// +// This function MUST be called with the tmdb lock held (for writes). +func (tmdb *TicketDB) unspendTickets(height int64) (SStxMemMap, error) { + tempTickets := make(SStxMemMap) + + for _, ticket := range tmdb.maps.spentTicketMap[height] { + if ticket.Missed == true { + err := tmdb.removeMissedTicket(ticket) + if err != nil { + return nil, err + } + + // Marked that it was not missed or expired. + ticket.Missed = false + ticket.Expired = false + } + + // Zero out the spend hash. + ticket.SpendHash = chainhash.Hash{} + + // Add the ticket to the temporary tickets buffer for later use in + // map restoration if needed. + tempTickets[ticket.SStxHash] = ticket + + // Move the ticket from used tickets map into the active tickets map. + err := tmdb.pushLiveTicket(ticket) + if err != nil { + return nil, err + } + + // Delete the ticket from the spent ticket map. + err = tmdb.removeSpentTicket(height, ticket) + if err != nil { + return nil, err + } + } + + // Delete the height itself from the spentTicketMap. + err := tmdb.removeSpentHeight(height) + if err != nil { + return nil, err + } + + return tempTickets, nil +} + +// getNewTicketsFromHeight loads a block from leveldb and parses SStx from it using +// chain/stake's IsSStx function. +// This is intended to be used to get ticket numbers from the MAIN CHAIN as +// decribed in the DB. 
+// SIDE CHAIN evaluation should be instantiated in package:chain. +// +// This function MUST be called with the tmdb lock held (for reads). +func (tmdb *TicketDB) getNewTicketsFromHeight(height int64) (SStxMemMap, error) { + if height < tmdb.StakeEnabledHeight { + errStr := fmt.Sprintf("Tried to generate tickets for immature blockchain"+ + " at height %v", height) + return nil, errors.New(errStr) + } + + matureHeight := height - int64(tmdb.chainParams.TicketMaturity) + + var hash, errHash = tmdb.database.FetchBlockShaByHeight(matureHeight) + if errHash != nil { + return nil, errHash + } + + var block, errBlock = tmdb.database.FetchBlockBySha(hash) + if errBlock != nil { + return nil, errBlock + } + + // Create a map of ticketHash --> ticket to fill out + tickets := make(SStxMemMap) + + stakeTransactions := block.STransactions() + + // Fill out the ticket data as best we can initially + for _, staketx := range stakeTransactions { + if is, _ := IsSStx(staketx); is { + // Calculate the prefix for pre-sort. + sstxHash := *staketx.Sha() + + ticket := new(TicketData) + ticket.SStxHash = sstxHash + ticket.Prefix = uint8(sstxHash[0]) + ticket.SpendHash = chainhash.Hash{} // Unspent at this point + ticket.BlockHeight = height + ticket.Missed = false + ticket.Expired = false + + tickets[ticket.SStxHash] = ticket + } + } + + return tickets, nil +} + +// pushMatureTicketsAtHeight matures tickets from TICKET_MATURITY blocks ago by +// looking them up in the database. +// +// This function MUST be called with the tmdb lock held (for writes). +func (tmdb *TicketDB) pushMatureTicketsAtHeight(height int64) (SStxMemMap, error) { + tempTickets := make(SStxMemMap) + + tickets, err := tmdb.getNewTicketsFromHeight(height) + if err != nil { + return nil, err + } + + for _, ticket := range tickets { + tempTickets[ticket.SStxHash] = ticket + + errPush := tmdb.pushLiveTicket(ticket) + if errPush != nil { + return nil, errPush + } + } + + return tempTickets, nil +} + +// insertBlock is the internal function which implements the public +// InsertBlock. See the comment for InsertBlock for more details. +// +// This function MUST be called with the tmdb lock held (for writes). +func (tmdb *TicketDB) insertBlock(block *dcrutil.Block) (SStxMemMap, + SStxMemMap, SStxMemMap, error) { + + height := block.Height() + if height < tmdb.StakeEnabledHeight { + return nil, nil, nil, nil + } + + // Sanity check: Does the number of tickets in ticketMap equal the number + // of tickets indicated in the header? + poolSizeBlock := int(block.MsgBlock().Header.PoolSize) + poolSize := 0 + for i := 0; i < BucketsSize; i++ { + poolSize += len(tmdb.maps.ticketMap[i]) + } + if poolSize != poolSizeBlock { + return nil, nil, nil, fmt.Errorf("ticketpoolsize in block %v not "+ + "equal to the calculated ticketpoolsize, indicating database "+ + "corruption (got %v, want %v)", + block.Sha(), + poolSizeBlock, + poolSize) + } + + // Create the block in the spentTicketMap. + tmdb.maybeInsertBlock(block.Height()) + + // Iterate through all the SSGen (vote) tx in the block and add them to + // a map of tickets that were actually used. The rest of the tickets in + // the buckets were then considered missed --> missedTicketMap. + // Note that it doesn't really matter what value you set usedTickets to, + // it's just a map of tickets that were actually used in the block. It + // would probably be more efficient to use an array. 
+ usedTickets := make(map[chainhash.Hash]struct{}) + spendingHashes := make(map[chainhash.Hash]chainhash.Hash) + revocations := make(map[chainhash.Hash]struct{}) + + for _, staketx := range block.STransactions() { + if is, _ := IsSSGen(staketx); is { + msgTx := staketx.MsgTx() + sstxIn := msgTx.TxIn[1] // sstx input + sstxHash := sstxIn.PreviousOutPoint.Hash + + usedTickets[sstxHash] = struct{}{} + spendingHashes[sstxHash] = *staketx.Sha() + } + + if is, _ := IsSSRtx(staketx); is { + msgTx := staketx.MsgTx() + sstxIn := msgTx.TxIn[0] // sstx input + sstxHash := sstxIn.PreviousOutPoint.Hash + + revocations[sstxHash] = struct{}{} + } + } + + // Spend or miss all the necessary tickets and do some sanity checks. + parentBlock, err := tmdb.database.FetchBlockBySha( + &block.MsgBlock().Header.PrevBlock) + if err != nil { + return nil, nil, nil, err + } + spentAndMissedTickets, err := tmdb.spendTickets(parentBlock, + usedTickets, + spendingHashes) + if err != nil { + return nil, nil, nil, err + } + + // Expire all old tickets, and stick them into the spent and missed ticket + // map too. + expiredTickets, err := tmdb.expireTickets(height) + if err != nil { + return nil, nil, nil, err + } + if len(expiredTickets) > 0 && len(spentAndMissedTickets) == 0 { + return nil, nil, nil, fmt.Errorf("tried to expire tickets before " + + "stake validation height! TicketExpiry may be too small") + } + if len(expiredTickets) > 0 { + for hash, ticket := range expiredTickets { + spentAndMissedTickets[hash] = ticket + } + } + + revokedTickets, err := tmdb.revokeTickets(revocations) + if err != nil { + return nil, nil, nil, err + } + + newTickets, err := tmdb.pushMatureTicketsAtHeight(block.Height()) + if err != nil { + return nil, nil, nil, err + } + + log.Debugf("Connected block %v (height %v) to the ticket database", + block.Sha(), block.Height()) + + return cloneSStxMemMap(spentAndMissedTickets), cloneSStxMemMap(newTickets), + cloneSStxMemMap(revokedTickets), nil +} + +// InsertBlock is the main work horse for inserting blocks in the TicketDB. +// Warning: I think this and the remove block functions pass pointers +// back to TicketDB data. If you call this function and use the SStxMemMaps +// it returns you need to make sure you don't modify their contents +// externally. In the future consider passing by value if this causes +// a consensus failure somehow. +// +// This function is safe for concurrent access. +func (tmdb *TicketDB) InsertBlock(block *dcrutil.Block) (SStxMemMap, + SStxMemMap, SStxMemMap, error) { + + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + return tmdb.insertBlock(block) +} + +// unpushMatureTicketsAtHeight unmatures tickets from TICKET_MATURITY blocks ago by +// looking them up in the database. +// +// Safe for concurrent access (does not use TicketDB maps directly). +func (tmdb *TicketDB) unpushMatureTicketsAtHeight(height int64) (SStxMemMap, + error) { + + tempTickets := make(SStxMemMap) + + tickets, err := tmdb.getNewTicketsFromHeight(height) + if err != nil { + return nil, err + } + + for _, ticket := range tickets { + tempTickets[ticket.SStxHash] = ticket + + errUnpush := tmdb.removeLiveTicket(ticket) + if errUnpush != nil { + return nil, errUnpush + } + } + + return tempTickets, nil +} + +// RemoveBlockToHeight is the main work horse for removing blocks from the +// TicketDB. This function will remove all blocks until reaching the block at +// height. +// +// This function is safe for concurrent access. 
+func (tmdb *TicketDB) RemoveBlockToHeight(height int64) (map[int64]SStxMemMap, + map[int64]SStxMemMap, map[int64]SStxMemMap, error) { + + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + if height < tmdb.StakeEnabledHeight { + return nil, nil, nil, fmt.Errorf("TicketDB Error: tried to remove " + + "blocks to before minimum maturation height!") + } + + // Discover the current height + topHeight := tmdb.getTopBlock() + + if height >= topHeight { + return nil, nil, nil, fmt.Errorf("TicketDB @ RemoveBlock: Tried to " + + "remove blocks that are beyond or at the top block!") + } + + // Create pseudo-DB maps of all the changes we're making + unmaturedTicketMap := make(map[int64]SStxMemMap) + unspentTicketMap := make(map[int64]SStxMemMap) + unrevokedTicketMap := make(map[int64]SStxMemMap) + + // Iterates down from the top block, removing all changes that were made to + // the stake db at that block, until it reaches the height specified. + for curHeight := topHeight; curHeight > height; curHeight-- { + unmaturedTicketMap[curHeight] = make(SStxMemMap) + unspentTicketMap[curHeight] = make(SStxMemMap) + unrevokedTicketMap[curHeight] = make(SStxMemMap) + + unmaturedTickets, err := tmdb.unpushMatureTicketsAtHeight(curHeight) + if err != nil { + return nil, nil, nil, err + } + unmaturedTicketMap[curHeight] = cloneSStxMemMap(unmaturedTickets) + + unrevokedTickets, err := tmdb.unrevokeTickets(curHeight) + if err != nil { + return nil, nil, nil, err + } + unrevokedTicketMap[curHeight] = cloneSStxMemMap(unrevokedTickets) + + // Note that unspendTickets below also deletes the block from the + // spentTicketMap, and so updates our top block. + unspentTickets, err := tmdb.unspendTickets(curHeight) + if err != nil { + return nil, nil, nil, err + } + unspentTicketMap[curHeight] = cloneSStxMemMap(unspentTickets) + } + + return unmaturedTicketMap, unrevokedTicketMap, unspentTicketMap, nil +} + +// rescanTicketDB is the internal function which implements the public +// RescanTicketDB. See the comment for RescanTicketDB for more details. +// +// This function MUST be called with the tmdb lock held (for writes). +func (tmdb *TicketDB) rescanTicketDB() error { + // Get the latest block height from the database. + _, height, err := tmdb.database.NewestSha() + if err != nil { + return err + } + + if height < tmdb.StakeEnabledHeight { + return nil + } + + var freshTms TicketMaps + freshTms.ticketMap = make([]SStxMemMap, BucketsSize, BucketsSize) + freshTms.spentTicketMap = make(map[int64]SStxMemMap) + freshTms.missedTicketMap = make(SStxMemMap) + freshTms.revokedTicketMap = make(SStxMemMap) + + tmdb.maps = freshTms + + // Fill in live ticket buckets + for i := 0; i < BucketsSize; i++ { + tmdb.maps.ticketMap[i] = make(SStxMemMap) + } + + // Tickets don't exist before StakeEnabledHeight + for curHeight := tmdb.StakeEnabledHeight; curHeight <= height; curHeight++ { + // Go through the winners and votes for each block and use those to spend + // tickets in the ticket db. + hash, errHash := tmdb.database.FetchBlockShaByHeight(curHeight) + if errHash != nil { + return errHash + } + + block, errBlock := tmdb.database.FetchBlockBySha(hash) + if errBlock != nil { + return errBlock + } + + _, _, _, err = tmdb.insertBlock(block) + if err != nil { + return err + } + } + return nil +} + +// RescanTicketDB rescans and regenerates both ticket memory maps starting from +// the genesis block and extending to the current block. This uses a lot of +// memory because it doesn't kill the old buckets. 
+// +// This function is safe for concurrent access. +func (tmdb *TicketDB) RescanTicketDB() error { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + return tmdb.rescanTicketDB() +} + +// GetTicketHashesForMissed gets all the currently missed tickets, copies +// their hashes, and returns them. +// TODO: Is a pointer to a pointer for a slice really necessary? +// +// This function is safe for concurrent access. +func (tmdb *TicketDB) GetTicketHashesForMissed() []chainhash.Hash { + tmdb.mtx.Lock() + defer tmdb.mtx.Unlock() + + missedTickets := make([]chainhash.Hash, len(tmdb.maps.missedTicketMap)) + + itr := 0 + for hash, _ := range tmdb.maps.missedTicketMap { + missedTickets[itr] = hash + itr++ + } + + return missedTickets +} diff --git a/blockchain/stake/ticketdb_test.go b/blockchain/stake/ticketdb_test.go new file mode 100644 index 00000000..177c7ec7 --- /dev/null +++ b/blockchain/stake/ticketdb_test.go @@ -0,0 +1,272 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package stake_test + +import ( + "bytes" + "compress/bzip2" + "encoding/gob" + "fmt" + "math/big" + "os" + "path/filepath" + "reflect" + "sort" + "testing" + "time" + + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + _ "github.com/decred/dcrd/database/ldb" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" +) + +// cloneTicketDB makes a deep copy of a ticket DB by +// serializing it to a gob and then deserializing it +// into an empty container. +func cloneTicketDB(tmdb *stake.TicketDB) (stake.TicketMaps, error) { + mapsPointer := tmdb.DumpMapsPointer() + mapsBytes, err := mapsPointer.GobEncode() + if err != nil { + return stake.TicketMaps{}, + fmt.Errorf("clone db error: could not serialize ticketMaps") + } + + var mapsCopy stake.TicketMaps + if err := mapsCopy.GobDecode(mapsBytes); err != nil { + return stake.TicketMaps{}, + fmt.Errorf("clone db error: could not deserialize " + + "ticketMaps") + } + + return mapsCopy, nil +} + +// hashInSlice returns whether a hash exists in a slice or not. 
+func hashInSlice(h *chainhash.Hash, list []*chainhash.Hash) bool { + for _, hash := range list { + if h.IsEqual(hash) { + return true + } + } + + return false +} + +func TestTicketDB(t *testing.T) { + // Declare some useful variables + testBCHeight := int64(168) + + // Set up a DB + database, err := database.CreateDB("leveldb", "ticketdb_test") + if err != nil { + t.Errorf("Db create error: %v", err.Error()) + } + + // Make a new tmdb to fill with dummy live and used tickets + var tmdb stake.TicketDB + tmdb.Initialize(simNetParams, database) + + filename := filepath.Join("..", "/../blockchain/testdata", "blocks0to168.bz2") + fi, err := os.Open(filename) + bcStream := bzip2.NewReader(fi) + defer fi.Close() + + // Create a buffer of the read file + bcBuf := new(bytes.Buffer) + bcBuf.ReadFrom(bcStream) + + // Create decoder from the buffer and a map to store the data + bcDecoder := gob.NewDecoder(bcBuf) + blockchain := make(map[int64][]byte) + + // Decode the blockchain into the map + if err := bcDecoder.Decode(&blockchain); err != nil { + t.Errorf("error decoding test blockchain") + } + + var CopyOfMapsAtBlock50, CopyOfMapsAtBlock168 stake.TicketMaps + var ticketsToSpendIn167 []chainhash.Hash + var sortedTickets167 []*stake.TicketData + + for i := int64(0); i <= testBCHeight; i++ { + block, err := dcrutil.NewBlockFromBytes(blockchain[i]) + if err != nil { + t.Errorf("block deserialization error on block %v", i) + } + block.SetHeight(i) + database.InsertBlock(block) + tmdb.InsertBlock(block) + + if i == 50 { + // Create snapshot of tmdb at block 50 + CopyOfMapsAtBlock50, err = cloneTicketDB(&tmdb) + if err != nil { + t.Errorf("db cloning at block 50 failure! %v", err) + } + } + + // Test to make sure that ticket selection is working correctly. + if i == 167 { + // Sort the entire list of tickets lexicographically by sorting + // each bucket and then appending it to the list. Then store it + // to use in the next block. + totalTickets := 0 + sortedSlice := make([]*stake.TicketData, 0) + for i := 0; i < stake.BucketsSize; i++ { + tix, err := tmdb.DumpLiveTickets(uint8(i)) + if err != nil { + t.Errorf("error dumping live tickets") + } + mapLen := len(tix) + totalTickets += mapLen + tempTdSlice := stake.NewTicketDataSlice(mapLen) + itr := 0 // Iterator + for _, td := range tix { + tempTdSlice[itr] = td + itr++ + } + sort.Sort(tempTdSlice) + sortedSlice = append(sortedSlice, tempTdSlice...) + } + sortedTickets167 = sortedSlice + } + + if i == 168 { + parentBlock, err := dcrutil.NewBlockFromBytes(blockchain[i-1]) + if err != nil { + t.Errorf("block deserialization error on block %v", i-1) + } + pbhB, err := parentBlock.MsgBlock().Header.Bytes() + if err != nil { + t.Errorf("block header serialization error") + } + prng := stake.NewHash256PRNG(pbhB) + ts, err := stake.FindTicketIdxs(int64(len(sortedTickets167)), + int(simNetParams.TicketsPerBlock), prng) + if err != nil { + t.Errorf("failure on FindTicketIdxs") + } + for _, idx := range ts { + ticketsToSpendIn167 = + append(ticketsToSpendIn167, sortedTickets167[idx].SStxHash) + } + + // Make sure that the tickets that were supposed to be spent or + // missed were. 
+			spentTix, err := tmdb.DumpSpentTickets(i)
+			if err != nil {
+				t.Errorf("DumpSpentTickets failure")
+			}
+			for _, h := range ticketsToSpendIn167 {
+				if _, ok := spentTix[h]; !ok {
+					t.Errorf("missing ticket %v that should have been missed "+
+						"or spent in block %v", h, i)
+				}
+			}
+
+			// Create snapshot of tmdb at block 168
+			CopyOfMapsAtBlock168, err = cloneTicketDB(&tmdb)
+			if err != nil {
+				t.Errorf("db cloning at block 168 failure! %v", err)
+			}
+		}
+	}
+
+	// Remove blocks from the tip of the chain back down to height 50.
+	_, _, _, err = tmdb.RemoveBlockToHeight(50)
+	if err != nil {
+		t.Errorf("error: %v", err)
+	}
+
+	// Test if the roll back was symmetric to the earlier snapshot
+	if !reflect.DeepEqual(tmdb.DumpMapsPointer(), CopyOfMapsAtBlock50) {
+		t.Errorf("The ticket db did not restore to a previous block height " +
+			"correctly!")
+	}
+
+	// Test rescanning a ticket db
+	err = tmdb.RescanTicketDB()
+	if err != nil {
+		t.Errorf("rescanticketdb err: %v", err.Error())
+	}
+
+	// Test if the rescan matches the earlier snapshot at block 168
+	if !reflect.DeepEqual(tmdb.DumpMapsPointer(), CopyOfMapsAtBlock168) {
+		t.Errorf("The ticket db did not rescan to HEAD correctly!")
+	}
+
+	err = os.Mkdir("testdata/", os.FileMode(0700))
+	if err != nil {
+		t.Error(err)
+	}
+
+	// Store the ticket db to disk
+	err = tmdb.Store("testdata/", "testtmdb")
+	if err != nil {
+		t.Errorf("error: %v", err)
+	}
+
+	var tmdb2 stake.TicketDB
+	err = tmdb2.LoadTicketDBs("testdata/", "testtmdb", simNetParams, database)
+	if err != nil {
+		t.Errorf("error: %v", err)
+	}
+
+	// Test if the db file storage was symmetric to previously rescanned one
+	if !reflect.DeepEqual(tmdb.DumpMapsPointer(), tmdb2.DumpMapsPointer()) {
+		t.Errorf("The ticket db loaded from disk did not match the one " +
+			"stored to disk!")
+	}
+
+	tmdb2.Close()
+
+	// Test dumping missed tickets from block 152
+	missedIn152, _ := chainhash.NewHashFromStr(
+		"84f7f866b0af1cc278cb8e0b2b76024a07542512c76487c83628c14c650de4fa")
+
+	tmdb.RemoveBlockToHeight(152)
+
+	missedTix, err := tmdb.DumpMissedTickets()
+	if err != nil {
+		t.Errorf("err dumping missed tix: %v", err.Error())
+	}
+
+	if _, exists := missedTix[*missedIn152]; !exists {
+		t.Errorf("couldn't find missed ticket %v in tmdb @ block 152!",
+			missedIn152)
+	}
+
+	tmdb.RescanTicketDB()
+
+	// Make sure that the revoked map contains the revoked tx
+	revokedSlice := []*chainhash.Hash{missedIn152}
+
+	revokedTix, err := tmdb.DumpRevokedTickets()
+	if err != nil {
+		t.Errorf("err dumping revoked tix: %v", err.Error())
+	}
+
+	if len(revokedTix) != 1 {
+		t.Errorf("revoked ticket map is wrong len, got %v, want %v",
+			len(revokedTix), 1)
+	}
+
+	_, wasMissedIn152 := revokedTix[*revokedSlice[0]]
+	ticketsRevoked := wasMissedIn152
+	if !ticketsRevoked {
+		t.Errorf("revoked ticket map did not include tickets missed in " +
+			"block 152 and later revoked")
+	}
+
+	database.Close()
+	tmdb.Close()
+
+	os.RemoveAll("ticketdb_test")
+	os.Remove("./ticketdb_test.ver")
+	os.Remove("testdata/testtmdb")
+	os.Remove("testdata")
+}
diff --git a/blockchain/stakeext.go b/blockchain/stakeext.go
new file mode 100644
index 00000000..ed6b3a4f
--- /dev/null
+++ b/blockchain/stakeext.go
@@ -0,0 +1,261 @@
+// Copyright (c) 2013-2014 The btcsuite developers
+// Copyright (c) 2015 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+ +package blockchain + +import ( + "fmt" + "sort" + + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg/chainhash" +) + +// GetNextWinningTickets returns the next tickets eligible for spending as SSGen +// on the top block. It also returns the ticket pool size. +// This function is NOT safe for concurrent access. +func (b *BlockChain) GetNextWinningTickets() ([]chainhash.Hash, int, [6]byte, + error) { + winningTickets, poolSize, finalState, _, err := + b.getWinningTicketsWithStore(b.bestChain) + if err != nil { + return nil, 0, [6]byte{}, err + } + + return winningTickets, poolSize, finalState, nil +} + +// getWinningTicketsWithStore is a helper function that returns winning tickets +// along with the ticket pool size and transaction store for the given node. +// Note that this function evaluates the lottery data predominantly for mining +// purposes; that is, it retrieves the lottery data which needs to go into +// the next block when mining on top of this block. +// This function is NOT safe for concurrent access. +func (b *BlockChain) getWinningTicketsWithStore(node *blockNode) ([]chainhash.Hash, + int, [6]byte, TicketStore, error) { + if node.height < b.chainParams.StakeEnabledHeight { + return []chainhash.Hash{}, 0, [6]byte{}, nil, nil + } + + evalLotteryWinners := false + if node.height >= b.chainParams.StakeValidationHeight-1 { + evalLotteryWinners = true + } + + block, err := b.getBlockFromHash(node.hash) + if err != nil { + return nil, 0, [6]byte{}, nil, err + } + + headerB, err := node.header.Bytes() + if err != nil { + return nil, 0, [6]byte{}, nil, err + } + + ticketStore, err := b.fetchTicketStore(node) + if err != nil { + return nil, 0, [6]byte{}, nil, + fmt.Errorf("Failed to generate ticket store for node %v; "+ + "error given: %v", node.hash, err) + } + + if ticketStore != nil { + // We need the viewpoint of spendable tickets given that the + // current block was actually added. + err = b.connectTickets(ticketStore, node, block) + if err != nil { + return nil, 0, [6]byte{}, nil, err + } + } + + // Sort the entire list of tickets lexicographically by sorting + // each bucket and then appending it to the list. + tpdBucketMap := make(map[uint8][]*TicketPatchData) + for _, tpd := range ticketStore { + // Bucket does not exist. + if _, ok := tpdBucketMap[tpd.td.Prefix]; !ok { + tpdBucketMap[tpd.td.Prefix] = make([]*TicketPatchData, 1) + tpdBucketMap[tpd.td.Prefix][0] = tpd + } else { + // Bucket exists. + data := tpdBucketMap[tpd.td.Prefix] + tpdBucketMap[tpd.td.Prefix] = append(data, tpd) + } + } + totalTickets := 0 + sortedSlice := make([]*stake.TicketData, 0) + for i := 0; i < stake.BucketsSize; i++ { + ltb, err := b.GenerateLiveTicketBucket(ticketStore, tpdBucketMap, + uint8(i)) + if err != nil { + h := node.hash + str := fmt.Sprintf("Failed to generate a live ticket bucket "+ + "to evaluate the lottery data for node %v, height %v! Error "+ + "given: %v", + h, + node.height, + err.Error()) + return nil, 0, [6]byte{}, nil, fmt.Errorf(str) + } + mapLen := len(ltb) + + tempTdSlice := stake.NewTicketDataSlice(mapLen) + itr := 0 // Iterator + for _, td := range ltb { + tempTdSlice[itr] = td + itr++ + totalTickets++ + } + sort.Sort(tempTdSlice) + sortedSlice = append(sortedSlice, tempTdSlice...) + } + + // Use the parent block's header to seed a PRNG that picks the + // lottery winners. 
+ winningTickets := make([]chainhash.Hash, 0) + var finalState [6]byte + stateBuffer := make([]byte, 0, + (b.chainParams.TicketsPerBlock+1)*chainhash.HashSize) + if evalLotteryWinners { + ticketsPerBlock := int(b.chainParams.TicketsPerBlock) + prng := stake.NewHash256PRNG(headerB) + ts, err := stake.FindTicketIdxs(int64(totalTickets), ticketsPerBlock, prng) + if err != nil { + return nil, 0, [6]byte{}, nil, err + } + for _, idx := range ts { + winningTickets = append(winningTickets, sortedSlice[idx].SStxHash) + stateBuffer = append(stateBuffer, sortedSlice[idx].SStxHash[:]...) + } + + lastHash := prng.StateHash() + stateBuffer = append(stateBuffer, lastHash[:]...) + copy(finalState[:], chainhash.HashFuncB(stateBuffer)[0:6]) + } + + return winningTickets, totalTickets, finalState, ticketStore, nil +} + +// getWinningTicketsInclStore is a helper function for block validation that +// returns winning tickets along with the ticket pool size and transaction +// store for the given node. +// Note that this function is used for finding the lottery data when +// evaluating a block that builds on a tip, not for mining. +// This function is NOT safe for concurrent access. +func (b *BlockChain) getWinningTicketsInclStore(node *blockNode, + ticketStore TicketStore) ([]chainhash.Hash, int, [6]byte, error) { + if node.height < b.chainParams.StakeEnabledHeight { + return []chainhash.Hash{}, 0, [6]byte{}, nil + } + + evalLotteryWinners := false + if node.height >= b.chainParams.StakeValidationHeight-1 { + evalLotteryWinners = true + } + + parentHeaderB, err := node.parent.header.Bytes() + if err != nil { + return nil, 0, [6]byte{}, err + } + + // Sort the entire list of tickets lexicographically by sorting + // each bucket and then appending it to the list. + tpdBucketMap := make(map[uint8][]*TicketPatchData) + for _, tpd := range ticketStore { + // Bucket does not exist. + if _, ok := tpdBucketMap[tpd.td.Prefix]; !ok { + tpdBucketMap[tpd.td.Prefix] = make([]*TicketPatchData, 1) + tpdBucketMap[tpd.td.Prefix][0] = tpd + } else { + // Bucket exists. + data := tpdBucketMap[tpd.td.Prefix] + tpdBucketMap[tpd.td.Prefix] = append(data, tpd) + } + } + totalTickets := 0 + sortedSlice := make([]*stake.TicketData, 0) + for i := 0; i < stake.BucketsSize; i++ { + ltb, err := b.GenerateLiveTicketBucket(ticketStore, tpdBucketMap, uint8(i)) + if err != nil { + h := node.hash + str := fmt.Sprintf("Failed to generate a live ticket bucket "+ + "to evaluate the lottery data for node %v, height %v! Error "+ + "given: %v", + h, + node.height, + err.Error()) + return nil, 0, [6]byte{}, fmt.Errorf(str) + } + mapLen := len(ltb) + + tempTdSlice := stake.NewTicketDataSlice(mapLen) + itr := 0 // Iterator + for _, td := range ltb { + tempTdSlice[itr] = td + itr++ + totalTickets++ + } + sort.Sort(tempTdSlice) + sortedSlice = append(sortedSlice, tempTdSlice...) + } + + // Use the parent block's header to seed a PRNG that picks the + // lottery winners. + winningTickets := make([]chainhash.Hash, 0) + var finalState [6]byte + stateBuffer := make([]byte, 0, + (b.chainParams.TicketsPerBlock+1)*chainhash.HashSize) + if evalLotteryWinners { + ticketsPerBlock := int(b.chainParams.TicketsPerBlock) + prng := stake.NewHash256PRNG(parentHeaderB) + ts, err := stake.FindTicketIdxs(int64(totalTickets), ticketsPerBlock, prng) + if err != nil { + return nil, 0, [6]byte{}, err + } + for _, idx := range ts { + winningTickets = append(winningTickets, sortedSlice[idx].SStxHash) + stateBuffer = append(stateBuffer, sortedSlice[idx].SStxHash[:]...) 
+ } + + lastHash := prng.StateHash() + stateBuffer = append(stateBuffer, lastHash[:]...) + copy(finalState[:], chainhash.HashFuncB(stateBuffer)[0:6]) + } + + return winningTickets, totalTickets, finalState, nil +} + +// GetWinningTickets takes a node block hash and returns the next tickets +// eligible for spending as SSGen. +// This function is NOT safe for concurrent access. +func (b *BlockChain) GetWinningTickets(nodeHash chainhash.Hash) ([]chainhash.Hash, + int, [6]byte, error) { + var node *blockNode + if n, exists := b.index[nodeHash]; exists { + node = n + } else { + node, _ = b.findNode(&nodeHash) + } + + if node == nil { + return nil, 0, [6]byte{}, fmt.Errorf("node doesn't exist") + } + + winningTickets, poolSize, finalState, _, err := + b.getWinningTicketsWithStore(node) + if err != nil { + return nil, 0, [6]byte{}, err + } + + return winningTickets, poolSize, finalState, nil +} + +// GetMissedTickets returns a list of currently missed tickets. +// This function is NOT safe for concurrent access. +func (b *BlockChain) GetMissedTickets() []chainhash.Hash { + missedTickets := b.tmdb.GetTicketHashesForMissed() + + return missedTickets +} diff --git a/blockchain/subsidy.go b/blockchain/subsidy.go new file mode 100644 index 00000000..4a899b9a --- /dev/null +++ b/blockchain/subsidy.go @@ -0,0 +1,277 @@ +// Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package blockchain + +import ( + "bytes" + "fmt" + + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" +) + +// CalcBlockSubsidy returns the subsidy amount a block at the provided height +// should have. This is mainly used for determining how much the coinbase for +// newly generated blocks awards as well as validating the coinbase for blocks +// has the expected value. +// +// Subsidy calculation for exponential reductions: +// 0 for i in range (0, height / ReductionInterval): +// 1 subsidy *= MulSubsidy +// 2 subsidy /= DivSubsidy +// +// Safe for concurrent access. +func calcBlockSubsidy(height int64, params *chaincfg.Params) int64 { + // Block height 1 subsidy is 'special' and used to + // distribute initial tokens, if any. + if height == 1 { + return params.BlockOneSubsidy() + } + + iterations := height / params.ReductionInterval + subsidy := params.BaseSubsidy + + // You could stick all these values in a LUT for faster access if you + // wanted to, but this calculation is already really fast until you + // get very very far into the blockchain. The other method you could + // use is storing the total subsidy in a block node and do the + // multiplication and division when needed when adding a block. + if iterations > 0 { + for i := int64(0); i < iterations; i++ { + subsidy *= params.MulSubsidy + subsidy /= params.DivSubsidy + } + } + + return subsidy +} + +// CalcBlockWorkSubsidy calculates the proof of work subsidy for a block as a +// proportion of the total subsidy. +func CalcBlockWorkSubsidy(height int64, voters uint16, + params *chaincfg.Params) int64 { + subsidy := calcBlockSubsidy(height, params) + proportionWork := int64(params.WorkRewardProportion) + proportions := int64(params.TotalSubsidyProportions()) + subsidy *= proportionWork + subsidy /= proportions + + // Ignore the voters field of the header before we're at a point + // where there are any voters. 
+ if height < params.StakeValidationHeight { + return subsidy + } + + // If there are no voters, subsidy is 0. The block will fail later anyway. + if voters == 0 { + return 0 + } + + // Adjust for the number of voters. This shouldn't ever overflow if you start + // with 50 * 10^8 Atoms and voters and potentialVoters are uint16. + potentialVoters := params.TicketsPerBlock + actual := (int64(voters) * subsidy) / int64(potentialVoters) + + return actual +} + +// CalcStakeVoteSubsidy calculates the subsidy for a stake vote based on the height +// of its input SStx. +// +// Safe for concurrent access. +func CalcStakeVoteSubsidy(height int64, params *chaincfg.Params) int64 { + // Calculate the actual reward for this block, then further reduce reward + // proportional to StakeRewardProportion. + // Note that voters/potential voters is 1, so that vote reward is calculated + // irrespective of block reward. + subsidy := calcBlockSubsidy(height, params) + proportionStake := int64(params.StakeRewardProportion) + proportions := int64(params.TotalSubsidyProportions()) + subsidy *= proportionStake + subsidy /= (proportions * int64(params.TicketsPerBlock)) + + return subsidy +} + +// CalcBlockTaxSubsidy calculates the subsidy for the organization address in the +// coinbase. +// +// Safe for concurrent access. +func CalcBlockTaxSubsidy(height int64, voters uint16, + params *chaincfg.Params) int64 { + if params.BlockTaxProportion == 0 { + return 0 + } + + subsidy := calcBlockSubsidy(int64(height), params) + proportionTax := int64(params.BlockTaxProportion) + proportions := int64(params.TotalSubsidyProportions()) + subsidy *= proportionTax + subsidy /= proportions + + // Assume all voters 'present' before stake voting is turned on. + if height < params.StakeValidationHeight { + voters = 5 + } + + // If there are no voters, subsidy is 0. The block will fail later anyway. + if voters == 0 && height >= params.StakeValidationHeight { + return 0 + } + + // Adjust for the number of voters. This shouldn't ever overflow if you start + // with 50 * 10^8 Atoms and voters and potentialVoters are uint16. + potentialVoters := params.TicketsPerBlock + adjusted := (int64(voters) * subsidy) / int64(potentialVoters) + + return adjusted +} + +// BlockOneCoinbasePaysTokens checks to see if the first block coinbase pays +// out to the network initial token ledger. +func BlockOneCoinbasePaysTokens(tx *dcrutil.Tx, params *chaincfg.Params) error { + // If no ledger is specified, just return true. + if len(params.BlockOneLedger) == 0 { + return nil + } + + if tx.MsgTx().LockTime != 0 { + errStr := fmt.Sprintf("block 1 coinbase has invalid locktime") + return ruleError(ErrBlockOneTx, errStr) + } + + if tx.MsgTx().Expiry != wire.NoExpiryValue { + errStr := fmt.Sprintf("block 1 coinbase has invalid expiry") + return ruleError(ErrBlockOneTx, errStr) + } + + if tx.MsgTx().TxIn[0].Sequence != wire.MaxTxInSequenceNum { + errStr := fmt.Sprintf("block 1 coinbase not finalized") + return ruleError(ErrBlockOneInputs, errStr) + } + + if len(tx.MsgTx().TxOut) == 0 { + errStr := fmt.Sprintf("coinbase outputs empty in block 1") + return ruleError(ErrBlockOneOutputs, errStr) + } + + ledger := params.BlockOneLedger + if len(ledger) != len(tx.MsgTx().TxOut) { + errStr := fmt.Sprintf("wrong number of outputs in block 1 coinbase; "+ + "got %v, expected %v", len(tx.MsgTx().TxOut), len(ledger)) + return ruleError(ErrBlockOneOutputs, errStr) + } + + // Check the addresses and output amounts against those in the ledger. 
+ for i, txout := range tx.MsgTx().TxOut { + if txout.Version != txscript.DefaultScriptVersion { + errStr := fmt.Sprintf("bad block one output version; want %v, got %v", + txscript.DefaultScriptVersion, txout.Version) + return ruleError(ErrBlockOneOutputs, errStr) + } + + // There should only be one address. + _, addrs, _, err := + txscript.ExtractPkScriptAddrs(txout.Version, txout.PkScript, params) + if len(addrs) != 1 { + errStr := fmt.Sprintf("too many addresses in output") + return ruleError(ErrBlockOneOutputs, errStr) + } + + addrLedger, err := dcrutil.DecodeAddress(ledger[i].Address, params) + if err != nil { + return err + } + + if !bytes.Equal(addrs[0].ScriptAddress(), addrLedger.ScriptAddress()) { + errStr := fmt.Sprintf("address in output %v has non matching "+ + "address; got %v (hash160 %x), want %v (hash160 %x)", + i, + addrs[0].EncodeAddress(), + addrs[0].ScriptAddress(), + addrLedger.EncodeAddress(), + addrLedger.ScriptAddress()) + return ruleError(ErrBlockOneOutputs, errStr) + } + + if txout.Value != ledger[i].Amount { + errStr := fmt.Sprintf("address in output %v has non matching "+ + "amount; got %v, want %v", i, txout.Value, ledger[i].Amount) + return ruleError(ErrBlockOneOutputs, errStr) + } + } + + return nil +} + +// CoinbasePaysTax checks to see if a given block's coinbase correctly pays +// tax to the developer organization. +func CoinbasePaysTax(tx *dcrutil.Tx, height uint32, voters uint16, + params *chaincfg.Params) error { + // Taxes only apply from block 2 onwards. + if height <= 1 { + return nil + } + + // Tax is disabled. + if params.BlockTaxProportion == 0 { + return nil + } + + if len(tx.MsgTx().TxOut) == 0 { + errStr := fmt.Sprintf("invalid coinbase (no outputs)") + return ruleError(ErrNoTxOutputs, errStr) + } + + // Coinbase output 0 must be the subsidy to the dev organization. + taxPkVersion := tx.MsgTx().TxOut[0].Version + taxPkScript := tx.MsgTx().TxOut[0].PkScript + class, addrs, _, err := + txscript.ExtractPkScriptAddrs(taxPkVersion, taxPkScript, params) + // The script can't be a weird class. + if !(class == txscript.ScriptHashTy || + class == txscript.PubKeyHashTy || + class == txscript.PubKeyTy) { + errStr := fmt.Sprintf("wrong script class for tax output") + return ruleError(ErrNoTax, errStr) + } + + // There should only be one address. + if len(addrs) != 1 { + errStr := fmt.Sprintf("no or too many addresses in output") + return ruleError(ErrNoTax, errStr) + } + + // Decode the organization address. + addrOrg, err := dcrutil.DecodeAddress(params.OrganizationAddress, params) + if err != nil { + return err + } + + if !bytes.Equal(addrs[0].ScriptAddress(), addrOrg.ScriptAddress()) { + errStr := fmt.Sprintf("address in output 0 has non matching org "+ + "address; got %v (hash160 %x), want %v (hash160 %x)", + addrs[0].EncodeAddress(), + addrs[0].ScriptAddress(), + addrOrg.EncodeAddress(), + addrOrg.ScriptAddress()) + return ruleError(ErrNoTax, errStr) + } + + // Get the amount of subsidy that should have been paid out to + // the organization, then check it. 
+ orgSubsidy := CalcBlockTaxSubsidy(int64(height), voters, params) + amountFound := tx.MsgTx().TxOut[0].Value + if orgSubsidy != amountFound { + errStr := fmt.Sprintf("amount in output 0 has non matching org "+ + "calculated amount; got %v, want %v", amountFound, orgSubsidy) + return ruleError(ErrNoTax, errStr) + } + + return nil +} diff --git a/blockchain/subsidy_test.go b/blockchain/subsidy_test.go new file mode 100644 index 00000000..02c85f91 --- /dev/null +++ b/blockchain/subsidy_test.go @@ -0,0 +1,55 @@ +// Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package blockchain_test + +import ( + "testing" + + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/chaincfg" +) + +func TestBlockSubsidy(t *testing.T) { + mainnet := &chaincfg.MainNetParams + totalSubsidy := mainnet.BlockOneSubsidy() + for i := int64(0); ; i++ { + // Genesis block or first block. + if i == 0 || i == 1 { + continue + } + + if i%mainnet.ReductionInterval == 0 { + numBlocks := mainnet.ReductionInterval + // First reduction internal, which is reduction interval - 2 + // to skip the genesis block and block one. + if i == mainnet.ReductionInterval { + numBlocks -= 2 + } + height := i - numBlocks + + work := blockchain.CalcBlockWorkSubsidy(height, + mainnet.TicketsPerBlock, mainnet) + stake := blockchain.CalcStakeVoteSubsidy(height, mainnet) * + int64(mainnet.TicketsPerBlock) + tax := blockchain.CalcBlockTaxSubsidy(height, mainnet.TicketsPerBlock, + mainnet) + if (work + stake + tax) == 0 { + break + } + totalSubsidy += ((work + stake + tax) * numBlocks) + + // First reduction internal, subtract the stake subsidy for + // blocks before the staking system is enabled. 
+ if i == mainnet.ReductionInterval { + totalSubsidy -= stake * (mainnet.StakeValidationHeight - 2) + } + } + } + + if totalSubsidy != 2099999999800912 { + t.Errorf("Bad total subsidy; want 2099999999800912, got %v", totalSubsidy) + } +} diff --git a/blockchain/testdata/277647.dat.bz2 b/blockchain/testdata/277647.dat.bz2 deleted file mode 100644 index 598420a6..00000000 Binary files a/blockchain/testdata/277647.dat.bz2 and /dev/null differ diff --git a/blockchain/testdata/277647.txstore.bz2 b/blockchain/testdata/277647.txstore.bz2 deleted file mode 100644 index e3e38964..00000000 Binary files a/blockchain/testdata/277647.txstore.bz2 and /dev/null differ diff --git a/blockchain/testdata/blk_0_to_4.dat.bz2 b/blockchain/testdata/blk_0_to_4.dat.bz2 deleted file mode 100644 index 274c710d..00000000 Binary files a/blockchain/testdata/blk_0_to_4.dat.bz2 and /dev/null differ diff --git a/blockchain/testdata/blk_3A.dat.bz2 b/blockchain/testdata/blk_3A.dat.bz2 deleted file mode 100644 index 01266565..00000000 Binary files a/blockchain/testdata/blk_3A.dat.bz2 and /dev/null differ diff --git a/blockchain/testdata/blk_4A.dat.bz2 b/blockchain/testdata/blk_4A.dat.bz2 deleted file mode 100644 index 19b409e7..00000000 Binary files a/blockchain/testdata/blk_4A.dat.bz2 and /dev/null differ diff --git a/blockchain/testdata/blk_5A.dat.bz2 b/blockchain/testdata/blk_5A.dat.bz2 deleted file mode 100644 index 47bff903..00000000 Binary files a/blockchain/testdata/blk_5A.dat.bz2 and /dev/null differ diff --git a/blockchain/testdata/blocks0to168.bz2 b/blockchain/testdata/blocks0to168.bz2 new file mode 100644 index 00000000..73909a17 Binary files /dev/null and b/blockchain/testdata/blocks0to168.bz2 differ diff --git a/blockchain/testdata/reorgtest.hex b/blockchain/testdata/reorgtest.hex deleted file mode 100644 index 5b9e75e7..00000000 --- a/blockchain/testdata/reorgtest.hex +++ /dev/null @@ -1,180 +0,0 @@ -File path: reorgTest/blk_0_to_4.dat - -Block 0: - f9beb4d9 - 1d010000 - - 01000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 - 00000000 3ba3edfd 7a7b12b2 7ac72c3e 67768f61 7fc81bc3 888a5132 3a9fb8aa - 4b1e5e4a 29ab5f49 ffff001d 1dac2b7c - 01 - - 01000000 01000000 00000000 00000000 00000000 00000000 00000000 00000000 - 00000000 00ffffff ff4d04ff ff001d01 04455468 65205469 6d657320 30332f4a - 616e2f32 30303920 4368616e 63656c6c 6f72206f 6e206272 696e6b20 6f662073 - 65636f6e 64206261 696c6f75 7420666f 72206261 6e6b73ff ffffff01 00f2052a - 01000000 43410467 8afdb0fe 55482719 67f1a671 30b7105c d6a828e0 3909a679 - 62e0ea1f 61deb649 f6bc3f4c ef38c4f3 5504e51e c112de5c 384df7ba 0b8d578a - 4c702b6b f11d5fac 00000000 -Block 1: - f9beb4d9 - d4000000 - - 01000000 6fe28c0a b6f1b372 c1a6a246 ae63f74f 931e8365 e15a089c 68d61900 - 00000000 3bbd67ad e98fbbb7 0718cd80 f9e9acf9 3b5fae91 7bb2b41d 4c3bb82c - 77725ca5 81ad5f49 ffff001d 44e69904 - 01 - - 01000000 01000000 00000000 00000000 00000000 00000000 00000000 00000000 - 00000000 00ffffff ff04722f 2e2bffff ffff0100 f2052a01 00000043 41046868 - 0737c76d abb801cb 2204f57d be4e4579 e4f710cd 67dc1b42 27592c81 e9b5cf02 - b5ac9e8b 4c9f49be 5251056b 6a6d011e 4c37f6b6 d17ede6b 55faa235 19e2ac00 - 000000 -Block 2: - f9beb4d9 - 95010000 - - 01000000 13ca7940 4c11c63e ca906bbd f190b751 2872b857 1b5143ae e8cb5737 - 00000000 fc07c983 d7391736 0aeda657 29d0d4d3 2533eb84 76ee9d64 aa27538f - 9b4fc00a d9af5f49 ffff001d 630bea22 - 02 - - 01000000 01000000 00000000 00000000 00000000 00000000 00000000 00000000 - 00000000 00ffffff ff04eb96 14e5ffff ffff0100 f2052a01 00000043 
41046868 - 0737c76d abb801cb 2204f57d be4e4579 e4f710cd 67dc1b42 27592c81 e9b5cf02 - b5ac9e8b 4c9f49be 5251056b 6a6d011e 4c37f6b6 d17ede6b 55faa235 19e2ac00 - 000000 - - 01000000 0163451d 1002611c 1388d5ba 4ddfdf99 196a86b5 990fb5b0 dc786207 - 4fdcb8ee d2000000 004a4930 46022100 3dde52c6 5e339f45 7fe1015e 70eed208 - 872eb71e dd484c07 206b190e cb2ec3f8 02210011 c78dcfd0 3d43fa63 61242a33 - 6291ba2a 8c1ef5bc d5472126 2468f2bf 8dee4d01 ffffffff 0200ca9a 3b000000 - 001976a9 14cb2abd e8bccacc 32e893df 3a054b9e f7f227a4 ce88ac00 286bee00 - 00000019 76a914ee 26c56fc1 d942be8d 7a24b2a1 001dd894 69398088 ac000000 - 00 -Block 3: - f9beb4d9 - 96020000 - - 01000000 7d338254 0506faab 0d4cf179 45dda023 49db51f9 6233f24c 28002258 - 00000000 4806fe80 bf85931b 882ea645 77ca5a03 22bb8af2 3f277b20 55f160cd - 972c8e8b 31b25f49 ffff001d e8f0c653 - 03 - - 01000000 01000000 00000000 00000000 00000000 00000000 00000000 00000000 - 00000000 00ffffff ff044abd 8159ffff ffff0100 f2052a01 00000043 4104b95c - 249d84f4 17e3e395 a1274254 28b54067 1cc15881 eb828c17 b722a53f c599e21c - a5e56c90 f340988d 3933acc7 6beb832f d64cab07 8ddf3ce7 32923031 d1a8ac00 - 000000 - - 01000000 01f287b5 e067e1cf 80f7da8a f89917b5 505094db d82412d9 35b665eb - bad253d3 77010000 008c4930 46022100 96ee0d02 b35fd61e 4960b44f f396f67e - 01fe17f9 de4e0c17 b6a963bd ab2b50a6 02210034 920d4daa 7e9f8abe 5675c931 - 495809f9 0b9c1189 d05fbaf1 dd6696a5 b0d8f301 41046868 0737c76d abb801cb - 2204f57d be4e4579 e4f710cd 67dc1b42 27592c81 e9b5cf02 b5ac9e8b 4c9f49be - 5251056b 6a6d011e 4c37f6b6 d17ede6b 55faa235 19e2ffff ffff0100 286bee00 - 00000019 76a914c5 22664fb0 e55cdc5c 0cea73b4 aad97ec8 34323288 ac000000 - 00 - - 01000000 01f287b5 e067e1cf 80f7da8a f89917b5 505094db d82412d9 35b665eb - bad253d3 77000000 008c4930 46022100 b08b922a c4bde411 1c229f92 9fe6eb6a - 50161f98 1f4cf47e a9214d35 bf74d380 022100d2 f6640327 e677a1e1 cc474991 - b9a48ba5 bd1e0c94 d1c8df49 f7b0193b 7ea4fa01 4104b95c 249d84f4 17e3e395 - a1274254 28b54067 1cc15881 eb828c17 b722a53f c599e21c a5e56c90 f340988d - 3933acc7 6beb832f d64cab07 8ddf3ce7 32923031 d1a8ffff ffff0100 ca9a3b00 - 00000019 76a914c5 22664fb0 e55cdc5c 0cea73b4 aad97ec8 34323288 ac000000 - 00 - -Block 4: - f9beb4d9 - 73010000 - - 01000000 5da36499 06f35e09 9be42a1d 87b6dd42 11bc1400 6c220694 0807eaae - 00000000 48eeeaed 2d9d8522 e6201173 743823fd 4b87cd8a ca8e6408 ec75ca38 - 302c2ff0 89b45f49 ffff001d 00530839 - 02 - - 01000000 01000000 00000000 00000000 00000000 00000000 00000000 00000000 - 00000000 00ffffff ff04d41d 2213ffff ffff0100 f2052a01 00000043 4104678a - fdb0fe55 48271967 f1a67130 b7105cd6 a828e039 09a67962 e0ea1f61 deb649f6 - bc3f4cef 38c4f355 04e51ec1 12de5c38 4df7ba0b 8d578a4c 702b6bf1 1d5fac00 - 000000 - - 01000000 0163451d 1002611c 1388d5ba 4ddfdf99 196a86b5 990fb5b0 dc786207 - 4fdcb8ee d2000000 004a4930 46022100 8c8fd57b 48762135 8d8f3e69 19f33e08 - 804736ff 83db47aa 248512e2 6df9b8ba 022100b0 c59e5ee7 bfcbfcd1 a4d83da9 - 55fb260e fda7f42a 25522625 a3d6f2d9 1174a701 ffffffff 0100f205 2a010000 - 001976a9 14c52266 4fb0e55c dc5c0cea 73b4aad9 7ec83432 3288ac00 000000 - -File path: reorgTest/blk_3A.dat -Block 3A: - f9beb4d9 - 96020000 - - 01000000 7d338254 0506faab 0d4cf179 45dda023 49db51f9 6233f24c 28002258 - 00000000 5a15f573 1177a353 bdca7aab 20e16624 dfe90adc 70accadc 68016732 - 302c20a7 31b25f49 ffff001d 6a901440 - 03 - - 01000000 01000000 00000000 00000000 00000000 00000000 00000000 00000000 - 00000000 00ffffff ff04ad1b e7d5ffff ffff0100 f2052a01 00000043 4104ed83 - 704c95d8 29046f1a c2780621 1132102c 
34e9ac7f fa1b7111 0658e5b9 d1bdedc4 - 16f5cefc 1db0625c d0c75de8 192d2b59 2d7e3b00 bcfb4a0e 860d880f d1fcac00 - 000000 - - 01000000 01f287b5 e067e1cf 80f7da8a f89917b5 505094db d82412d9 35b665eb - bad253d3 77010000 008c4930 46022100 96ee0d02 b35fd61e 4960b44f f396f67e - 01fe17f9 de4e0c17 b6a963bd ab2b50a6 02210034 920d4daa 7e9f8abe 5675c931 - 495809f9 0b9c1189 d05fbaf1 dd6696a5 b0d8f301 41046868 0737c76d abb801cb - 2204f57d be4e4579 e4f710cd 67dc1b42 27592c81 e9b5cf02 b5ac9e8b 4c9f49be - 5251056b 6a6d011e 4c37f6b6 d17ede6b 55faa235 19e2ffff ffff0100 286bee00 - 00000019 76a914c5 22664fb0 e55cdc5c 0cea73b4 aad97ec8 34323288 ac000000 - 00 - - 01000000 01f287b5 e067e1cf 80f7da8a f89917b5 505094db d82412d9 35b665eb - bad253d3 77000000 008c4930 46022100 9cc67ddd aa6f592a 6b2babd4 d6ff954f - 25a784cf 4fe4bb13 afb9f49b 08955119 022100a2 d99545b7 94080757 fcf2b563 - f2e91287 86332f46 0ec6b90f f085fb28 41a69701 4104b95c 249d84f4 17e3e395 - a1274254 28b54067 1cc15881 eb828c17 b722a53f c599e21c a5e56c90 f340988d - 3933acc7 6beb832f d64cab07 8ddf3ce7 32923031 d1a8ffff ffff0100 ca9a3b00 - 00000019 76a914ee 26c56fc1 d942be8d 7a24b2a1 001dd894 69398088 ac000000 - 00 - -File path: reorgTest/blk_4A.dat -Block 4A: - f9beb4d9 - d4000000 - - 01000000 aae77468 2205667d 4f413a58 47cc8fe8 9795f1d5 645d5b24 1daf3c92 - 00000000 361c9cde a09637a0 d0c05c3b 4e7a5d91 9edb184a 0a4c7633 d92e2ddd - f04cb854 89b45f49 ffff001d 9e9aa1e8 - 01 - - 01000000 01000000 00000000 00000000 00000000 00000000 00000000 00000000 - 00000000 00ffffff ff0401b8 f3eaffff ffff0100 f2052a01 00000043 4104678a - fdb0fe55 48271967 f1a67130 b7105cd6 a828e039 09a67962 e0ea1f61 deb649f6 - bc3f4cef 38c4f355 04e51ec1 12de5c38 4df7ba0b 8d578a4c 702b6bf1 1d5fac00 - 000000 - -File path: reorgTest/blk_5A.dat -Block 5A: - f9beb4d9 - 73010000 - - 01000000 ebc7d0de 9c31a71b 7f41d275 2c080ba4 11e1854b d45cb2cf 8c1e4624 - 00000000 a607774b 79b8eb50 b52a5a32 c1754281 ec67f626 9561df28 57d1fe6a - ea82c696 e1b65f49 ffff001d 4a263577 - 02 - - 01000000 01000000 00000000 00000000 00000000 00000000 00000000 00000000 - 00000000 00ffffff ff049971 0c7dffff ffff0100 f2052a01 00000043 4104678a - fdb0fe55 48271967 f1a67130 b7105cd6 a828e039 09a67962 e0ea1f61 deb649f6 - bc3f4cef 38c4f355 04e51ec1 12de5c38 4df7ba0b 8d578a4c 702b6bf1 1d5fac00 - 000000 - - 01000000 0163451d 1002611c 1388d5ba 4ddfdf99 196a86b5 990fb5b0 dc786207 - 4fdcb8ee d2000000 004a4930 46022100 8c8fd57b 48762135 8d8f3e69 19f33e08 - 804736ff 83db47aa 248512e2 6df9b8ba 022100b0 c59e5ee7 bfcbfcd1 a4d83da9 - 55fb260e fda7f42a 25522625 a3d6f2d9 1174a701 ffffffff 0100f205 2a010000 - 001976a9 14c52266 4fb0e55c dc5c0cea 73b4aad9 7ec83432 3288ac00 000000 - diff --git a/blockchain/testdata/reorgto179.bz2 b/blockchain/testdata/reorgto179.bz2 new file mode 100644 index 00000000..19c91ff9 Binary files /dev/null and b/blockchain/testdata/reorgto179.bz2 differ diff --git a/blockchain/testdata/reorgto180.bz2 b/blockchain/testdata/reorgto180.bz2 new file mode 100644 index 00000000..60fab201 Binary files /dev/null and b/blockchain/testdata/reorgto180.bz2 differ diff --git a/blockchain/ticketlookup.go b/blockchain/ticketlookup.go new file mode 100644 index 00000000..3cebbef0 --- /dev/null +++ b/blockchain/ticketlookup.go @@ -0,0 +1,645 @@ +// Copyright (c) 2013-2014 Conformal Systems LLC. +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+
+package blockchain
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+
+	"github.com/decred/dcrd/blockchain/stake"
+	"github.com/decred/dcrd/chaincfg/chainhash"
+	"github.com/decred/dcrutil"
+)
+
+// TicketStatus is used to indicate the state of a ticket in the ticket store and
+// the ticket database. Non-existing is included because it's possible to have
+// a ticket not exist in a sidechain that exists in the mainchain (and thus exists
+// in the ticket database), and so you need to indicate this in the ticket store.
+// It could also point to a ticket that's been missed and eliminated from the
+// ticket db by SSRtx.
+type TicketStatus int
+
+const (
+	TiNonexisting = iota
+	TiSpent
+	TiAvailable
+	TiMissed
+	TiRevoked
+	TiError
+)
+
+// TicketPatchData contains contextual information about tickets, namely their
+// ticket data and whether or not they are spent.
+type TicketPatchData struct {
+	td  *stake.TicketData
+	ts  TicketStatus
+	err error
+}
+
+// NewTicketPatchData creates a new TicketPatchData struct.
+func NewTicketPatchData(td *stake.TicketData,
+	ts TicketStatus,
+	err error) *TicketPatchData {
+	return &TicketPatchData{td, ts, err}
+}
+
+// TicketStore is used to store a patch of the ticket db for use in validating the
+// block header and subsequently the block reward. It allows you to observe the
+// ticket db from the point-of-view of different points in the chain.
+// TicketStore is basically like an extremely inefficient version of the ticket
+// database that isn't designed to be able to be easily rolled back, which is fine
+// because we're only going to use it in ephemeral cases.
+type TicketStore map[chainhash.Hash]*TicketPatchData
+
+// GenerateLiveTicketBucket takes ticket patch data and a bucket number as input,
+// then recreates a ticket bucket from the patch and the current database state.
+func (b *BlockChain) GenerateLiveTicketBucket(ticketStore TicketStore,
+	tpdBucketMap map[uint8][]*TicketPatchData, bucket uint8) (stake.SStxMemMap,
+	error) {
+	bucketTickets := make(stake.SStxMemMap)
+
+	// Check the ticketstore for live tickets and add these to the bucket if
+	// their ticket number matches.
+	for _, tpd := range tpdBucketMap[bucket] {
+		if tpd.ts == TiAvailable {
+			bucketTickets[tpd.td.SStxHash] = tpd.td
+		}
+	}
+
+	// Check the ticket database for live tickets; prune live tickets that
+	// have been spent/missed/were otherwise accounted for.
+	liveTicketsFromDb, err := b.tmdb.DumpLiveTickets(bucket)
+	if err != nil {
+		return nil, err
+	}
+
+	for hash, td := range liveTicketsFromDb {
+		if _, exists := ticketStore[hash]; exists {
+			continue
+		}
+		bucketTickets[hash] = td
+	}
+
+	return bucketTickets, nil
+}
+
+// GenerateMissedTickets takes ticket patch data as input, then recreates the
+// missed tickets bucket from the patch and the current database state.
+func (b *BlockChain) GenerateMissedTickets(tixStore TicketStore) (stake.SStxMemMap,
+	error) {
+	missedTickets := make(stake.SStxMemMap)
+
+	// Check the ticket store for missed tickets and add these to the map.
+	for hash, tpd := range tixStore {
+		if tpd.ts == TiMissed {
+			missedTickets[hash] = tpd.td
+		}
+	}
+
+	// Check the ticket database for missed tickets; skip any that have
+	// already been accounted for in the ticket store.
+ missedTicketsFromDb, err := b.tmdb.DumpMissedTickets() + if err != nil { + return nil, err + } + + for hash, td := range missedTicketsFromDb { + if _, exists := tixStore[hash]; exists { + continue + } + missedTickets[hash] = td + } + + return missedTickets, nil +} + +// connectTickets updates the passed map by removing removing any tickets +// from the ticket pool that have been considered spent or missed in this block +// according to the block header. Then, it connects all the newly mature tickets +// to the passed map. +func (b *BlockChain) connectTickets(tixStore TicketStore, + node *blockNode, + block *dcrutil.Block) error { + if tixStore == nil { + return fmt.Errorf("nil ticket store!") + } + + // Nothing to do if tickets haven't yet possibly matured. + height := node.height + if height < b.chainParams.StakeEnabledHeight { + return nil + } + + parentBlock, err := b.GetBlockFromHash(node.parentHash) + if err != nil { + return err + } + + revocations := node.header.Revocations + + tM := int64(b.chainParams.TicketMaturity) + + // Skip a number of validation steps before we requiring chain + // voting. + if node.height >= b.chainParams.StakeValidationHeight { + regularTxTreeValid := dcrutil.IsFlagSet16(node.header.VoteBits, + dcrutil.BlockValid) + thisNodeStakeViewpoint := ViewpointPrevInvalidStake + if regularTxTreeValid { + thisNodeStakeViewpoint = ViewpointPrevValidStake + } + + // We need the missed tickets bucket from the original perspective of + // the node. + missedTickets, err := b.GenerateMissedTickets(tixStore) + if err != nil { + return err + } + + // TxStore at blockchain HEAD + TxTreeRegular of prevBlock (if + // validated) for this node. + txInputStoreStake, err := b.fetchInputTransactions(node, block, + thisNodeStakeViewpoint) + if err != nil { + errStr := fmt.Sprintf("fetchInputTransactions failed for incoming "+ + "node %v; error given: %v", node.hash, err) + return errors.New(errStr) + } + + // PART 1: Spend/miss winner tickets + + // Iterate through all the SSGen (vote) tx in the block and add them to + // a map of tickets that were actually used. + spentTicketsFromBlock := make(map[chainhash.Hash]bool) + numberOfSSgen := 0 + for _, staketx := range block.STransactions() { + if is, _ := stake.IsSSGen(staketx); is { + msgTx := staketx.MsgTx() + sstxIn := msgTx.TxIn[1] // sstx input + sstxHash := sstxIn.PreviousOutPoint.Hash + + originTx, exists := txInputStoreStake[sstxHash] + if !exists { + str := fmt.Sprintf("unable to find input transaction "+ + "%v for transaction %v", sstxHash, staketx.Sha()) + return ruleError(ErrMissingTx, str) + } + + sstxHeight := originTx.BlockHeight + + // Check maturity of ticket; we can only spend the ticket after it + // hits maturity at height + tM + 1. + if (height - sstxHeight) < (tM + 1) { + blockSha := block.Sha() + errStr := fmt.Sprintf("Error: A ticket spend as an SSGen in "+ + "block height %v was immature! Block sha %v", + height, + blockSha) + return errors.New(errStr) + } + + // Fill out the ticket data. + spentTicketsFromBlock[sstxHash] = true + numberOfSSgen++ + } + } + + // Obtain the TicketsPerBlock many tickets that were selected this round, + // then check these against the tickets that were actually used to make + // sure that any SSGen actually match the selected tickets. Commit the + // spent or missed tickets to the ticket store after. 
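The immaturity check a few lines above encodes the rule that a ticket purchased at height h may first be spent by a vote (SSGen) at height h + TicketMaturity + 1. A small illustration of that arithmetic; the maturity value of 256 is an example only, not necessarily the real network parameter:

```go
package main

import "fmt"

// canVoteOnTicket mirrors the immaturity check in connectTickets: a vote at
// voteHeight may only spend a ticket bought at ticketHeight once at least
// ticketMaturity+1 blocks have passed.
func canVoteOnTicket(voteHeight, ticketHeight, ticketMaturity int64) bool {
	return voteHeight-ticketHeight >= ticketMaturity+1
}

func main() {
	const ticketMaturity = 256 // example value only
	fmt.Println(canVoteOnTicket(1000, 743, ticketMaturity)) // true: 257 blocks have passed
	fmt.Println(canVoteOnTicket(1000, 744, ticketMaturity)) // false: still immature
}
```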
+ spentAndMissedTickets := make(TicketStore) + tixSpent := 0 + tixMissed := 0 + + // Sort the entire list of tickets lexicographically by sorting + // each bucket and then appending it to the list. Start by generating + // a prefix matched map of tickets to speed up the lookup. + tpdBucketMap := make(map[uint8][]*TicketPatchData) + for _, tpd := range tixStore { + // Bucket does not exist. + if _, ok := tpdBucketMap[tpd.td.Prefix]; !ok { + tpdBucketMap[tpd.td.Prefix] = make([]*TicketPatchData, 1) + tpdBucketMap[tpd.td.Prefix][0] = tpd + } else { + // Bucket exists. + data := tpdBucketMap[tpd.td.Prefix] + tpdBucketMap[tpd.td.Prefix] = append(data, tpd) + } + } + totalTickets := 0 + sortedSlice := make([]*stake.TicketData, 0) + for i := 0; i < stake.BucketsSize; i++ { + ltb, err := b.GenerateLiveTicketBucket(tixStore, tpdBucketMap, + uint8(i)) + if err != nil { + h := node.hash + str := fmt.Sprintf("Failed to generate live ticket bucket "+ + "%v for node %v, height %v! Error: %v", + i, + h, + node.height, + err.Error()) + return fmt.Errorf(str) + } + mapLen := len(ltb) + + tempTdSlice := stake.NewTicketDataSlice(mapLen) + itr := 0 // Iterator + for _, td := range ltb { + tempTdSlice[itr] = td + itr++ + totalTickets++ + } + sort.Sort(tempTdSlice) + sortedSlice = append(sortedSlice, tempTdSlice...) + } + + // Use the parent block's header to seed a PRNG that picks the + // lottery winners. + ticketsPerBlock := int(b.chainParams.TicketsPerBlock) + pbhB, err := parentBlock.MsgBlock().Header.Bytes() + if err != nil { + return err + } + prng := stake.NewHash256PRNG(pbhB) + ts, err := stake.FindTicketIdxs(int64(totalTickets), ticketsPerBlock, prng) + if err != nil { + return err + } + + ticketsToSpendOrMiss := make([]*stake.TicketData, ticketsPerBlock, + ticketsPerBlock) + for i, idx := range ts { + ticketsToSpendOrMiss[i] = sortedSlice[idx] + } + + // Spend or miss these tickets by checking for their existence in the + // passed spentTicketsFromBlock map. + for _, ticket := range ticketsToSpendOrMiss { + // Move the ticket from active tickets map into the used tickets + // map if the ticket was spent. + wasSpent, _ := spentTicketsFromBlock[ticket.SStxHash] + + if wasSpent { + tpd := NewTicketPatchData(ticket, TiSpent, nil) + spentAndMissedTickets[ticket.SStxHash] = tpd + tixSpent++ + } else { // Ticket missed being spent and --> false or nil + tpd := NewTicketPatchData(ticket, TiMissed, nil) + spentAndMissedTickets[ticket.SStxHash] = tpd + tixMissed++ + } + } + + // This error is thrown if for some reason there exists an SSGen in + // the block that doesn't spend a ticket from the eligible list of + // tickets, thus making it invalid. + if tixSpent != numberOfSSgen { + errStr := fmt.Sprintf("an invalid number %v "+ + "tickets was spent, but %v many tickets should "+ + "have been spent!", tixSpent, numberOfSSgen) + return errors.New(errStr) + } + + if tixMissed != (ticketsPerBlock - numberOfSSgen) { + errStr := fmt.Sprintf("an invalid number %v "+ + "tickets was missed, but %v many tickets should "+ + "have been missed!", tixMissed, + ticketsPerBlock-numberOfSSgen) + return errors.New(errStr) + } + + if (tixSpent + tixMissed) != int(b.chainParams.TicketsPerBlock) { + errStr := fmt.Sprintf("an invalid number %v "+ + "tickets was spent and missed, but TicketsPerBlock %v many "+ + "tickets should have been spent!", tixSpent, + ticketsPerBlock) + return errors.New(errStr) + } + + // Calculate all the tickets expiring this block and mark them as missed. 
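The accounting at the end of this section enforces three invariants: every vote in the block must spend one of the PRNG-selected winners, every selected winner that was not voted on must be marked missed, and the two counts must sum to TicketsPerBlock. A compact restatement of those checks with simplified inputs (the example uses 5 tickets per block purely for illustration):

```go
package main

import (
	"errors"
	"fmt"
)

// checkVoteAccounting mirrors the consistency checks in connectTickets after
// the lottery winners have been classified as spent or missed.
func checkVoteAccounting(tixSpent, tixMissed, numVotes, ticketsPerBlock int) error {
	if tixSpent != numVotes {
		return fmt.Errorf("%d tickets spent, but the block contains %d votes", tixSpent, numVotes)
	}
	if tixMissed != ticketsPerBlock-numVotes {
		return fmt.Errorf("%d tickets missed, expected %d", tixMissed, ticketsPerBlock-numVotes)
	}
	if tixSpent+tixMissed != ticketsPerBlock {
		return errors.New("spent+missed must equal TicketsPerBlock")
	}
	return nil
}

func main() {
	// 5 tickets per block (example value), 4 votes present, so 1 winner was missed.
	fmt.Println(checkVoteAccounting(4, 1, 4, 5)) // <nil>
	fmt.Println(checkVoteAccounting(3, 1, 4, 5)) // error: vote/spend mismatch
}
```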
+ tpdBucketMap = make(map[uint8][]*TicketPatchData) + for _, tpd := range tixStore { + // Bucket does not exist. + if _, ok := tpdBucketMap[tpd.td.Prefix]; !ok { + tpdBucketMap[tpd.td.Prefix] = make([]*TicketPatchData, 1) + tpdBucketMap[tpd.td.Prefix][0] = tpd + } else { + // Bucket exists. + data := tpdBucketMap[tpd.td.Prefix] + tpdBucketMap[tpd.td.Prefix] = append(data, tpd) + } + } + toExpireHeight := node.height - int64(b.chainParams.TicketExpiry) + if !(toExpireHeight < int64(b.chainParams.StakeEnabledHeight)) { + for i := 0; i < stake.BucketsSize; i++ { + // Generate the live ticket bucket. + ltb, err := b.GenerateLiveTicketBucket(tixStore, + tpdBucketMap, uint8(i)) + if err != nil { + return err + } + + for _, ticket := range ltb { + if ticket.BlockHeight == toExpireHeight { + tpd := NewTicketPatchData(ticket, TiMissed, nil) + spentAndMissedTickets[ticket.SStxHash] = tpd + } + } + } + } + + // Merge the ticket store patch containing the spent and missed tickets + // with the ticket store. + for hash, tpd := range spentAndMissedTickets { + tixStore[hash] = tpd + } + + // At this point our tixStore now contains all the spent and missed tx + // as per this block. + + // PART 2: Remove tickets that were missed and are now revoked. + + // Iterate through all the SSGen (vote) tx in the block and add them to + // a map of tickets that were actually used. + revocationsFromBlock := make(map[chainhash.Hash]struct{}) + numberOfSSRtx := 0 + for _, staketx := range block.STransactions() { + if is, _ := stake.IsSSRtx(staketx); is { + msgTx := staketx.MsgTx() + sstxIn := msgTx.TxIn[0] // sstx input + sstxHash := sstxIn.PreviousOutPoint.Hash + + // Fill out the ticket data. + revocationsFromBlock[sstxHash] = struct{}{} + numberOfSSRtx++ + } + } + + if numberOfSSRtx != int(revocations) { + errStr := fmt.Sprintf("an invalid revocations %v was calculated "+ + "the block header indicates %v instead", numberOfSSRtx, + revocations) + return errors.New(errStr) + } + + // Lookup the missed ticket. If we find it in the patch data, + // modify the patch data so that it doesn't exist. + // Otherwise, just modify load the missed ticket data from + // the ticket db and create patch data based on that. + for hash, _ := range revocationsFromBlock { + ticketWasMissed := false + if td, is := missedTickets[hash]; is { + maturedHeight := td.BlockHeight + + // Check maturity of ticket; we can only spend the ticket after it + // hits maturity at height + tM + 2. + if height < maturedHeight+2 { + blockSha := block.Sha() + errStr := fmt.Sprintf("Error: A ticket spend as an "+ + "SSRtx in block height %v was immature! Block sha %v", + height, + blockSha) + return errors.New(errStr) + } + + ticketWasMissed = true + } + + if !ticketWasMissed { + errStr := fmt.Sprintf("SSRtx spent missed sstx %v, "+ + "but that missed sstx could not be found!", + hash) + return errors.New(errStr) + } + } + } + + // PART 3: Add newly maturing tickets + // This is the only chunk we need to do for blocks appearing before + // stake validation height. + + // Calculate block number for where new tickets are maturing from and retrieve + // this block from db. + + // Get the block that is maturing. + matureNode, err := b.getNodeAtHeightFromTopNode(node, tM) + if err != nil { + return err + } + + matureBlock, errBlock := b.getBlockFromHash(matureNode.hash) + if errBlock != nil { + return errBlock + } + + // Maturing tickets are from the maturingBlock; fill out the ticket patch data + // and then push them to the tixStore. 
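The expiry pass above computes toExpireHeight as the current height minus TicketExpiry and marks any live ticket purchased at exactly that height as missed. The same relationship from the ticket's point of view, with an illustrative expiry value that is not necessarily the real network parameter:

```go
package main

import "fmt"

// expiresAt mirrors the expiry calculation in connectTickets: a live ticket
// bought at purchaseHeight is marked missed once the chain reaches
// purchaseHeight + ticketExpiry blocks.
func expiresAt(purchaseHeight, ticketExpiry int64) int64 {
	return purchaseHeight + ticketExpiry
}

func main() {
	const ticketExpiry = 40960 // example value only
	purchase := int64(5000)
	tip := int64(45960)

	// Same comparison expressed from the block's point of view.
	toExpireHeight := tip - ticketExpiry

	fmt.Println(expiresAt(purchase, ticketExpiry) == tip) // true
	fmt.Println(purchase == toExpireHeight)               // true: this ticket expires here
}
```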
+ for _, stx := range matureBlock.STransactions() { + if is, _ := stake.IsSStx(stx); is { + // Calculate the prefix for pre-sort. + sstxHash := *stx.Sha() + prefix := uint8(sstxHash[0]) + + // Fill out the ticket data. + td := stake.NewTicketData(sstxHash, + prefix, + chainhash.Hash{}, + height, + false, // not missed + false) // not expired + + tpd := NewTicketPatchData(td, + TiAvailable, + nil) + tixStore[*stx.Sha()] = tpd + } + } + + return nil +} + +// disconnectTransactions updates the passed map by undoing transaction and +// spend information for all transactions in the passed block. Only +// transactions in the passed map are updated. +// This function should only ever have to disconnect transactions from the main +// chain, so most of the calls are directly the the tmdb which contains all this +// data in an organized bucket. +func (b *BlockChain) disconnectTickets(tixStore TicketStore, + node *blockNode, + block *dcrutil.Block) error { + + tM := int64(b.chainParams.TicketMaturity) + height := node.height + + // Nothing to do if tickets haven't yet possibly matured. + if height < b.chainParams.StakeEnabledHeight { + return nil + } + + // PART 1: Remove newly maturing tickets + + // Calculate block number for where new tickets matured from and retrieve + // this block from db. + matureNode, err := b.getNodeAtHeightFromTopNode(node, tM) + if err != nil { + return err + } + + matureBlock, errBlock := b.getBlockFromHash(matureNode.hash) + if errBlock != nil { + return errBlock + } + + // Store pointers to empty ticket data in the ticket store and mark them as + // non-existing. + for _, stx := range matureBlock.STransactions() { + if is, _ := stake.IsSStx(stx); is { + // Leave this pointing to nothing, as the ticket technically does not + // exist. It may exist when we add blocks later, but we can fill it + // out then. + td := &stake.TicketData{} + + tpd := NewTicketPatchData(td, + TiNonexisting, + nil) + + tixStore[*stx.Sha()] = tpd + } + } + + // PART 2: Unrevoke any SSRtx in this block and restore them as + // missed tickets. + for _, stx := range block.STransactions() { + if is, _ := stake.IsSSRtx(stx); is { + // Move the revoked ticket to missed tickets. Obtain the + // revoked ticket data from the ticket database. + msgTx := stx.MsgTx() + sstxIn := msgTx.TxIn[0] // sstx input + sstxHash := sstxIn.PreviousOutPoint.Hash + + td := b.tmdb.GetRevokedTicket(sstxHash) + if td == nil { + return fmt.Errorf("Failed to find revoked ticket %v in tmdb", + sstxHash) + } + + tpd := NewTicketPatchData(td, + TiMissed, + nil) + + tixStore[sstxHash] = tpd + } + } + + // PART 3: Unspend or unmiss all tickets spent/missed/expired at this block. + + // Query the stake db for used tickets (spentTicketDb), which includes all of + // the spent and missed tickets. + spentTickets, errDump := b.tmdb.DumpSpentTickets(height) + if errDump != nil { + return errDump + } + + // Move all of these tickets into the ticket store as available tickets. + for hash, td := range spentTickets { + tpd := NewTicketPatchData(td, + TiAvailable, + nil) + + tixStore[hash] = tpd + } + + return nil +} + +// fetchTicketStore fetches ticket data from the point of view of the given node. +// For example, a given node might be down a side chain where a ticket hasn't been +// spent from its point of view even though it might have been spent in the main +// chain (or another side chain). 
Another scenario is where a ticket exists from +// the point of view of the main chain, but doesn't exist in a side chain that +// branches before the block that contains the ticket on the main chain. +func (b *BlockChain) fetchTicketStore(node *blockNode) (TicketStore, error) { + tixStore := make(TicketStore) + + // Get the previous block node. This function is used over simply + // accessing node.parent directly as it will dynamically create previous + // block nodes as needed. This helps allow only the pieces of the chain + // that are needed to remain in memory. + prevNode, err := b.getPrevNodeFromNode(node) + if err != nil { + return nil, err + } + + // If we haven't selected a best chain yet or we are extending the main + // (best) chain with a new block, just use the ticket database we already + // have. + if b.bestChain == nil || (prevNode != nil && + prevNode.hash.IsEqual(b.bestChain.hash)) { + return nil, nil + } + + // We don't care about nodes before stake enabled height. + if node.height < b.chainParams.StakeEnabledHeight { + return nil, nil + } + + // The requested node is either on a side chain or is a node on the main + // chain before the end of it. In either case, we need to undo the + // transactions and spend information for the blocks which would be + // disconnected during a reorganize to the point of view of the + // node just before the requested node. + detachNodes, attachNodes, err := b.getReorganizeNodes(prevNode) + if err != nil { + return nil, err + } + + for e := detachNodes.Front(); e != nil; e = e.Next() { + n := e.Value.(*blockNode) + block, err := b.db.FetchBlockBySha(n.hash) + if err != nil { + return nil, err + } + + err = b.disconnectTickets(tixStore, n, block) + if err != nil { + return nil, err + } + } + + // The ticket store is now accurate to either the node where the + // requested node forks off the main chain (in the case where the + // requested node is on a side chain), or the requested node itself if + // the requested node is an old node on the main chain. Entries in the + // attachNodes list indicate the requested node is on a side chain, so + // if there are no nodes to attach, we're done. + if attachNodes.Len() == 0 { + return tixStore, nil + } + + // The requested node is on a side chain, so we need to apply the + // transactions and spend information from each of the nodes to attach. + for e := attachNodes.Front(); e != nil; e = e.Next() { + n := e.Value.(*blockNode) + block, exists := b.blockCache[*n.hash] + if !exists { + return nil, fmt.Errorf("unable to find block %v in "+ + "side chain cache for ticket db patch construction", + n.hash) + } + + // The number of blocks below this block but above the root of the fork + err = b.connectTickets(tixStore, n, block) + if err != nil { + return nil, err + } + } + + return tixStore, nil +} diff --git a/blockchain/timesorter.go b/blockchain/timesorter.go index 516fe4fd..5ff2e309 100644 --- a/blockchain/timesorter.go +++ b/blockchain/timesorter.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
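fetchTicketStore above follows the standard detach/attach pattern: undo ticket effects for every main-chain block back to the fork point, then replay them for every side-chain block up to the requested node, accumulating everything into one ephemeral patch. A schematic outline of that walk, with placeholder types standing in for blockNode and the per-block connect/disconnect operations:

```go
package main

import "fmt"

// Schematic stand-ins for blockNode and the per-block undo/redo operations.
type blockNode struct{ height int64 }

type patch map[string]string

func disconnect(p patch, n *blockNode) { p[fmt.Sprintf("undo-%d", n.height)] = "disconnected" }
func connect(p patch, n *blockNode)    { p[fmt.Sprintf("redo-%d", n.height)] = "connected" }

// buildSideChainView mirrors the shape of fetchTicketStore: walk back from the
// main chain tip undoing blocks, then walk forward along the side chain
// reapplying them. If there is nothing to attach, the requested node is simply
// an older main chain block and the patch is already complete.
func buildSideChainView(detach, attach []*blockNode) patch {
	p := make(patch)
	for _, n := range detach {
		disconnect(p, n)
	}
	if len(attach) == 0 {
		return p
	}
	for _, n := range attach {
		connect(p, n)
	}
	return p
}

func main() {
	detach := []*blockNode{{height: 102}, {height: 101}}
	attach := []*blockNode{{height: 101}, {height: 102}}
	fmt.Println(len(buildSideChainView(detach, attach))) // 4 operations recorded
}
```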
diff --git a/blockchain/timesorter_test.go b/blockchain/timesorter_test.go index ca1b4afb..834415c3 100644 --- a/blockchain/timesorter_test.go +++ b/blockchain/timesorter_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -10,7 +11,7 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/blockchain" + "github.com/decred/dcrd/blockchain" ) // TestTimeSorter tests the timeSorter implementation. diff --git a/blockchain/txlookup.go b/blockchain/txlookup.go index 689f0d25..4305fc1c 100644 --- a/blockchain/txlookup.go +++ b/blockchain/txlookup.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -7,17 +8,48 @@ package blockchain import ( "fmt" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) +// There are five potential viewpoints we need to worry about. +// ViewpointPrevValidInitial: Viewpoint from the perspective of the +// everything up the the previous block's TxTreeRegular, used to validate +// that tx tree regular. +const ViewpointPrevValidInitial = int8(0) + +// ViewpointPrevValidStake: Viewpoint from the perspective of the +// everything up the the previous block's TxTreeRegular plus the +// contents of the TxTreeRegular, to validate TxTreeStake. +const ViewpointPrevValidStake = int8(1) + +// ViewpointPrevInvalidStake: Viewpoint from the perspective of the +// everything up the the previous block's TxTreeRegular but without the +// contents of the TxTreeRegular, to validate TxTreeStake. +const ViewpointPrevInvalidStake = int8(2) + +// ViewpointPrevValidRegular: Viewpoint from the perspective of the +// everything up the the previous block's TxTreeRegular plus the +// contents of the TxTreeRegular and TxTreeStake of current block, +// to validate TxTreeRegular of the current block. +const ViewpointPrevValidRegular = int8(3) + +// ViewpointPrevInvalidRegular: Viewpoint from the perspective of the +// everything up the the previous block's TxTreeRegular minus the +// contents of the TxTreeRegular and TxTreeStake of current block, +// to validate TxTreeRegular of the current block. +const ViewpointPrevInvalidRegular = int8(4) + // TxData contains contextual information about transactions such as which block // they were found in and whether or not the outputs are spent. type TxData struct { - Tx *btcutil.Tx - Hash *wire.ShaHash + Tx *dcrutil.Tx + Hash *chainhash.Hash BlockHeight int64 + BlockIndex uint32 Spent []bool Err error } @@ -26,21 +58,32 @@ type TxData struct { // such as script validation and double spend prevention. This also allows the // transaction data to be treated as a view since it can contain the information // from the point-of-view of different points in the chain. -type TxStore map[wire.ShaHash]*TxData +type TxStore map[chainhash.Hash]*TxData + +// connectTxTree lets you connect an arbitrary TxTree to a txStore to push it +// forward in history. 
+// TxTree true == TxTreeRegular +// TxTree false == TxTreeStake +func connectTxTree(txStore TxStore, + block *dcrutil.Block, + txTree bool) { + var transactions []*dcrutil.Tx + if txTree { + transactions = block.Transactions() + } else { + transactions = block.STransactions() + } -// connectTransactions updates the passed map by applying transaction and -// spend information for all the transactions in the passed block. Only -// transactions in the passed map are updated. -func connectTransactions(txStore TxStore, block *btcutil.Block) error { // Loop through all of the transactions in the block to see if any of // them are ones we need to update and spend based on the results map. - for _, tx := range block.Transactions() { + for i, tx := range transactions { // Update the transaction store with the transaction information // if it's one of the requested transactions. msgTx := tx.MsgTx() if txD, exists := txStore[*tx.Sha()]; exists { txD.Tx = tx txD.BlockHeight = block.Height() + txD.BlockIndex = uint32(i) txD.Spent = make([]bool, len(msgTx.TxOut)) txD.Err = nil } @@ -50,7 +93,87 @@ func connectTransactions(txStore TxStore, block *btcutil.Block) error { originHash := &txIn.PreviousOutPoint.Hash originIndex := txIn.PreviousOutPoint.Index if originTx, exists := txStore[*originHash]; exists { - if originIndex > uint32(len(originTx.Spent)) { + if originTx.Spent == nil { + continue + } + if originIndex >= uint32(len(originTx.Spent)) { + continue + } + originTx.Spent[originIndex] = true + } + } + } + + return +} + +func connectTransactions(txStore TxStore, block *dcrutil.Block, parent *dcrutil.Block) error { + // There is no regular tx from before the genesis block, so ignore the genesis + // block for the next step. + if parent != nil && block.Height() != 0 { + mBlock := block.MsgBlock() + votebits := mBlock.Header.VoteBits + regularTxTreeValid := dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) + + // Only add the transactions in the event that the parent block's regular + // tx were validated. + if regularTxTreeValid { + // Loop through all of the regular transactions in the block to see if + // any of them are ones we need to update and spend based on the + // results map. + for i, tx := range parent.Transactions() { + // Update the transaction store with the transaction information + // if it's one of the requested transactions. + msgTx := tx.MsgTx() + if txD, exists := txStore[*tx.Sha()]; exists { + txD.Tx = tx + txD.BlockHeight = block.Height() - 1 + txD.BlockIndex = uint32(i) + txD.Spent = make([]bool, len(msgTx.TxOut)) + txD.Err = nil + } + + // Spend the origin transaction output. + for _, txIn := range msgTx.TxIn { + originHash := &txIn.PreviousOutPoint.Hash + originIndex := txIn.PreviousOutPoint.Index + if originTx, exists := txStore[*originHash]; exists { + if originTx.Spent == nil { + continue + } + if originIndex >= uint32(len(originTx.Spent)) { + continue + } + originTx.Spent[originIndex] = true + } + } + } + } + } + + // Loop through all of the stake transactions in the block to see if any of + // them are ones we need to update and spend based on the results map. + for i, tx := range block.STransactions() { + // Update the transaction store with the transaction information + // if it's one of the requested transactions. + msgTx := tx.MsgTx() + if txD, exists := txStore[*tx.Sha()]; exists { + txD.Tx = tx + txD.BlockHeight = block.Height() + txD.BlockIndex = uint32(i) + txD.Spent = make([]bool, len(msgTx.TxOut)) + txD.Err = nil + } + + // Spend the origin transaction output. 
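connectTransactions above only replays the parent block's regular transaction tree when the current block's vote bits approve it; otherwise those transactions are treated as though they never entered the chain. A tiny sketch of that gate, assuming (as the dcrutil.BlockValid / IsFlagSet16 usage suggests) that the validity flag occupies the lowest bit of the 16-bit VoteBits field:

```go
package main

import "fmt"

// blockValid is assumed here to be the lowest bit of the 16-bit VoteBits
// field, mirroring how dcrutil.BlockValid is used with IsFlagSet16 above.
const blockValid uint16 = 1 << 0

// parentRegularTreeApproved reports whether the votes in the current block
// approved the parent block's regular transaction tree.
func parentRegularTreeApproved(voteBits uint16) bool {
	return voteBits&blockValid == blockValid
}

func main() {
	fmt.Println(parentRegularTreeApproved(0x0001)) // true: connect the parent's regular tree
	fmt.Println(parentRegularTreeApproved(0x0000)) // false: skip it entirely
}
```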
+ for _, txIn := range msgTx.TxIn { + originHash := &txIn.PreviousOutPoint.Hash + originIndex := txIn.PreviousOutPoint.Index + if originTx, exists := txStore[*originHash]; exists { + if originTx.Spent == nil { + continue + } + if originIndex >= uint32(len(originTx.Spent)) { continue } originTx.Spent[originIndex] = true @@ -64,10 +187,10 @@ func connectTransactions(txStore TxStore, block *btcutil.Block) error { // disconnectTransactions updates the passed map by undoing transaction and // spend information for all transactions in the passed block. Only // transactions in the passed map are updated. -func disconnectTransactions(txStore TxStore, block *btcutil.Block) error { - // Loop through all of the transactions in the block to see if any of +func disconnectTransactions(txStore TxStore, block *dcrutil.Block, parent *dcrutil.Block) error { + // Loop through all of the stake transactions in the block to see if any of // them are ones that need to be undone based on the transaction store. - for _, tx := range block.Transactions() { + for _, tx := range block.STransactions() { // Clear this transaction from the transaction store if needed. // Only clear it rather than deleting it because the transaction // connect code relies on its presence to decide whether or not @@ -75,7 +198,8 @@ func disconnectTransactions(txStore TxStore, block *btcutil.Block) error { // sides of a fork would otherwise not be updated. if txD, exists := txStore[*tx.Sha()]; exists { txD.Tx = nil - txD.BlockHeight = 0 + txD.BlockHeight = int64(wire.NullBlockHeight) + txD.BlockIndex = wire.NullBlockIndex txD.Spent = nil txD.Err = database.ErrTxShaMissing } @@ -86,7 +210,10 @@ func disconnectTransactions(txStore TxStore, block *btcutil.Block) error { originIndex := txIn.PreviousOutPoint.Index originTx, exists := txStore[*originHash] if exists && originTx.Tx != nil && originTx.Err == nil { - if originIndex > uint32(len(originTx.Spent)) { + if originTx.Spent == nil { + continue + } + if originIndex >= uint32(len(originTx.Spent)) { continue } originTx.Spent[originIndex] = false @@ -94,6 +221,53 @@ func disconnectTransactions(txStore TxStore, block *btcutil.Block) error { } } + // There is no regular tx from before the genesis block, so ignore the genesis + // block for the next step. + if parent != nil && block.Height() != 0 { + mBlock := block.MsgBlock() + votebits := mBlock.Header.VoteBits + regularTxTreeValid := dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) + + // Only bother to unspend transactions if the parent's tx tree was + // validated. Otherwise, these transactions were never in the blockchain's + // history in the first place. + if regularTxTreeValid { + // Loop through all of the regular transactions in the block to see if + // any of them are ones that need to be undone based on the + // transaction store. + for _, tx := range parent.Transactions() { + // Clear this transaction from the transaction store if needed. + // Only clear it rather than deleting it because the transaction + // connect code relies on its presence to decide whether or not + // to update the store and any transactions which exist on both + // sides of a fork would otherwise not be updated. + if txD, exists := txStore[*tx.Sha()]; exists { + txD.Tx = nil + txD.BlockHeight = int64(wire.NullBlockHeight) + txD.BlockIndex = wire.NullBlockIndex + txD.Spent = nil + txD.Err = database.ErrTxShaMissing + } + + // Unspend the origin transaction output. 
+ for _, txIn := range tx.MsgTx().TxIn { + originHash := &txIn.PreviousOutPoint.Hash + originIndex := txIn.PreviousOutPoint.Index + originTx, exists := txStore[*originHash] + if exists && originTx.Tx != nil && originTx.Err == nil { + if originTx.Spent == nil { + continue + } + if originIndex >= uint32(len(originTx.Spent)) { + continue + } + originTx.Spent[originIndex] = false + } + } + } + } + } + return nil } @@ -101,7 +275,7 @@ func disconnectTransactions(txStore TxStore, block *btcutil.Block) error { // transactions from the point of view of the end of the main chain. It takes // a flag which specifies whether or not fully spent transaction should be // included in the results. -func fetchTxStoreMain(db database.Db, txSet map[wire.ShaHash]struct{}, includeSpent bool) TxStore { +func fetchTxStoreMain(db database.Db, txSet map[chainhash.Hash]struct{}, includeSpent bool) TxStore { // Just return an empty store now if there are no requested hashes. txStore := make(TxStore) if len(txSet) == 0 { @@ -111,7 +285,7 @@ func fetchTxStoreMain(db database.Db, txSet map[wire.ShaHash]struct{}, includeSp // The transaction store map needs to have an entry for every requested // transaction. By default, all the transactions are marked as missing. // Each entry will be filled in with the appropriate data below. - txList := make([]*wire.ShaHash, 0, len(txSet)) + txList := make([]*chainhash.Hash, 0, len(txSet)) for hash := range txSet { hashCopy := hash txStore[hash] = &TxData{Hash: &hashCopy, Err: database.ErrTxShaMissing} @@ -145,8 +319,9 @@ func fetchTxStoreMain(db database.Db, txSet map[wire.ShaHash]struct{}, includeSp // cause subtle errors, so avoid the potential altogether. txD.Err = txReply.Err if txReply.Err == nil { - txD.Tx = btcutil.NewTx(txReply.Tx) + txD.Tx = dcrutil.NewTx(txReply.Tx) txD.BlockHeight = txReply.Height + txD.BlockIndex = txReply.Index txD.Spent = make([]bool, len(txReply.TxSpent)) copy(txD.Spent, txReply.TxSpent) } @@ -155,6 +330,54 @@ func fetchTxStoreMain(db database.Db, txSet map[wire.ShaHash]struct{}, includeSp return txStore } +// handleTxStoreViewpoint appends extra Tx Trees to update to a different +// viewpoint. +func handleTxStoreViewpoint(block *dcrutil.Block, parentBlock *dcrutil.Block, + txStore TxStore, viewpoint int8) error { + // We don't need to do anything for the current top block viewpoint. + if viewpoint == ViewpointPrevValidInitial { + return nil + } + + // ViewpointPrevValidStake: Append the prev block TxTreeRegular + // txs to fill out TxIns. + if viewpoint == ViewpointPrevValidStake { + connectTxTree(txStore, parentBlock, true) + return nil + } + + // ViewpointPrevInvalidStake: Do not append the prev block + // TxTreeRegular txs, since they don't exist. + if viewpoint == ViewpointPrevInvalidStake { + return nil + } + + // ViewpointPrevValidRegular: Append the prev block TxTreeRegular + // txs to fill in TxIns, then append the cur block TxTreeStake + // txs to fill in TxInss. TxTreeRegular from current block will + // never be allowed to spend from the stake tree of the current + // block anyway because of the consensus rules regarding output + // maturity, but do it anyway. + if viewpoint == ViewpointPrevValidRegular { + connectTxTree(txStore, parentBlock, true) + connectTxTree(txStore, block, false) + return nil + } + + // ViewpointPrevInvalidRegular: Append the cur block TxTreeStake + // txs to fill in TxIns. 
TxTreeRegular from current block will + // never be allowed to spend from the stake tree of the current + // block anyway because of the consensus rules regarding output + // maturity, but do it anyway. + if viewpoint == ViewpointPrevInvalidRegular { + connectTxTree(txStore, block, false) + return nil + } + + return fmt.Errorf("Error: invalid viewpoint '0x%x' given to "+ + "handleTxStoreViewpoint", viewpoint) +} + // fetchTxStore fetches transaction data about the provided set of transactions // from the point of view of the given node. For example, a given node might // be down a side chain where a transaction hasn't been spent from its point of @@ -162,7 +385,8 @@ func fetchTxStoreMain(db database.Db, txSet map[wire.ShaHash]struct{}, includeSp // chain). Another scenario is where a transaction exists from the point of // view of the main chain, but doesn't exist in a side chain that branches // before the block that contains the transaction on the main chain. -func (b *BlockChain) fetchTxStore(node *blockNode, txSet map[wire.ShaHash]struct{}) (TxStore, error) { +func (b *BlockChain) fetchTxStore(node *blockNode, block *dcrutil.Block, + txSet map[chainhash.Hash]struct{}, viewpoint int8) (TxStore, error) { // Get the previous block node. This function is used over simply // accessing node.parent directly as it will dynamically create previous // block nodes as needed. This helps allow only the pieces of the chain @@ -171,14 +395,30 @@ func (b *BlockChain) fetchTxStore(node *blockNode, txSet map[wire.ShaHash]struct if err != nil { return nil, err } + // We don't care if the previous node doesn't exist because this + // block is the genesis block. + if prevNode == nil { + return nil, nil + } + + // Get the previous block, too. + prevBlock, err := b.getBlockFromHash(prevNode.hash) + if err != nil { + return nil, err + } // If we haven't selected a best chain yet or we are extending the main // (best) chain with a new block, fetch the requested set from the point // of view of the end of the main (best) chain without including fully // spent transactions in the results. This is a little more efficient // since it means less transaction lookups are needed. - if b.bestChain == nil || (prevNode != nil && prevNode.hash.IsEqual(b.bestChain.hash)) { + if b.bestChain == nil || (prevNode != nil && + prevNode.hash.IsEqual(b.bestChain.hash)) { txStore := fetchTxStoreMain(b.db, txSet, false) + err := handleTxStoreViewpoint(block, prevBlock, txStore, viewpoint) + if err != nil { + return nil, err + } return txStore, nil } @@ -193,15 +433,30 @@ func (b *BlockChain) fetchTxStore(node *blockNode, txSet map[wire.ShaHash]struct // transactions and spend information for the blocks which would be // disconnected during a reorganize to the point of view of the // node just before the requested node. - detachNodes, attachNodes := b.getReorganizeNodes(prevNode) + detachNodes, attachNodes, err := b.getReorganizeNodes(prevNode) + if err != nil { + return nil, err + } + for e := detachNodes.Front(); e != nil; e = e.Next() { n := e.Value.(*blockNode) - block, err := b.db.FetchBlockBySha(n.hash) + blockDisconnect, err := b.db.FetchBlockBySha(n.hash) if err != nil { return nil, err } - disconnectTransactions(txStore, block) + // Load the parent block from either the database or the sidechain. 
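handleTxStoreViewpoint, defined just above, reduces to a small decision table: each viewpoint determines whether the parent block's regular tree and/or the current block's stake tree get layered on top of the base store. A sketch of that mapping, using local constants that mirror the viewpoint values declared earlier in this file:

```go
package main

import "fmt"

// Viewpoint constants mirroring those declared in txlookup.go.
const (
	prevValidInitial   = int8(0)
	prevValidStake     = int8(1)
	prevInvalidStake   = int8(2)
	prevValidRegular   = int8(3)
	prevInvalidRegular = int8(4)
)

// treesToConnect reports, for a given viewpoint, whether the parent block's
// regular tree and the current block's stake tree should be connected on top
// of the base transaction store.
func treesToConnect(viewpoint int8) (parentRegular, currentStake bool, err error) {
	switch viewpoint {
	case prevValidInitial:
		return false, false, nil
	case prevValidStake:
		return true, false, nil
	case prevInvalidStake:
		return false, false, nil
	case prevValidRegular:
		return true, true, nil
	case prevInvalidRegular:
		return false, true, nil
	}
	return false, false, fmt.Errorf("invalid viewpoint %d", viewpoint)
}

func main() {
	pr, cs, _ := treesToConnect(prevValidRegular)
	fmt.Println(pr, cs) // true true: both trees are layered on
}
```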
+ parentHash := &blockDisconnect.MsgBlock().Header.PrevBlock + + parentBlock, errFetchBlock := b.getBlockFromHash(parentHash) + if errFetchBlock != nil { + return nil, errFetchBlock + } + + err = disconnectTransactions(txStore, blockDisconnect, parentBlock) + if err != nil { + return nil, err + } } // The transaction store is now accurate to either the node where the @@ -211,6 +466,11 @@ func (b *BlockChain) fetchTxStore(node *blockNode, txSet map[wire.ShaHash]struct // attachNodes list indicate the requested node is on a side chain, so // if there are no nodes to attach, we're done. if attachNodes.Len() == 0 { + err = handleTxStoreViewpoint(block, prevBlock, txStore, viewpoint) + if err != nil { + return nil, err + } + return txStore, nil } @@ -218,14 +478,30 @@ func (b *BlockChain) fetchTxStore(node *blockNode, txSet map[wire.ShaHash]struct // transactions and spend information from each of the nodes to attach. for e := attachNodes.Front(); e != nil; e = e.Next() { n := e.Value.(*blockNode) - block, exists := b.blockCache[*n.hash] + blockConnect, exists := b.blockCache[*n.hash] if !exists { return nil, fmt.Errorf("unable to find block %v in "+ "side chain cache for transaction search", n.hash) } - connectTransactions(txStore, block) + // Load the parent block from either the database or the sidechain. + parentHash := &blockConnect.MsgBlock().Header.PrevBlock + + parentBlock, errFetchBlock := b.getBlockFromHash(parentHash) + if errFetchBlock != nil { + return nil, errFetchBlock + } + + err = connectTransactions(txStore, blockConnect, parentBlock) + if err != nil { + return nil, err + } + } + + err = handleTxStoreViewpoint(block, prevBlock, txStore, viewpoint) + if err != nil { + return nil, err } return txStore, nil @@ -234,86 +510,247 @@ func (b *BlockChain) fetchTxStore(node *blockNode, txSet map[wire.ShaHash]struct // fetchInputTransactions fetches the input transactions referenced by the // transactions in the given block from its point of view. See fetchTxList // for more details on what the point of view entails. -func (b *BlockChain) fetchInputTransactions(node *blockNode, block *btcutil.Block) (TxStore, error) { - // Build a map of in-flight transactions because some of the inputs in - // this block could be referencing other transactions earlier in this - // block which are not yet in the chain. - txInFlight := map[wire.ShaHash]int{} - transactions := block.Transactions() - for i, tx := range transactions { - txInFlight[*tx.Sha()] = i +// Decred: This function is for verifying the validity of the regular tx tree in +// this block for the case that it does get accepted in the next block. +func (b *BlockChain) fetchInputTransactions(node *blockNode, block *dcrutil.Block, viewpoint int8) (TxStore, error) { + // Verify we have the same node as we do block. + blockHash := block.Sha() + if !node.hash.IsEqual(blockHash) { + return nil, fmt.Errorf("node and block hash are different!") } - // Loop through all of the transaction inputs (except for the coinbase - // which has no inputs) collecting them into sets of what is needed and - // what is already known (in-flight). - txNeededSet := make(map[wire.ShaHash]struct{}) - txStore := make(TxStore) - for i, tx := range transactions[1:] { - for _, txIn := range tx.MsgTx().TxIn { - // Add an entry to the transaction store for the needed - // transaction with it set to missing by default. 
- originHash := &txIn.PreviousOutPoint.Hash - txD := &TxData{Hash: originHash, Err: database.ErrTxShaMissing} - txStore[*originHash] = txD - - // It is acceptable for a transaction input to reference - // the output of another transaction in this block only - // if the referenced transaction comes before the - // current one in this block. Update the transaction - // store acccordingly when this is the case. Otherwise, - // we still need the transaction. - // - // NOTE: The >= is correct here because i is one less - // than the actual position of the transaction within - // the block due to skipping the coinbase. - if inFlightIndex, ok := txInFlight[*originHash]; ok && - i >= inFlightIndex { - - originTx := transactions[inFlightIndex] - txD.Tx = originTx - txD.BlockHeight = node.height - txD.Spent = make([]bool, len(originTx.MsgTx().TxOut)) - txD.Err = nil - } else { - txNeededSet[*originHash] = struct{}{} - } + // If we need the previous block, grab it. + var parentBlock *dcrutil.Block + if viewpoint == ViewpointPrevValidInitial || + viewpoint == ViewpointPrevValidStake || + viewpoint == ViewpointPrevValidRegular { + var errFetchBlock error + parentBlock, errFetchBlock = b.getBlockFromHash(node.parentHash) + if errFetchBlock != nil { + return nil, errFetchBlock } } - // Request the input transactions from the point of view of the node. - txNeededStore, err := b.fetchTxStore(node, txNeededSet) - if err != nil { - return nil, err + txInFlight := map[chainhash.Hash]int{} + txNeededSet := make(map[chainhash.Hash]struct{}) + txStore := make(TxStore) + + // Case 1: ViewpointPrevValidInitial. We need the viewpoint of the + // current chain without the TxTreeRegular of the previous block + // added so we can validate that. + if viewpoint == ViewpointPrevValidInitial { + // Build a map of in-flight transactions because some of the inputs in + // this block could be referencing other transactions earlier in this + // block which are not yet in the chain. + transactions := parentBlock.Transactions() + for i, tx := range transactions { + txInFlight[*tx.Sha()] = i + } + + // Loop through all of the transaction inputs (except for the coinbase + // which has no inputs) collecting them into sets of what is needed and + // what is already known (in-flight). + for i, tx := range transactions[1:] { + for _, txIn := range tx.MsgTx().TxIn { + // Add an entry to the transaction store for the needed + // transaction with it set to missing by default. + originHash := &txIn.PreviousOutPoint.Hash + txD := &TxData{Hash: originHash, Err: database.ErrTxShaMissing} + txStore[*originHash] = txD + + // It is acceptable for a transaction input to reference + // the output of another transaction in this block only + // if the referenced transaction comes before the + // current one in this block. Update the transaction + // store acccordingly when this is the case. Otherwise, + // we still need the transaction. + // + // NOTE: The >= is correct here because i is one less + // than the actual position of the transaction within + // the block due to skipping the coinbase. + if inFlightIndex, ok := txInFlight[*originHash]; ok && + i >= inFlightIndex { + + originTx := transactions[inFlightIndex] + txD.Tx = originTx + txD.BlockHeight = node.height - 1 + txD.BlockIndex = uint32(inFlightIndex) + txD.Spent = make([]bool, len(originTx.MsgTx().TxOut)) + txD.Err = nil + } else { + txNeededSet[*originHash] = struct{}{} + } + } + } + + // Request the input transactions from the point of view of the node. 
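The in-flight bookkeeping above allows an input to reference another transaction in the same block only when the referenced transaction appears earlier; anything else must be fetched from the chain's point of view. A self-contained sketch of that resolution step, using plain string hashes instead of the real types:

```go
package main

import "fmt"

// resolveInFlight mirrors the in-flight bookkeeping in fetchInputTransactions.
// txs is the ordered list of transaction hashes in the block (coinbase first)
// and refs maps each non-coinbase transaction index to the hashes it spends.
func resolveInFlight(txs []string, refs map[int][]string) (inBlock, needed []string) {
	inFlight := make(map[string]int, len(txs))
	for i, hash := range txs {
		inFlight[hash] = i
	}
	for i := 1; i < len(txs); i++ { // skip the coinbase, which has no real inputs
		for _, origin := range refs[i] {
			// The i-1 here mirrors the ">=" comparison in the original code,
			// where the loop index is one less than the true position because
			// the coinbase is skipped.
			if idx, ok := inFlight[origin]; ok && (i-1) >= idx {
				inBlock = append(inBlock, origin)
				continue
			}
			needed = append(needed, origin)
		}
	}
	return inBlock, needed
}

func main() {
	txs := []string{"coinbase", "a", "b"}
	refs := map[int][]string{2: {"a", "x"}} // "b" spends "a" (same block) and "x" (elsewhere)
	inBlock, needed := resolveInFlight(txs, refs)
	fmt.Println(inBlock, needed) // [a] [x]
}
```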
+ txNeededStore, err := b.fetchTxStore(node, block, txNeededSet, viewpoint) + if err != nil { + return nil, err + } + + // Merge the results of the requested transactions and the in-flight + // transactions. + for _, txD := range txNeededStore { + txStore[*txD.Hash] = txD + } + + return txStore, nil } - // Merge the results of the requested transactions and the in-flight - // transactions. - for _, txD := range txNeededStore { - txStore[*txD.Hash] = txD + // Case 2+3: ViewpointPrevValidStake and ViewpointPrevValidStake. + // For ViewpointPrevValidStake, we need the viewpoint of the + // current chain with the TxTreeRegular of the previous block + // added so we can validate the TxTreeStake of the current block. + // For ViewpointPrevInvalidStake, we need the viewpoint of the + // current chain with the TxTreeRegular of the previous block + // missing so we can validate the TxTreeStake of the current block. + if viewpoint == ViewpointPrevValidStake || + viewpoint == ViewpointPrevInvalidStake { + // We need all of the stake tx txins. None of these are considered + // in-flight in relation to the regular tx tree or to other tx in + // the stake tx tree, so don't do any of those expensive checks and + // just append it to the tx slice. + stransactions := block.STransactions() + for _, tx := range stransactions { + isSSGen, _ := stake.IsSSGen(tx) + + for i, txIn := range tx.MsgTx().TxIn { + // Ignore stakebases. + if isSSGen && i == 0 { + continue + } + + // Add an entry to the transaction store for the needed + // transaction with it set to missing by default. + originHash := &txIn.PreviousOutPoint.Hash + txD := &TxData{Hash: originHash, Err: database.ErrTxShaMissing} + txStore[*originHash] = txD + + txNeededSet[*originHash] = struct{}{} + } + } + + // Request the input transactions from the point of view of the node. + txNeededStore, err := b.fetchTxStore(node, block, txNeededSet, viewpoint) + if err != nil { + return nil, err + } + + return txNeededStore, nil } - return txStore, nil + // Case 4+5: ViewpointPrevValidRegular and + // ViewpointPrevInvalidRegular. + // For ViewpointPrevValidRegular, we need the viewpoint of the + // current chain with the TxTreeRegular of the previous block + // and the TxTreeStake of the current block added so we can + // validate the TxTreeRegular of the current block. + // For ViewpointPrevInvalidRegular, we need the viewpoint of the + // current chain with the TxTreeRegular of the previous block + // missing and the TxTreeStake of the current block added so we + // can validate the TxTreeRegular of the current block. + if viewpoint == ViewpointPrevValidRegular || + viewpoint == ViewpointPrevInvalidRegular { + transactions := block.Transactions() + for i, tx := range transactions { + txInFlight[*tx.Sha()] = i + } + + // Loop through all of the transaction inputs (except for the coinbase + // which has no inputs) collecting them into sets of what is needed and + // what is already known (in-flight). + txNeededSet := make(map[chainhash.Hash]struct{}) + txStore = make(TxStore) + for i, tx := range transactions[1:] { + for _, txIn := range tx.MsgTx().TxIn { + // Add an entry to the transaction store for the needed + // transaction with it set to missing by default. 
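A recurring pattern in these stake cases (and in FetchTransactionStore further down) is skipping input 0 of a vote (SSGen) transaction, since the stakebase input does not reference a real previous output. A minimal illustration of that skip when collecting needed origins:

```go
package main

import "fmt"

// neededOrigins collects the previous-output hashes an input set references,
// skipping input 0 when the transaction is a vote (SSGen), because that input
// is the stakebase and points at no real output.
func neededOrigins(inputOrigins []string, isSSGen bool) []string {
	var needed []string
	for i, origin := range inputOrigins {
		if isSSGen && i == 0 {
			continue // stakebase input
		}
		needed = append(needed, origin)
	}
	return needed
}

func main() {
	origins := []string{"stakebase", "ticket-hash"}
	fmt.Println(neededOrigins(origins, true))  // [ticket-hash]
	fmt.Println(neededOrigins(origins, false)) // [stakebase ticket-hash]
}
```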
+ originHash := &txIn.PreviousOutPoint.Hash + txD := &TxData{Hash: originHash, Err: database.ErrTxShaMissing} + txStore[*originHash] = txD + + // It is acceptable for a transaction input to reference + // the output of another transaction in this block only + // if the referenced transaction comes before the + // current one in this block. Update the transaction + // store acccordingly when this is the case. Otherwise, + // we still need the transaction. + // + // NOTE: The >= is correct here because i is one less + // than the actual position of the transaction within + // the block due to skipping the coinbase. + if inFlightIndex, ok := txInFlight[*originHash]; ok && + i >= inFlightIndex { + + originTx := transactions[inFlightIndex] + txD.Tx = originTx + txD.BlockHeight = node.height + txD.BlockIndex = uint32(inFlightIndex) + txD.Spent = make([]bool, len(originTx.MsgTx().TxOut)) + txD.Err = nil + } else { + txNeededSet[*originHash] = struct{}{} + } + } + } + + // Request the input transactions from the point of view of the node. + txNeededStore, err := b.fetchTxStore(node, block, txNeededSet, viewpoint) + if err != nil { + return nil, err + } + + // Merge the results of the requested transactions and the in-flight + // transactions. + for _, txD := range txNeededStore { + txStore[*txD.Hash] = txD + } + + return txStore, nil + } + + return nil, fmt.Errorf("Invalid viewpoint passed to fetchInputTransactions") } // FetchTransactionStore fetches the input transactions referenced by the // passed transaction from the point of view of the end of the main chain. It // also attempts to fetch the transaction itself so the returned TxStore can be // examined for duplicate transactions. -func (b *BlockChain) FetchTransactionStore(tx *btcutil.Tx) (TxStore, error) { +// IsValid indicates if the current block on head has had its TxTreeRegular +// validated by the stake voters. +func (b *BlockChain) FetchTransactionStore(tx *dcrutil.Tx, + isValid bool) (TxStore, error) { + isSSGen, _ := stake.IsSSGen(tx) + // Create a set of needed transactions from the transactions referenced // by the inputs of the passed transaction. Also, add the passed // transaction itself as a way for the caller to detect duplicates. - txNeededSet := make(map[wire.ShaHash]struct{}) + txNeededSet := make(map[chainhash.Hash]struct{}) txNeededSet[*tx.Sha()] = struct{}{} - for _, txIn := range tx.MsgTx().TxIn { + for i, txIn := range tx.MsgTx().TxIn { + // Skip all stakebase inputs. + if isSSGen && (i == 0) { + continue + } + txNeededSet[txIn.PreviousOutPoint.Hash] = struct{}{} } // Request the input transactions from the point of view of the end of - // the main chain without including fully spent trasactions in the + // the main chain without including fully spent transactions in the // results. Fully spent transactions are only needed for chain // reorganization which does not apply here. txStore := fetchTxStoreMain(b.db, txNeededSet, false) + + topBlock, err := b.getBlockFromHash(b.bestChain.hash) + if err != nil { + return nil, err + } + + if isValid { + connectTxTree(txStore, topBlock, true) + } + return txStore, nil } diff --git a/blockchain/validate.go b/blockchain/validate.go index 8d2dd721..88d5866d 100644 --- a/blockchain/validate.go +++ b/blockchain/validate.go @@ -1,34 +1,31 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
package blockchain import ( - "encoding/binary" + "bytes" + "errors" "fmt" "math" "math/big" "time" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) const ( // MaxSigOpsPerBlock is the maximum number of signature operations // allowed for a block. It is a fraction of the max block payload size. - MaxSigOpsPerBlock = wire.MaxBlockPayload / 50 - - // lockTimeThreshold is the number below which a lock time is - // interpreted to be a block number. Since an average of one block - // is generated per 10 minutes, this allows blocks for about 9,512 - // years. However, if the field is interpreted as a timestamp, given - // the lock time is a uint32, the max is sometime around 2106. - lockTimeThreshold uint32 = 5e8 // Tue Nov 5 00:53:20 1985 UTC + MaxSigOpsPerBlock = wire.MaxBlockPayload / 200 // MaxTimeOffsetSeconds is the maximum number of seconds a block time // is allowed to be ahead of the current time. This is currently 2 @@ -45,57 +42,39 @@ const ( // used to calculate the median time used to validate block timestamps. medianTimeBlocks = 11 - // serializedHeightVersion is the block version which changed block - // coinbases to start with the serialized block height. - serializedHeightVersion = 2 - - // baseSubsidy is the starting subsidy amount for mined blocks. This - // value is halved every SubsidyHalvingInterval blocks. - baseSubsidy = 50 * btcutil.SatoshiPerBitcoin - - // CoinbaseMaturity is the number of blocks required before newly - // mined bitcoins (coinbase transactions) can be spent. - CoinbaseMaturity = 100 + // earlyVoteBitsValue is the only value of VoteBits allowed in a block + // header before stake validation height. + earlyVoteBitsValue = 0x0001 ) var ( - // coinbaseMaturity is the internal variable used for validating the - // spending of coinbase outputs. A variable rather than the exported - // constant is used because the tests need the ability to modify it. - coinbaseMaturity = int64(CoinbaseMaturity) - // zeroHash is the zero value for a wire.ShaHash and is defined as // a package level variable to avoid the need to create a new instance // every time a check is needed. - zeroHash = &wire.ShaHash{} - - // block91842Hash is one of the two nodes which violate the rules - // set forth in BIP0030. It is defined as a package level variable to - // avoid the need to create a new instance every time a check is needed. - block91842Hash = newShaHashFromStr("00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec") - - // block91880Hash is one of the two nodes which violate the rules - // set forth in BIP0030. It is defined as a package level variable to - // avoid the need to create a new instance every time a check is needed. - block91880Hash = newShaHashFromStr("00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721") + zeroHash = &chainhash.Hash{} ) // isNullOutpoint determines whether or not a previous transaction output point // is set. 
func isNullOutpoint(outpoint *wire.OutPoint) bool { - if outpoint.Index == math.MaxUint32 && outpoint.Hash.IsEqual(zeroHash) { + if outpoint.Index == math.MaxUint32 && outpoint.Hash.IsEqual(zeroHash) && + outpoint.Tree == dcrutil.TxTreeRegular { return true } return false } -// ShouldHaveSerializedBlockHeight determines if a block should have a -// serialized block height embedded within the scriptSig of its -// coinbase transaction. Judgement is based on the block version in the block -// header. Blocks with version 2 and above satisfy this criteria. See BIP0034 -// for further information. -func ShouldHaveSerializedBlockHeight(header *wire.BlockHeader) bool { - return header.Version >= serializedHeightVersion +// isNullFraudProof determines whether or not a previous transaction fraud proof +// is set. +func isNullFraudProof(txIn *wire.TxIn) bool { + switch { + case txIn.BlockHeight != wire.NullBlockHeight: + return false + case txIn.BlockIndex != wire.NullBlockIndex: + return false + } + + return true } // IsCoinBaseTx determines whether or not a transaction is a coinbase. A coinbase @@ -130,82 +109,13 @@ func IsCoinBaseTx(msgTx *wire.MsgTx) bool { // // This function only differs from IsCoinBaseTx in that it works with a higher // level util transaction as opposed to a raw wire transaction. -func IsCoinBase(tx *btcutil.Tx) bool { +func IsCoinBase(tx *dcrutil.Tx) bool { return IsCoinBaseTx(tx.MsgTx()) } -// IsFinalizedTransaction determines whether or not a transaction is finalized. -func IsFinalizedTransaction(tx *btcutil.Tx, blockHeight int64, blockTime time.Time) bool { - msgTx := tx.MsgTx() - - // Lock time of zero means the transaction is finalized. - lockTime := msgTx.LockTime - if lockTime == 0 { - return true - } - - // The lock time field of a transaction is either a block height at - // which the transaction is finalized or a timestamp depending on if the - // value is before the lockTimeThreshold. When it is under the - // threshold it is a block height. - blockTimeOrHeight := int64(0) - if lockTime < lockTimeThreshold { - blockTimeOrHeight = blockHeight - } else { - blockTimeOrHeight = blockTime.Unix() - } - if int64(lockTime) < blockTimeOrHeight { - return true - } - - // At this point, the transaction's lock time hasn't occured yet, but - // the transaction might still be finalized if the sequence number - // for all transaction inputs is maxed out. - for _, txIn := range msgTx.TxIn { - if txIn.Sequence != math.MaxUint32 { - return false - } - } - return true -} - -// isBIP0030Node returns whether or not the passed node represents one of the -// two blocks that violate the BIP0030 rule which prevents transactions from -// overwriting old ones. -func isBIP0030Node(node *blockNode) bool { - if node.height == 91842 && node.hash.IsEqual(block91842Hash) { - return true - } - - if node.height == 91880 && node.hash.IsEqual(block91880Hash) { - return true - } - - return false -} - -// CalcBlockSubsidy returns the subsidy amount a block at the provided height -// should have. This is mainly used for determining how much the coinbase for -// newly generated blocks awards as well as validating the coinbase for blocks -// has the expected value. -// -// The subsidy is halved every SubsidyHalvingInterval blocks. Mathematically -// this is: baseSubsidy / 2^(height/subsidyHalvingInterval) -// -// At the target block generation rate for the main network, this is -// approximately every 4 years. 
-func CalcBlockSubsidy(height int64, chainParams *chaincfg.Params) int64 { - if chainParams.SubsidyHalvingInterval == 0 { - return baseSubsidy - } - - // Equivalent to: baseSubsidy / 2^(height/subsidyHalvingInterval) - return baseSubsidy >> uint(height/int64(chainParams.SubsidyHalvingInterval)) -} - // CheckTransactionSanity performs some preliminary checks on a transaction to // ensure it is sane. These checks are context free. -func CheckTransactionSanity(tx *btcutil.Tx) error { +func CheckTransactionSanity(tx *dcrutil.Tx, params *chaincfg.Params) error { // A transaction must have at least one input. msgTx := tx.MsgTx() if len(msgTx.TxIn) == 0 { @@ -220,9 +130,9 @@ func CheckTransactionSanity(tx *btcutil.Tx) error { // A transaction must not exceed the maximum allowed block payload when // serialized. serializedTxSize := tx.MsgTx().SerializeSize() - if serializedTxSize > wire.MaxBlockPayload { + if serializedTxSize > params.MaximumBlockSize { str := fmt.Sprintf("serialized transaction is too big - got "+ - "%d, max %d", serializedTxSize, wire.MaxBlockPayload) + "%d, max %d", serializedTxSize, params.MaximumBlockSize) return ruleError(ErrTxTooBig, str) } @@ -230,37 +140,37 @@ func CheckTransactionSanity(tx *btcutil.Tx) error { // output must not be negative or more than the max allowed per // transaction. Also, the total of all outputs must abide by the same // restrictions. All amounts in a transaction are in a unit value known - // as a satoshi. One bitcoin is a quantity of satoshi as defined by the - // SatoshiPerBitcoin constant. - var totalSatoshi int64 + // as a atom. One decred is a quantity of atoms as defined by the + // AtomsPerCoin constant. + var totalAtom int64 for _, txOut := range msgTx.TxOut { - satoshi := txOut.Value - if satoshi < 0 { + atom := txOut.Value + if atom < 0 { str := fmt.Sprintf("transaction output has negative "+ - "value of %v", satoshi) + "value of %v", atom) return ruleError(ErrBadTxOutValue, str) } - if satoshi > btcutil.MaxSatoshi { + if atom > dcrutil.MaxAmount { str := fmt.Sprintf("transaction output value of %v is "+ - "higher than max allowed value of %v", satoshi, - btcutil.MaxSatoshi) + "higher than max allowed value of %v", atom, + dcrutil.MaxAmount) return ruleError(ErrBadTxOutValue, str) } - // TODO(davec): No need to check < 0 here as satoshi is + // TODO(davec): No need to check < 0 here as atom is // guaranteed to be positive per the above check. Also need // to add overflow checks. - totalSatoshi += satoshi - if totalSatoshi < 0 { + totalAtom += atom + if totalAtom < 0 { str := fmt.Sprintf("total value of all transaction "+ - "outputs has negative value of %v", totalSatoshi) + "outputs has negative value of %v", totalAtom) return ruleError(ErrBadTxOutValue, str) } - if totalSatoshi > btcutil.MaxSatoshi { + if totalAtom > dcrutil.MaxAmount { str := fmt.Sprintf("total value of all transaction "+ "outputs is %v which is higher than max "+ - "allowed value of %v", totalSatoshi, - btcutil.MaxSatoshi) + "allowed value of %v", totalAtom, + dcrutil.MaxAmount) return ruleError(ErrBadTxOutValue, str) } } @@ -275,8 +185,24 @@ func CheckTransactionSanity(tx *btcutil.Tx) error { existingTxOut[txIn.PreviousOutPoint] = struct{}{} } + isSSGen, _ := stake.IsSSGen(tx) + // Coinbase script length must be between min and max length. if IsCoinBase(tx) { + // The referenced outpoint should be null. 
+ if !isNullOutpoint(&msgTx.TxIn[0].PreviousOutPoint) { + str := fmt.Sprintf("coinbase transaction did not use a " + + "null outpoint") + return ruleError(ErrBadCoinbaseOutpoint, str) + } + + // The fraud proof should also be null. + if !isNullFraudProof(msgTx.TxIn[0]) { + str := fmt.Sprintf("coinbase transaction fraud proof was " + + "non-null") + return ruleError(ErrBadCoinbaseFraudProof, str) + } + slen := len(msgTx.TxIn[0].SignatureScript) if slen < MinCoinbaseScriptLen || slen > MaxCoinbaseScriptLen { str := fmt.Sprintf("coinbase transaction script length "+ @@ -284,9 +210,38 @@ func CheckTransactionSanity(tx *btcutil.Tx) error { slen, MinCoinbaseScriptLen, MaxCoinbaseScriptLen) return ruleError(ErrBadCoinbaseScriptLen, str) } + } else if isSSGen { + // Check script length of stake base signature. + slen := len(msgTx.TxIn[0].SignatureScript) + if slen < MinCoinbaseScriptLen || slen > MaxCoinbaseScriptLen { + str := fmt.Sprintf("stakebase transaction script length "+ + "of %d is out of range (min: %d, max: %d)", + slen, MinCoinbaseScriptLen, MaxCoinbaseScriptLen) + return ruleError(ErrBadStakebaseScriptLen, str) + } + + // The script must be set to the one specified by the network. + // Check script length of stake base signature. + if !bytes.Equal(msgTx.TxIn[0].SignatureScript, + params.StakeBaseSigScript) { + str := fmt.Sprintf("stakebase transaction signature script "+ + "was set to disallowed value (got %x, want %x)", + msgTx.TxIn[0].SignatureScript, + params.StakeBaseSigScript) + return ruleError(ErrBadStakevaseScrVal, str) + } + + // The ticket reference hash in an SSGen tx must not be null. + ticketHash := &msgTx.TxIn[1].PreviousOutPoint + if isNullOutpoint(ticketHash) { + return ruleError(ErrBadTxInput, "ssgen tx "+ + "ticket input refers to previous output that "+ + "is null") + } } else { // Previous transaction outputs referenced by the inputs to this - // transaction must not be null. + // transaction must not be null except in the case of stake bases + // for SSGen tx. for _, txIn := range msgTx.TxIn { prevOut := &txIn.PreviousOutPoint if isNullOutpoint(prevOut) { @@ -300,6 +255,42 @@ func CheckTransactionSanity(tx *btcutil.Tx) error { return nil } +// checkProofOfStake checks to see that all new SStx tx in a block are actually at +// the network stake target. +func checkProofOfStake(block *dcrutil.Block, posLimit int64) error { + msgBlock := block.MsgBlock() + for _, staketx := range block.STransactions() { + if is, _ := stake.IsSStx(staketx); is { + commitValue := staketx.MsgTx().TxOut[0].Value + + // Check for underflow block sbits. + if commitValue < msgBlock.Header.SBits { + errStr := fmt.Sprintf("Stake tx %v has a commitment value "+ + "less than the minimum stake difficulty specified in "+ + "the block (%v)", + staketx.Sha(), msgBlock.Header.SBits) + return ruleError(ErrNotEnoughStake, errStr) + } + + // Check if it's above the PoS limit. + if commitValue < posLimit { + errStr := fmt.Sprintf("Stake tx %v has a commitment value "+ + "less than the minimum stake difficulty for the "+ + "network (%v)", + staketx.Sha(), posLimit) + return ruleError(ErrStakeBelowMinimum, errStr) + } + } + } + + return nil +} + +// CheckProofOfStake exports the above func. +func CheckProofOfStake(block *dcrutil.Block, posLimit int64) error { + return checkProofOfStake(block, posLimit) +} + // checkProofOfWork ensures the block header bits which indicate the target // difficulty is in min/max range and that the block hash is less than the // target difficulty as claimed. 
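The new checkProofOfStake above enforces two floors on every ticket purchase (SStx) in the stake tree: the amount committed in output 0 must meet the stake difficulty advertised in the block header (SBits) and must also meet the network-wide minimum stake difficulty. Below is a minimal standalone sketch of that comparison; the names are illustrative and none of the real stake/dcrutil plumbing (walking block.STransactions(), stake.IsSStx) is included.

```go
package main

import "fmt"

// checkTicketCommitment mirrors the two comparisons checkProofOfStake makes
// for each SStx: the committed amount must meet the block's advertised stake
// difficulty (SBits) and the network minimum stake difficulty. All values are
// amounts in atoms.
func checkTicketCommitment(commitValue, sbits, posLimit int64) error {
	if commitValue < sbits {
		return fmt.Errorf("commitment %d below block stake difficulty %d",
			commitValue, sbits)
	}
	if commitValue < posLimit {
		return fmt.Errorf("commitment %d below network minimum %d",
			commitValue, posLimit)
	}
	return nil
}

func main() {
	// A ticket committing slightly less than the block's difficulty fails.
	if err := checkTicketCommitment(19998, 20000, 10000); err != nil {
		fmt.Println("rejected:", err)
	}
}
```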
@@ -307,7 +298,8 @@ func CheckTransactionSanity(tx *btcutil.Tx) error { // The flags modify the behavior of this function as follows: // - BFNoPoWCheck: The check to ensure the block hash is less than the target // difficulty is not performed. -func checkProofOfWork(header *wire.BlockHeader, powLimit *big.Int, flags BehaviorFlags) error { +func checkProofOfWork(header *wire.BlockHeader, powLimit *big.Int, + flags BehaviorFlags) error { // The target difficulty must be larger than zero. target := CompactToBig(header.Bits) if target.Sign() <= 0 { @@ -342,7 +334,7 @@ func checkProofOfWork(header *wire.BlockHeader, powLimit *big.Int, flags Behavio // CheckProofOfWork ensures the block header bits which indicate the target // difficulty is in min/max range and that the block hash is less than the // target difficulty as claimed. -func CheckProofOfWork(block *btcutil.Block, powLimit *big.Int) error { +func CheckProofOfWork(block *dcrutil.Block, powLimit *big.Int) error { return checkProofOfWork(&block.MsgBlock().Header, powLimit, BFNone) } @@ -350,13 +342,22 @@ func CheckProofOfWork(block *btcutil.Block, powLimit *big.Int) error { // input and output scripts in the provided transaction. This uses the // quicker, but imprecise, signature operation counting mechanism from // txscript. -func CountSigOps(tx *btcutil.Tx) int { +func CountSigOps(tx *dcrutil.Tx, isCoinBaseTx bool, isSSGen bool) int { msgTx := tx.MsgTx() // Accumulate the number of signature operations in all transaction // inputs. totalSigOps := 0 - for _, txIn := range msgTx.TxIn { + for i, txIn := range msgTx.TxIn { + // Skip coinbase inputs. + if isCoinBaseTx { + continue + } + // Skip stakebase inputs. + if isSSGen && i == 0 { + continue + } + numSigOps := txscript.GetSigOpCount(txIn.SignatureScript) totalSigOps += numSigOps } @@ -375,12 +376,19 @@ func CountSigOps(tx *btcutil.Tx) int { // transactions which are of the pay-to-script-hash type. This uses the // precise, signature operation counting mechanism from the script engine which // requires access to the input transaction scripts. -func CountP2SHSigOps(tx *btcutil.Tx, isCoinBaseTx bool, txStore TxStore) (int, error) { +func CountP2SHSigOps(tx *dcrutil.Tx, isCoinBaseTx bool, isStakeBaseTx bool, + txStore TxStore) (int, error) { // Coinbase transactions have no interesting inputs. if isCoinBaseTx { return 0, nil } + // Stakebase (SSGen) transactions have no P2SH inputs. Same with SSRtx, + // but they will still pass the checks below. + if isStakeBaseTx { + return 0, nil + } + // Accumulate the number of signature operations in all transaction // inputs. msgTx := tx.MsgTx() @@ -442,7 +450,12 @@ func CountP2SHSigOps(tx *btcutil.Tx, isCoinBaseTx bool, txStore TxStore) (int, e // // The flags do not modify the behavior of this function directly, however they // are needed to pass along to checkProofOfWork. -func checkBlockHeaderSanity(header *wire.BlockHeader, powLimit *big.Int, timeSource MedianTimeSource, flags BehaviorFlags) error { +func checkBlockHeaderSanity(block *dcrutil.Block, timeSource MedianTimeSource, + flags BehaviorFlags, chainParams *chaincfg.Params) error { + powLimit := chainParams.PowLimit + posLimit := chainParams.MinimumStakeDiff + header := &block.MsgBlock().Header + // Ensure the proof of work bits in the block header is in min/max range // and the block hash is less than the target value described by the // bits. 
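CountSigOps now takes isCoinBaseTx and isSSGen flags so that coinbase inputs and the stakebase input of a vote are excluded from the per-block signature operation tally. The sketch below shows only that skipping logic, with a stub counter standing in for txscript.GetSigOpCount; the function and variable names are illustrative, not the package's API.

```go
package main

import "fmt"

// sigOpCount stands in for txscript.GetSigOpCount; here it simply pretends
// every non-empty signature script contains one signature operation.
func sigOpCount(sigScript []byte) int {
	if len(sigScript) == 0 {
		return 0
	}
	return 1
}

// countInputSigOps mirrors the input loop in the reworked CountSigOps: all
// inputs of a coinbase are skipped, and for an SSGen (vote) only input 0,
// the stakebase, is skipped, since neither carries a signature script that
// should count against the per-block signature operation limit.
func countInputSigOps(sigScripts [][]byte, isCoinBase, isSSGen bool) int {
	total := 0
	for i, script := range sigScripts {
		if isCoinBase {
			continue
		}
		if isSSGen && i == 0 {
			continue
		}
		total += sigOpCount(script)
	}
	return total
}

func main() {
	scripts := [][]byte{{0x00}, {0x01}, {0x02}}
	fmt.Println(countInputSigOps(scripts, false, true)) // 2: stakebase input skipped
	fmt.Println(countInputSigOps(scripts, true, false)) // 0: coinbase inputs skipped
}
```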
@@ -451,6 +464,13 @@ func checkBlockHeaderSanity(header *wire.BlockHeader, powLimit *big.Int, timeSou return err } + // Check to make sure that all newly purchased tickets meet the difficulty + // specified in the block. + err = checkProofOfStake(block, posLimit) + if err != nil { + return err + } + // A block timestamp must not have a greater precision than one second. // This check is necessary because Go time.Time values support // nanosecond precision whereas the consensus rules only apply to @@ -463,8 +483,7 @@ func checkBlockHeaderSanity(header *wire.BlockHeader, powLimit *big.Int, timeSou } // Ensure the block time is not too far in the future. - maxTimestamp := timeSource.AdjustedTime().Add(time.Second * - MaxTimeOffsetSeconds) + maxTimestamp := time.Now().Add(time.Second * MaxTimeOffsetSeconds) if header.Timestamp.After(maxTimestamp) { str := fmt.Sprintf("block timestamp of %v is too far in the "+ "future", header.Timestamp) @@ -479,15 +498,17 @@ func checkBlockHeaderSanity(header *wire.BlockHeader, powLimit *big.Int, timeSou // // The flags do not modify the behavior of this function directly, however they // are needed to pass along to checkBlockHeaderSanity. -func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource MedianTimeSource, flags BehaviorFlags) error { +func checkBlockSanity(block *dcrutil.Block, timeSource MedianTimeSource, + flags BehaviorFlags, chainParams *chaincfg.Params) error { + msgBlock := block.MsgBlock() header := &msgBlock.Header - err := checkBlockHeaderSanity(header, powLimit, timeSource, flags) + err := checkBlockHeaderSanity(block, timeSource, flags, chainParams) if err != nil { return err } - // A block must have at least one transaction. + // A block must have at least one regular transaction. numTx := len(msgBlock.Transactions) if numTx == 0 { return ruleError(ErrNoTransactions, "block does not contain "+ @@ -495,22 +516,36 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median } // A block must not have more transactions than the max block payload. - if numTx > wire.MaxBlockPayload { + if numTx > chainParams.MaximumBlockSize { str := fmt.Sprintf("block contains too many transactions - "+ - "got %d, max %d", numTx, wire.MaxBlockPayload) + "got %d, max %d", numTx, chainParams.MaximumBlockSize) + return ruleError(ErrTooManyTransactions, str) + } + + // A block must not have more stake transactions than the max block payload. + numStakeTx := len(msgBlock.STransactions) + if numStakeTx > chainParams.MaximumBlockSize { + str := fmt.Sprintf("block contains too many stake transactions - "+ + "got %d, max %d", numStakeTx, chainParams.MaximumBlockSize) return ruleError(ErrTooManyTransactions, str) } // A block must not exceed the maximum allowed block payload when // serialized. serializedSize := msgBlock.SerializeSize() - if serializedSize > wire.MaxBlockPayload { + if serializedSize > chainParams.MaximumBlockSize { str := fmt.Sprintf("serialized block is too big - got %d, "+ - "max %d", serializedSize, wire.MaxBlockPayload) + "max %d", serializedSize, chainParams.MaximumBlockSize) return ruleError(ErrBlockTooBig, str) } + if msgBlock.Header.Size != uint32(serializedSize) { + str := fmt.Sprintf("serialized block is not size indicated in "+ + "header - got %d, expected %d", msgBlock.Header.Size, + serializedSize) + return ruleError(ErrWrongBlockSize, str) + } - // The first transaction in a block must be a coinbase. + // The first transaction in a block's txtreeregular must be a coinbase. 
transactions := block.Transactions() if !IsCoinBase(transactions[0]) { return ruleError(ErrFirstTxNotCoinbase, "first transaction in "+ @@ -529,11 +564,34 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median // Do some preliminary checks on each transaction to ensure they are // sane before continuing. for _, tx := range transactions { - err := CheckTransactionSanity(tx) + txType := stake.DetermineTxType(tx) + if txType != stake.TxTypeRegular { + errStr := fmt.Sprintf("found stake tx in regular tx tree") + return ruleError(ErrStakeTxInRegularTree, errStr) + } + err := CheckTransactionSanity(tx, chainParams) if err != nil { return err } } + for _, stx := range block.STransactions() { + txType := stake.DetermineTxType(stx) + if txType == stake.TxTypeRegular { + errStr := fmt.Sprintf("found regular tx in stake tx tree") + return ruleError(ErrRegTxInStakeTree, errStr) + } + err := CheckTransactionSanity(stx, chainParams) + if err != nil { + return err + } + } + + // Check that the coinbase pays the tax, if applicable. + err = CoinbasePaysTax(block.Transactions()[0], header.Height, header.Voters, + chainParams) + if err != nil { + return err + } // Build merkle tree and ensure the calculated merkle root matches the // entry in the block header. This also has the effect of caching all @@ -550,11 +608,24 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median return ruleError(ErrBadMerkleRoot, str) } + // Build the stake tx tree merkle root too and check it. + merkleStake := BuildMerkleTreeStore(block.STransactions()) + calculatedStakeMerkleRoot := merkleStake[len(merkleStake)-1] + if !header.StakeRoot.IsEqual(calculatedStakeMerkleRoot) { + str := fmt.Sprintf("block stake merkle root is invalid - block "+ + "header indicates %v, but calculated value is %v", + header.StakeRoot, calculatedStakeMerkleRoot) + return ruleError(ErrBadMerkleRoot, str) + } + // Check for duplicate transactions. This check will be fairly quick // since the transaction hashes are already cached due to building the // merkle tree above. - existingTxHashes := make(map[wire.ShaHash]struct{}) - for _, tx := range transactions { + existingTxHashes := make(map[chainhash.Hash]struct{}) + stakeTransactions := block.STransactions() + allTransactions := append(transactions, stakeTransactions...) + + for _, tx := range allTransactions { hash := tx.Sha() if _, exists := existingTxHashes[*hash]; exists { str := fmt.Sprintf("block contains duplicate "+ @@ -567,11 +638,15 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median // The number of signature operations must be less than the maximum // allowed per block. totalSigOps := 0 - for _, tx := range transactions { + for _, tx := range allTransactions { // We could potentially overflow the accumulator so check for // overflow. lastSigOps := totalSigOps - totalSigOps += CountSigOps(tx) + + isSSGen, _ := stake.IsSSGen(tx) + isCoinBase := IsCoinBase(tx) + + totalSigOps += CountSigOps(tx, isCoinBase, isSSGen) if totalSigOps < lastSigOps || totalSigOps > MaxSigOpsPerBlock { str := fmt.Sprintf("block contains too many signature "+ "operations - got %v, max %v", totalSigOps, @@ -580,13 +655,33 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median } } + // Blocks before stake validation height may only have 0x0001 + // as their VoteBits in the header. 
+ if int64(header.Height) < chainParams.StakeValidationHeight { + if header.VoteBits != earlyVoteBitsValue { + str := fmt.Sprintf("pre stake validation height block %v "+ + "contained an invalid votebits value (expected %v, "+ + "got %v)", block.Sha(), earlyVoteBitsValue, + header.VoteBits) + return ruleError(ErrInvalidEarlyVoteBits, str) + } + } + return nil } // CheckBlockSanity performs some preliminary checks on a block to ensure it is // sane before continuing with block processing. These checks are context free. -func CheckBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource MedianTimeSource) error { - return checkBlockSanity(block, powLimit, timeSource, BFNone) +func CheckBlockSanity(block *dcrutil.Block, timeSource MedianTimeSource, + chainParams *chaincfg.Params) error { + return checkBlockSanity(block, timeSource, BFNone, chainParams) +} + +// CheckBlockSanity performs some preliminary checks on a block to ensure it is +// sane before continuing with block processing. These checks are context free. +func CheckWorklessBlockSanity(block *dcrutil.Block, timeSource MedianTimeSource, + chainParams *chaincfg.Params) error { + return checkBlockSanity(block, timeSource, BFNoPoWCheck, chainParams) } // checkBlockHeaderContext peforms several validation checks on the block header @@ -595,7 +690,8 @@ func CheckBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median // The flags modify the behavior of this function as follows: // - BFFastAdd: All checks except those involving comparing the header against // the checkpoints are not performed. -func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, prevNode *blockNode, flags BehaviorFlags) error { +func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, + prevNode *blockNode, flags BehaviorFlags) error { // The genesis block is valid by definition. if prevNode == nil { return nil @@ -660,21 +756,13 @@ func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, prevNode } if !fastAdd { - // Reject version 2 blocks once a majority of the network has - // upgraded. This is part of BIP0066. - if header.Version < 3 && b.isMajorityVersion(3, prevNode, - b.chainParams.BlockRejectNumRequired) { - - str := "new blocks with version %d are no longer valid" - str = fmt.Sprintf(str, header.Version) - return ruleError(ErrBlockVersionTooOld, str) - } - - // Reject version 1 blocks once a majority of the network has - // upgraded. This is part of BIP0034. - if header.Version < 2 && b.isMajorityVersion(2, prevNode, - b.chainParams.BlockRejectNumRequired) { - + // Reject old version blocks once a majority of the network has + // upgraded. + mv := b.chainParams.CurrentBlockVersion + if header.Version < mv && + b.isMajorityVersion(mv, + prevNode, + b.chainParams.CurrentBlockVersion) { str := "new blocks with version %d are no longer valid" str = fmt.Sprintf(str, header.Version) return ruleError(ErrBlockVersionTooOld, str) @@ -684,110 +772,6 @@ func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, prevNode return nil } -// checkBlockContext peforms several validation checks on the block which depend -// on its position within the block chain. -// -// The flags modify the behavior of this function as follows: -// - BFFastAdd: The transaction are not checked to see if they are finalized -// and the somewhat expensive BIP0034 validation is not performed. -// -// The flags are also passed to checkBlockHeaderContext. See its documentation -// for how the flags modify its behavior. 
-func (b *BlockChain) checkBlockContext(block *btcutil.Block, prevNode *blockNode, flags BehaviorFlags) error { - // The genesis block is valid by definition. - if prevNode == nil { - return nil - } - - // Perform all block header related validation checks. - header := &block.MsgBlock().Header - err := b.checkBlockHeaderContext(header, prevNode, flags) - if err != nil { - return err - } - - fastAdd := flags&BFFastAdd == BFFastAdd - if !fastAdd { - // The height of this block is one more than the referenced - // previous block. - blockHeight := prevNode.height + 1 - - // Ensure all transactions in the block are finalized. - for _, tx := range block.Transactions() { - if !IsFinalizedTransaction(tx, blockHeight, - header.Timestamp) { - - str := fmt.Sprintf("block contains unfinalized "+ - "transaction %v", tx.Sha()) - return ruleError(ErrUnfinalizedTx, str) - } - } - - // Ensure coinbase starts with serialized block heights for - // blocks whose version is the serializedHeightVersion or newer - // once a majority of the network has upgraded. This is part of - // BIP0034. - if ShouldHaveSerializedBlockHeight(header) && - b.isMajorityVersion(serializedHeightVersion, prevNode, - b.chainParams.BlockEnforceNumRequired) { - - coinbaseTx := block.Transactions()[0] - err := checkSerializedHeight(coinbaseTx, blockHeight) - if err != nil { - return err - } - } - } - - return nil -} - -// ExtractCoinbaseHeight attempts to extract the height of the block from the -// scriptSig of a coinbase transaction. Coinbase heights are only present in -// blocks of version 2 or later. This was added as part of BIP0034. -func ExtractCoinbaseHeight(coinbaseTx *btcutil.Tx) (int64, error) { - sigScript := coinbaseTx.MsgTx().TxIn[0].SignatureScript - if len(sigScript) < 1 { - str := "the coinbase signature script for blocks of " + - "version %d or greater must start with the " + - "length of the serialized block height" - str = fmt.Sprintf(str, serializedHeightVersion) - return 0, ruleError(ErrMissingCoinbaseHeight, str) - } - - serializedLen := int(sigScript[0]) - if len(sigScript[1:]) < serializedLen { - str := "the coinbase signature script for blocks of " + - "version %d or greater must start with the " + - "serialized block height" - str = fmt.Sprintf(str, serializedLen) - return 0, ruleError(ErrMissingCoinbaseHeight, str) - } - - serializedHeightBytes := make([]byte, 8, 8) - copy(serializedHeightBytes, sigScript[1:serializedLen+1]) - serializedHeight := binary.LittleEndian.Uint64(serializedHeightBytes) - - return int64(serializedHeight), nil -} - -// checkSerializedHeight checks if the signature script in the passed -// transaction starts with the serialized block height of wantHeight. -func checkSerializedHeight(coinbaseTx *btcutil.Tx, wantHeight int64) error { - serializedHeight, err := ExtractCoinbaseHeight(coinbaseTx) - if err != nil { - return err - } - - if serializedHeight != wantHeight { - str := fmt.Sprintf("the coinbase signature script serialized "+ - "block height is %d when %d was expected", - serializedHeight, wantHeight) - return ruleError(ErrBadCoinbaseHeight, str) - } - return nil -} - // isTransactionSpent returns whether or not the provided transaction data // describes a fully spent transaction. A fully spent transaction is one where // all outputs have been spent. @@ -800,26 +784,19 @@ func isTransactionSpent(txD *TxData) bool { return true } -// checkBIP0030 ensures blocks do not contain duplicate transactions which -// 'overwrite' older transactions that are not fully spent. 
This prevents an -// attack where a coinbase and all of its dependent transactions could be -// duplicated to effectively revert the overwritten transactions to a single -// confirmation thereby making them vulnerable to a double spend. +// checkDupTxsMain ensures blocks do not contain duplicate +// transactions which 'overwrite' older transactions that are not fully +// spent. This prevents an attack where a coinbase and all of its +// dependent transactions could be duplicated to effectively revert the +// overwritten transactions to a single confirmation thereby making +// them vulnerable to a double spend. // // For more details, see https://en.bitcoin.it/wiki/BIP_0030 and // http://r6.ca/blog/20120206T005236Z.html. -func (b *BlockChain) checkBIP0030(node *blockNode, block *btcutil.Block) error { - // Attempt to fetch duplicate transactions for all of the transactions - // in this block from the point of view of the parent node. - fetchSet := make(map[wire.ShaHash]struct{}) - for _, tx := range block.Transactions() { - fetchSet[*tx.Sha()] = struct{}{} - } - txResults, err := b.fetchTxStore(node, fetchSet) - if err != nil { - return err - } - +// +// Decred: Check the stake transactions to make sure they don't have this txid +// too. +func (b *BlockChain) checkDupTxsMain(txResults TxStore) error { // Examine the resulting data about the requested transactions. for _, txD := range txResults { switch txD.Err { @@ -848,91 +825,1124 @@ func (b *BlockChain) checkBIP0030(node *blockNode, block *btcutil.Block) error { return nil } +// checkDupTxs is a local function to check for duplicate tx hashes in +// blocks that spend from tx hashes that are not yet totally spent. +func (b *BlockChain) checkDupTxs(node *blockNode, parentNode *blockNode, + block *dcrutil.Block, parentBlock *dcrutil.Block) error { + // Attempt to fetch duplicate transactions for all of the transactions + // in this block from the point of view of the parent node and the + // sequential addition of different TxTrees. + + // Genesis block. + if parentNode == nil { + return nil + } + + // Parent TxTreeRegular (if applicable). + regularTxTreeValid := dcrutil.IsFlagSet16(node.header.VoteBits, + dcrutil.BlockValid) + thisNodeStakeViewpoint := ViewpointPrevInvalidStake + thisNodeRegularViewpoint := ViewpointPrevInvalidRegular + + if regularTxTreeValid { + fetchSet := make(map[chainhash.Hash]struct{}) + + for _, tx := range parentBlock.Transactions() { + fetchSet[*tx.Sha()] = struct{}{} + } + + txResults, err := b.fetchTxStore(parentNode, block, fetchSet, + ViewpointPrevValidInitial) + if err != nil { + log.Tracef("Failed to fetch TxTreeRegular viewpoint of prev "+ + "block: %v", err.Error()) + return err + } + + err = b.checkDupTxsMain(txResults) + if err != nil { + str := fmt.Sprintf("Failed dup tx check of TxTreeRegular of prev "+ + "block: %v", err.Error()) + return ruleError(ErrDuplicateTx, str) + } + + // TxTreeRegular of previous block is valid, so change the viewpoint + // below. 
+ thisNodeStakeViewpoint = ViewpointPrevValidStake + thisNodeRegularViewpoint = ViewpointPrevValidRegular + } + + fetchSetStake := make(map[chainhash.Hash]struct{}) + + for _, tx := range block.STransactions() { + fetchSetStake[*tx.Sha()] = struct{}{} + } + + txResults, err := b.fetchTxStore(node, block, fetchSetStake, + thisNodeStakeViewpoint) + if err != nil { + log.Tracef("Failed to fetch TxTreeStake viewpoint of cur "+ + "block: %v", err.Error()) + return err + } + + err = b.checkDupTxsMain(txResults) + if err != nil { + str := fmt.Sprintf("Failed dup tx check of TxTreeStake of cur "+ + "block: %v", err.Error()) + return ruleError(ErrDuplicateTx, str) + } + + fetchSetRegular := make(map[chainhash.Hash]struct{}) + + for _, tx := range block.Transactions() { + fetchSetRegular[*tx.Sha()] = struct{}{} + } + + txResults, err = b.fetchTxStore(node, block, fetchSetRegular, + thisNodeRegularViewpoint) + if err != nil { + log.Tracef("Failed to fetch TxTreeRegular viewpoint of cur "+ + "block: %v", err.Error()) + return err + } + + err = b.checkDupTxsMain(txResults) + if err != nil { + str := fmt.Sprintf("Failed dup tx check of TxTreeRegular of cur "+ + "block: %v", err.Error()) + ruleError(ErrDuplicateTx, str) + } + + return nil +} + +// CheckBlockStakeSanity performs a series of checks on a block to ensure that the +// information from the block's header about stake is sane. For instance, the +// number of SSGen tx must be equal to voters. +// TODO: We can consider breaking this into two functions and making some of these +// checks go through in processBlock, however if a block has demonstrable PoW it +// seems unlikely that it will have stake errors (because the miner is then just +// wasting hash power). +func (b *BlockChain) CheckBlockStakeSanity(tixStore TicketStore, + stakeValidationHeight int64, node *blockNode, block *dcrutil.Block, + parent *dcrutil.Block, chainParams *chaincfg.Params) error { + + // Setup variables. + stakeTransactions := block.STransactions() + msgBlock := block.MsgBlock() + voters := msgBlock.Header.Voters + freshstake := msgBlock.Header.FreshStake + revocations := msgBlock.Header.Revocations + sbits := msgBlock.Header.SBits + blockSha := block.Sha() + prevBlockHash := &msgBlock.Header.PrevBlock + poolSize := int(msgBlock.Header.PoolSize) + finalState := node.header.FinalState + + ticketsPerBlock := int(b.chainParams.TicketsPerBlock) + + txTreeRegularValid := dcrutil.IsFlagSet16(msgBlock.Header.VoteBits, + dcrutil.BlockValid) + + stakeEnabledHeight := chainParams.StakeEnabledHeight + + // Do some preliminary checks on each stake transaction to ensure they + // are sane before continuing. + ssGens := 0 // Votes + ssRtxs := 0 // Revocations + for i, tx := range stakeTransactions { + isSSGen, _ := stake.IsSSGen(tx) + isSSRtx, _ := stake.IsSSRtx(tx) + + if isSSGen { + ssGens++ + } + + if isSSRtx { + ssRtxs++ + } + + // If we haven't reached the point in which staking is enabled, there + // should be absolutely no SSGen or SSRtx transactions. + if (isSSGen && (block.Height() < stakeEnabledHeight)) || + (isSSRtx && (block.Height() < stakeEnabledHeight)) { + errStr := fmt.Sprintf("block contained SSGen or SSRtx "+ + "transaction at idx %v, which was before stake voting"+ + " was enabled; block height %v, stake enabled height "+ + "%v", i, block.Height(), stakeEnabledHeight) + return ruleError(ErrInvalidEarlyStakeTx, errStr) + } + } + + // Make sure we have no votes or revocations if stake validation is + // not enabled. 
+ containsVotes := ssGens > 0 + containsRevocations := ssRtxs > 0 + if node.height < chainParams.StakeValidationHeight && + (containsVotes || containsRevocations) { + errStr := fmt.Sprintf("block contained votes or revocations " + + "before the stake validation height") + return ruleError(ErrInvalidEarlyStakeTx, errStr) + } + // Check the number of voters if stake validation is enabled. + if node.height >= chainParams.StakeValidationHeight { + // Too many voters on this block. + if ssGens > int(chainParams.TicketsPerBlock) { + errStr := fmt.Sprintf("block contained too many votes! "+ + "%v votes but only %v max allowed", ssGens, + chainParams.TicketsPerBlock) + return ruleError(ErrTooManyVotes, errStr) + } + + // Not enough voters on this block. + if block.Height() >= stakeValidationHeight && + ssGens <= int(chainParams.TicketsPerBlock)/2 { + errStr := fmt.Sprintf("block contained too few votes! "+ + "%v votes but %v or more required", ssGens, + (int(chainParams.TicketsPerBlock)/2)+1) + return ruleError(ErrNotEnoughVotes, errStr) + } + } + + // ---------------------------------------------------------------------------- + // SStx Tx Handling + // ---------------------------------------------------------------------------- + // PER SSTX + // 1. Check to make sure that the amount committed with the SStx is equal to + // the target of the last block (sBits). + // 2. Ensure the the number of SStx tx in the block is the same as FreshStake + // in the header. + // PER BLOCK + // 3. Check to make sure we haven't exceeded max number of new SStx. + + numSStxTx := 0 + + for _, staketx := range stakeTransactions { + if is, _ := stake.IsSStx(staketx); is { + numSStxTx++ + + // 1. Make sure that we're committing enough coins. Checked already + // when we check stake difficulty, so may not be needed. + if staketx.MsgTx().TxOut[0].Value < sbits { + txSha := staketx.Sha() + errStr := fmt.Sprintf("Error in stake consensus: the amount "+ + "committed in SStx %v was less than the sBits value %v", + txSha, sbits) + return ruleError(ErrNotEnoughStake, errStr) + } + } + } + + // 2. Ensure the the number of SStx tx in the block is the same as FreshStake + // in the header. + if uint8(numSStxTx) != freshstake { + errStr := fmt.Sprintf("Error in stake consensus: the number of SStx tx "+ + "in block %v was %v, however in the header freshstake is %v", blockSha, + numSStxTx, freshstake) + return ruleError(ErrFreshStakeMismatch, errStr) + } + + // 3. Check to make sure we haven't exceeded max number of new SStx. May not + // need this check, as the above one should fail if you overflow uint8. + if numSStxTx > int(chainParams.MaxFreshStakePerBlock) { + errStr := fmt.Sprintf("Error in stake consensus: the number of SStx tx "+ + "in block %v was %v, overflowing the maximum allowed (255)", blockSha, + numSStxTx) + return ruleError(ErrTooManySStxs, errStr) + } + + // Break if the stake system is otherwise disabled ---------------------------- + if block.Height() < stakeValidationHeight { + stakeTxSum := numSStxTx + + // Check and make sure we're only including SStx in the stake tx tree. + if stakeTxSum != len(stakeTransactions) { + errStr := fmt.Sprintf("Error in stake consensus: the number of "+ + "stake tx in block %v was %v, however we expected %v", + block.Sha(), stakeTxSum, len(stakeTransactions)) + return ruleError(ErrInvalidEarlyStakeTx, errStr) + } + + // Check the ticket pool size. 
+ _, calcPoolSize, _, err := b.getWinningTicketsInclStore(node, tixStore) + if err != nil { + log.Tracef("failed to retrieve poolsize for stake "+ + "consensus: %v", err.Error()) + return err + } + + if calcPoolSize != poolSize { + errStr := fmt.Sprintf("Error in stake consensus: the poolsize "+ + "in block %v was %v, however we expected %v", + node.hash, + poolSize, + calcPoolSize) + return ruleError(ErrPoolSize, errStr) + } + + return nil + } + + // ---------------------------------------------------------------------------- + // General Purpose Checks + // ---------------------------------------------------------------------------- + // 1. Check that we have a majority vote of potential voters. + + // 1. Check to make sure we have a majority of the potential voters voting. + if msgBlock.Header.Voters == 0 { + errStr := fmt.Sprintf("Error: no voters in block %v", + blockSha) + return ruleError(ErrNotEnoughVotes, errStr) + } + + majority := (chainParams.TicketsPerBlock / 2) + 1 + if msgBlock.Header.Voters < majority { + errStr := fmt.Sprintf("Error in stake consensus: the number of voters is "+ + "not in the majority as compared to potential votes for block %v", + blockSha) + return ruleError(ErrNotEnoughVotes, errStr) + } + + // ---------------------------------------------------------------------------- + // SSGen Tx Handling + // ---------------------------------------------------------------------------- + // PER SSGEN + // 1. Retrieve an emulated ticket database of SStxMemMaps from both the + // ticket database and the ticket store. + // 2. Check to ensure that the tickets included in the block are the ones + // that indeed should have been included according to the emulated + // ticket database. + // 3. Check to make sure that the SSGen votes on the correct block/height. + + // PER BLOCK + // 4. Check and make sure that we have the same number of SSGen tx as we do + // votes. + // 5. Check for voters overflows (highly unlikely, but check anyway). + // 6. Ensure that the block votes on tx tree regular of the previous block in + // the way of the majority of the voters. + // 7. Check final state and ensure that it matches. + + // Store the number of SSGen tx and votes to check later. + numSSGenTx := 0 + voteYea := 0 + voteNay := 0 + + // 1. Retrieve an emulated ticket database of SStxMemMaps from both the + // ticket database and the ticket store. + ticketsWhichCouldBeUsed := make(map[chainhash.Hash]struct{}, ticketsPerBlock) + ticketSlice, calcPoolSize, finalStateCalc, err := + b.getWinningTicketsInclStore(node, tixStore) + if err != nil { + errStr := fmt.Sprintf("unexpected getWinningTicketsInclStore error: %v", + err.Error()) + return errors.New(errStr) + } + + // 2. Obtain the tickets which could have been used on the block for votes + // and then check below to make sure that these were indeed the tickets + // used. + for _, ticketHash := range ticketSlice { + ticketsWhichCouldBeUsed[ticketHash] = struct{}{} + } + + for _, staketx := range stakeTransactions { + if is, _ := stake.IsSSGen(staketx); is { + numSSGenTx++ + + // Check and store the vote for TxTreeRegular. + ssGenVoteBits := stake.GetSSGenVoteBits(staketx) + if dcrutil.IsFlagSet16(ssGenVoteBits, dcrutil.BlockValid) { + voteYea++ + } else { + voteNay++ + } + + // Grab the input SStx hash from the inputs of the transaction. + msgTx := staketx.MsgTx() + sstxIn := msgTx.TxIn[1] // sstx input + sstxHash := sstxIn.PreviousOutPoint.Hash + + // Check to make sure this was actually a ticket we were allowed to + // use. 
+ _, ticketAvailable := ticketsWhichCouldBeUsed[sstxHash] + if !ticketAvailable { + errStr := fmt.Sprintf("Error in stake consensus: Ticket %v was "+ + "not found to be available in the stake patch or database, "+ + "yet block %v spends it!", sstxHash, blockSha) + return ruleError(ErrTicketUnavailable, errStr) + } + + // 3. Check to make sure that the SSGen tx votes on the parent block of + // the block in which it is included. + votedOnSha, votedOnHeight, err := stake.GetSSGenBlockVotedOn(staketx) + if err != nil { + errStr := fmt.Sprintf("unexpected vote tx decode error: %v", + err.Error()) + return ruleError(ErrUnparseableSSGen, errStr) + } + + if !(votedOnSha.IsEqual(prevBlockHash)) || + (votedOnHeight != uint32(block.Height())-1) { + txSha := msgTx.TxSha() + errStr := fmt.Sprintf("Error in stake consensus: SSGen %v voted "+ + "on block %v at height %v, however it was found inside "+ + "block %v at height %v!", txSha, votedOnSha, + votedOnHeight, prevBlockHash, block.Height()-1) + return ruleError(ErrVotesOnWrongBlock, errStr) + } + } + } + + // 4. Check and make sure that we have the same number of SSGen tx as we do + // votes. + if uint16(numSSGenTx) != voters { + errStr := fmt.Sprintf("Error in stake consensus: The number of SSGen tx"+ + " in block %v was not the %v voters expected from the header!", + blockSha, voters) + return ruleError(ErrVotesMismatch, errStr) + } + + // 5. Check for too many voters (already checked in block sanity, but check + // again). + if numSSGenTx > int(chainParams.TicketsPerBlock) { + errStr := fmt.Sprintf("Error in stake consensus: the number of SSGen tx "+ + "in block %v was %v, overflowing the maximum allowed (%v)", + blockSha, numSSGenTx, int(chainParams.TicketsPerBlock)) + return ruleError(ErrTooManyVotes, errStr) + } + + // 6. Determine if TxTreeRegular should be valid or not, and then check it + // against what is provided in the block header. + if (voteYea <= voteNay) && txTreeRegularValid { + errStr := fmt.Sprintf("Error in stake consensus: the voters voted "+ + "against parent TxTreeRegular inclusion in block %v, but the "+ + "block header indicates it was voted for", blockSha) + return ruleError(ErrIncongruentVotebit, errStr) + } + if (voteYea > voteNay) && !txTreeRegularValid { + errStr := fmt.Sprintf("Error in stake consensus: the voters voted "+ + "for parent TxTreeRegular inclusion in block %v, but the "+ + "block header indicates it was voted against", blockSha) + return ruleError(ErrIncongruentVotebit, errStr) + } + + // 7. Check the final state of the lottery PRNG and ensure that it matches. + if finalStateCalc != finalState { + errStr := fmt.Sprintf("Error in stake consensus: the final state of "+ + "the lottery PRNG was calculated to be %x, but %x was found in "+ + "the block", finalStateCalc, finalState) + return ruleError(ErrInvalidFinalState, errStr) + } + + // ---------------------------------------------------------------------------- + // SSRtx Tx Handling + // ---------------------------------------------------------------------------- + // PER SSRTX + // 1. Ensure that the SSRtx has been marked missed in the ticket patch data + // and, if not, ensure it has been marked missed in the ticket database. + // 2. Ensure that at least ticketMaturity many blocks has passed since the + // SStx it references was included in the blockchain. + // PER BLOCK + // 3. Check and make sure that we have the same number of SSRtx tx as we do + // revocations. + // 4. Check for revocation overflows. 
+ numSSRtxTx := 0 + + missedTickets, err := b.GenerateMissedTickets(tixStore) + if err != nil { + h := block.Sha() + str := fmt.Sprintf("Failed to generate missed tickets data "+ + "for block %v, height %v! Error given: ", + h, + block.Height(), + err.Error()) + return errors.New(str) + } + + for _, staketx := range stakeTransactions { + if is, _ := stake.IsSSRtx(staketx); is { + numSSRtxTx++ + + // Grab the input SStx hash from the inputs of the transaction. + msgTx := staketx.MsgTx() + sstxIn := msgTx.TxIn[0] // sstx input + sstxHash := sstxIn.PreviousOutPoint.Hash + + ticketMissed := false + + if _, exists := missedTickets[sstxHash]; exists { + ticketMissed = true + } + + if !ticketMissed { + errStr := fmt.Sprintf("Error in stake consensus: Ticket %v was "+ + "not found to be missed in the stake patch or database, "+ + "yet block %v spends it!", sstxHash, blockSha) + return ruleError(ErrInvalidSSRtx, errStr) + } + } + } + + // 3. Check and make sure that we have the same number of SSRtx tx as we do + // votes. + if uint8(numSSRtxTx) != revocations { + errStr := fmt.Sprintf("Error in stake consensus: The number of SSRtx tx"+ + " in block %v was not the %v revocations expected from the header! "+ + "(%v found)", blockSha, revocations, numSSRtxTx) + return ruleError(ErrInvalidRevNum, errStr) + } + + // 4. Check for revocation overflows. Should be impossible given the above + // check, but check anyway. + if numSSRtxTx > math.MaxUint8 { + errStr := fmt.Sprintf("Error in stake consensus: the number of SSRtx tx "+ + "in block %v was %v, overflowing the maximum allowed (255)", blockSha, + numSSRtxTx) + return ruleError(ErrTooManyRevocations, errStr) + } + + // ---------------------------------------------------------------------------- + // Final Checks + // ---------------------------------------------------------------------------- + // 1. Make sure that all the tx in the stake tx tree are either SStx, SSGen, + // or SSRtx. + // 2. Check and make sure that the ticketpool size is calculated correctly + // after account for spent, missed, and expired tickets. + + // 1. Ensure that all stake transactions are accounted for. If not, this + // indicates that there was some sort of non-standard stake tx present + // in the block. This is already checked before, but check again here. + stakeTxSum := numSStxTx + numSSGenTx + numSSRtxTx + + if stakeTxSum != len(stakeTransactions) { + errStr := fmt.Sprintf("Error in stake consensus: the number of stake tx "+ + "in block %v was %v, however we expected %v", block.Sha(), stakeTxSum, + len(stakeTransactions)) + return ruleError(ErrNonstandardStakeTx, errStr) + } + + // 2. Check the ticket pool size. + if calcPoolSize != poolSize { + errStr := fmt.Sprintf("Error in stake consensus: the poolsize "+ + "in block %v was %v, however we expected %v", + node.hash, + poolSize, + calcPoolSize) + return ruleError(ErrPoolSize, errStr) + } + + return nil +} + // CheckTransactionInputs performs a series of checks on the inputs to a // transaction to ensure they are valid. An example of some of the checks // include verifying all inputs exist, ensuring the coinbase seasoning // requirements are met, detecting double spends, validating all values and fees // are in the legal range and the total output amount doesn't exceed the input // amount, and verifying the signatures to prove the spender was the owner of -// the bitcoins and therefore allowed to spend them. As it checks the inputs, +// the decred and therefore allowed to spend them. 
As it checks the inputs, // it also calculates the total fees for the transaction and returns that value. -func CheckTransactionInputs(tx *btcutil.Tx, txHeight int64, txStore TxStore) (int64, error) { +func CheckTransactionInputs(tx *dcrutil.Tx, txHeight int64, txStore TxStore, + checkFraudProof bool, chainParams *chaincfg.Params) (int64, error) { + // Expired transactions are not allowed. + if tx.MsgTx().Expiry != wire.NoExpiryValue { + if txHeight >= int64(tx.MsgTx().Expiry) { + errStr := fmt.Sprintf("Transaction indicated an expiry of %v"+ + " while the current height is %v", tx.MsgTx().Expiry, txHeight) + return 0, ruleError(ErrExpiredTx, errStr) + } + } + + ticketMaturity := int64(chainParams.TicketMaturity) + stakeEnabledHeight := chainParams.StakeEnabledHeight + txHash := tx.Sha() + var totalAtomIn int64 + // Coinbase transactions have no inputs. if IsCoinBase(tx) { return 0, nil } - txHash := tx.Sha() - var totalSatoshiIn int64 - for _, txIn := range tx.MsgTx().TxIn { + // ---------------------------------------------------------------------------- + // Decred stake transaction testing. + // ---------------------------------------------------------------------------- + + // SSTX ----------------------------------------------------------------------- + // 1. Check and make sure that the output amounts in the committments to the + // ticket are correctly calculated. + + // 1. Check and make sure that the output amounts in the committments to the + // ticket are correctly calculated. + isSStx, _ := stake.IsSStx(tx) + if isSStx { + msgTx := tx.MsgTx() + + sstxInAmts := make([]int64, len(msgTx.TxIn)) + + for idx, txIn := range msgTx.TxIn { + // Ensure the input is available. + txInHash := &txIn.PreviousOutPoint.Hash + originTx, exists := txStore[*txInHash] + + if !exists || originTx.Err != nil || originTx.Tx == nil { + str := fmt.Sprintf("unable to find input transaction "+ + "%v for transaction %v", txInHash, txHash) + return 0, ruleError(ErrMissingTx, str) + } + + // Ensure the transaction is not double spending coins. + originTxIndex := txIn.PreviousOutPoint.Index + originTxMsgTx := originTx.Tx.MsgTx() + + if int(originTxIndex) >= len(originTxMsgTx.TxOut) { + errStr := fmt.Sprintf("SStx input using tx %x, txout %v "+ + "referenced a txout that was out of range", + originTx.Tx.Sha(), + originTxIndex) + return 0, ruleError(ErrBadTxInput, errStr) + } + + // Check and make sure that the input is P2PKH or P2SH. + thisPkVersion := originTxMsgTx.TxOut[originTxIndex].Version + thisPkScript := originTxMsgTx.TxOut[originTxIndex].PkScript + class := txscript.GetScriptClass(thisPkVersion, thisPkScript) + if txscript.IsStakeOutput(thisPkScript) { + class, _ = txscript.GetStakeOutSubclass(thisPkScript) + } + + if !(class == txscript.PubKeyHashTy || + class == txscript.ScriptHashTy) { + errStr := fmt.Sprintf("SStx input using tx %x, txout %v "+ + "referenced a txout that was not a PubKeyHashTy or "+ + "ScriptHashTy pkScript (class: %v)", + originTx.Tx.Sha(), + originTxIndex, + class) + return 0, ruleError(ErrSStxInScrType, errStr) + } + + // Get the value of the input. 
+ sstxInAmts[idx] = originTxMsgTx.TxOut[originTxIndex].Value + } + + _, _, sstxOutAmts, sstxChangeAmts, _, _ := + stake.GetSStxStakeOutputInfo(tx) + _, sstxOutAmtsCalc, err := stake.GetSStxNullOutputAmounts(sstxInAmts, + sstxChangeAmts, + msgTx.TxOut[0].Value) + if err != nil { + return 0, err + } + + err = stake.VerifySStxAmounts(sstxOutAmts, sstxOutAmtsCalc) + if err != nil { + errStr := fmt.Sprintf("SStx output commitment amounts were not the "+ + "same as calculated amounts; Error returned %v", err) + return 0, ruleError(ErrSStxCommitment, errStr) + } + } + + // SSGEN ---------------------------------------------------------------------- + // 1. Check SSGen output + rewards to make sure they're in line with the + // consensus code and what the outputs are in the original SStx. Also + // check to ensure that there is congruency for output PKH from SStx to + // SSGen outputs. + // Check also that the input transaction was an SStx. + // 2. Make sure the second input is an SStx tagged output. + // 3. Check to make sure that the difference in height between the current + // block and the block the SStx was included in is > ticketMaturity. + + // Save whether or not this is an SSGen tx; if it is, we need to skip the + // input check of the stakebase later, and another input check for OP_SSTX + // tagged output uses. + isSSGen, _ := stake.IsSSGen(tx) + if isSSGen { + // Cursory check to see if we've even reached stake-enabled height. + if txHeight < stakeEnabledHeight { + errStr := fmt.Sprintf("SSGen tx appeared in block height %v before "+ + "stake enabled height %v", txHeight, stakeEnabledHeight) + return 0, ruleError(ErrInvalidEarlyStakeTx, errStr) + } + + // Grab the input SStx hash from the inputs of the transaction. + msgTx := tx.MsgTx() + nullIn := msgTx.TxIn[0] + sstxIn := msgTx.TxIn[1] // sstx input + sstxHash := sstxIn.PreviousOutPoint.Hash + + // Calculate the theoretical stake vote subsidy by extracting the vote + // height. Should be impossible because IsSSGen requires this byte string + // to be a certain number of bytes. + _, heightVotingOn, err := stake.GetSSGenBlockVotedOn(tx) + if err != nil { + errStr := fmt.Sprintf("Could not parse SSGen block vote information "+ + "from SSGen %v; Error returned %v", + txHash, err) + return 0, ruleError(ErrUnparseableSSGen, errStr) + } + + stakeVoteSubsidy := CalcStakeVoteSubsidy(int64(heightVotingOn), + chainParams) + + // AmountIn for the input should be equal to the stake subsidy. + if nullIn.ValueIn != stakeVoteSubsidy { + errStr := fmt.Sprintf("bad stake vote subsidy; got %v, expect %v", + nullIn.ValueIn, stakeVoteSubsidy) + return 0, ruleError(ErrBadStakebaseAmountIn, errStr) + } + + // 1. Fetch the input sstx transaction from the txstore and then check + // to make sure that the reward has been calculated correctly from the + // subsidy and the inputs. + // We also need to make sure that the SSGen outputs that are P2PKH go + // to the addresses specified in the original SSTx. Check that too. + originTx, exists := txStore[sstxHash] + if !exists || originTx.Err != nil || originTx.Tx == nil { + errStr := fmt.Sprintf("Unable to find input sstx transaction "+ + "%v for transaction %v", sstxHash, txHash) + return 0, ruleError(ErrMissingTx, errStr) + } + + // While we're here, double check to make sure that the input is from an + // SStx. By doing so, you also ensure the first output is OP_SSTX tagged. 
+ if isSStx, _ := stake.IsSStx(originTx.Tx); !isSStx { + errStr := fmt.Sprintf("Input transaction %v for SSGen %v was not"+ + "an SStx tx (given input: %v)", txHash, sstxHash) + return 0, ruleError(ErrInvalidSSGenInput, errStr) + } + + // Make sure it's using the 0th output. + if sstxIn.PreviousOutPoint.Index != 0 { + errStr := fmt.Sprintf("Input transaction %v for SSGen %v did not"+ + "reference the first output (given idx %v)", txHash, + sstxIn.PreviousOutPoint.Index) + return 0, ruleError(ErrInvalidSSGenInput, errStr) + } + + sstxMsgTx := originTx.Tx.MsgTx() + + sstxPayTypes, sstxPkhs, sstxAmts, _, sstxRules, sstxLimits := + stake.GetSStxStakeOutputInfo(originTx.Tx) + + ssgenPayTypes, ssgenPkhs, ssgenAmts, err := + stake.GetSSGenStakeOutputInfo(tx, chainParams) + if err != nil { + errStr := fmt.Sprintf("Could not decode outputs for SSgen %v: %v", + txHash, err.Error()) + return 0, ruleError(ErrSSGenPayeeOuts, errStr) + } + + // Quick check to make sure the number of SStx outputs is equal to + // the number of SSGen outputs. + if (len(sstxPayTypes) != len(ssgenPayTypes)) || + (len(sstxPkhs) != len(ssgenPkhs)) || + (len(sstxAmts) != len(ssgenAmts)) { + errStr := fmt.Sprintf("Incongruent payee number for SSGen "+ + "%v and input SStx %v", txHash, sstxHash) + return 0, ruleError(ErrSSGenPayeeNum, errStr) + } + + // Get what the stake payouts should be after appending the reward + // to each output. + ssgenCalcAmts := stake.GetStakeRewards(sstxAmts, + sstxMsgTx.TxOut[0].Value, + stakeVoteSubsidy) + + /* + err = stake.VerifyStakingPkhsAndAmounts(sstxPayTypes, + sstxPkhs, + ssrtxAmts, + ssrtxPayTypes, + ssrtxPkhs, + ssrtxCalcAmts, + false, // Revocation + sstxRules, + sstxLimits) + */ + + // Check that the generated slices for pkhs and amounts are congruent. + err = stake.VerifyStakingPkhsAndAmounts(sstxPayTypes, + sstxPkhs, + ssgenAmts, + ssgenPayTypes, + ssgenPkhs, + ssgenCalcAmts, + true, // Vote + sstxRules, + sstxLimits) + + if err != nil { + errStr := fmt.Sprintf("Stake reward consensus violation for "+ + "SStx input %v and SSGen output %v: %v", sstxHash, txHash, err) + return 0, ruleError(ErrSSGenPayeeOuts, errStr) + } + + // 2. Check to make sure that the second input was an OP_SSTX tagged + // output from the referenced SStx. + if txscript.GetScriptClass(sstxMsgTx.TxOut[0].Version, + sstxMsgTx.TxOut[0].PkScript) != txscript.StakeSubmissionTy { + errStr := fmt.Sprintf("First SStx output in SStx %v referenced "+ + "by SSGen %v should have been OP_SSTX tagged, but it was "+ + "not", sstxHash, txHash) + return 0, ruleError(ErrInvalidSSGenInput, errStr) + } + + // 3. Check to ensure that ticket maturity number of blocks have passed + // between the block the SSGen plans to go into and the block in which + // the SStx was originally found in. + originHeight := originTx.BlockHeight + blocksSincePrev := txHeight - originHeight + + // NOTE: You can only spend an OP_SSTX tagged output on the block AFTER + // the entire range of ticketMaturity has passed, hence <= instead of <. + if blocksSincePrev <= ticketMaturity { + errStr := fmt.Sprintf("tried to spend sstx output from "+ + "transaction %v from height %v at height %v before "+ + "required ticket maturity of %v+1 blocks", sstxHash, originHeight, + txHeight, ticketMaturity) + return 0, ruleError(ErrSStxInImmature, errStr) + } + } + + // SSRTX ---------------------------------------------------------------------- + // 1. Ensure the only input present is an OP_SSTX tagged output, and that the + // input transaction is actually an SStx. + // 2. 
Ensure that payouts are to the original SStx NullDataTy outputs in the + // amounts given there, to the public key hashes given then. + // 3. Check to make sure that the difference in height between the current + // block and the block the SStx was included in is > ticketMaturity. + + // Save whether or not this is an SSRtx tx; if it is, we need to know this + // later input check for OP_SSTX outs. + isSSRtx, _ := stake.IsSSRtx(tx) + + if isSSRtx { + // Cursory check to see if we've even reach stake-enabled height. + // Note for an SSRtx to be valid a vote must be missed, so for SSRtx the + // height of allowance is +1. + if txHeight < stakeEnabledHeight+1 { + errStr := fmt.Sprintf("SSRtx tx appeared in block height %v before "+ + "stake enabled height+1 %v", txHeight, stakeEnabledHeight+1) + return 0, ruleError(ErrInvalidEarlyStakeTx, errStr) + } + + // Grab the input SStx hash from the inputs of the transaction. + msgTx := tx.MsgTx() + sstxIn := msgTx.TxIn[0] // sstx input + sstxHash := sstxIn.PreviousOutPoint.Hash + + // 1. Fetch the input sstx transaction from the txstore and then check + // to make sure that the reward has been calculated correctly from the + // subsidy and the inputs. + // We also need to make sure that the SSGen outputs that are P2PKH go + // to the addresses specified in the original SSTx. Check that too. + originTx, exists := txStore[sstxHash] + if !exists || originTx.Err != nil || originTx.Tx == nil { + errStr := fmt.Sprintf("Unable to find input sstx transaction "+ + "%v for transaction %v", sstxHash, txHash) + return 0, ruleError(ErrMissingTx, errStr) + } + + // While we're here, double check to make sure that the input is from an + // SStx. By doing so, you also ensure the first output is OP_SSTX tagged. + if isSStx, _ := stake.IsSStx(originTx.Tx); !isSStx { + errStr := fmt.Sprintf("Input transaction %v for SSRtx %v was not"+ + "an SStx tx", txHash, sstxHash) + return 0, ruleError(ErrInvalidSSRtxInput, errStr) + } + + sstxMsgTx := originTx.Tx.MsgTx() + + sstxPayTypes, sstxPkhs, sstxAmts, _, sstxRules, sstxLimits := + stake.GetSStxStakeOutputInfo(originTx.Tx) + + // This should be impossible to hit given the strict bytecode + // size restrictions for components of SSRtxs already checked + // for in IsSSRtx. + ssrtxPayTypes, ssrtxPkhs, ssrtxAmts, err := + stake.GetSSRtxStakeOutputInfo(tx, chainParams) + if err != nil { + errStr := fmt.Sprintf("Could not decode outputs for SSRtx %v: %v", + txHash, err.Error()) + return 0, ruleError(ErrSSRtxPayees, errStr) + } + + // Quick check to make sure the number of SStx outputs is equal to + // the number of SSGen outputs. + if (len(sstxPkhs) != len(ssrtxPkhs)) || + (len(sstxAmts) != len(ssrtxAmts)) { + errStr := fmt.Sprintf("Incongruent payee number for SSRtx "+ + "%v and input SStx %v", txHash, sstxHash) + return 0, ruleError(ErrSSRtxPayeesMismatch, errStr) + } + + // Get what the stake payouts should be after appending the reward + // to each output. + ssrtxCalcAmts := stake.GetStakeRewards(sstxAmts, + sstxMsgTx.TxOut[0].Value, + int64(0)) // SSRtx has no subsidy + + // Check that the generated slices for pkhs and amounts are congruent. + err = stake.VerifyStakingPkhsAndAmounts(sstxPayTypes, + sstxPkhs, + ssrtxAmts, + ssrtxPayTypes, + ssrtxPkhs, + ssrtxCalcAmts, + false, // Revocation + sstxRules, + sstxLimits) + + if err != nil { + errStr := fmt.Sprintf("Stake consensus violation for SStx input"+ + " %v and SSRtx output %v: %v", sstxHash, txHash, err) + return 0, ruleError(ErrSSRtxPayees, errStr) + } + + // 2. 
Check to make sure that the second input was an OP_SSTX tagged + // output from the referenced SStx. + if txscript.GetScriptClass(sstxMsgTx.TxOut[0].Version, + sstxMsgTx.TxOut[0].PkScript) != txscript.StakeSubmissionTy { + errStr := fmt.Sprintf("First SStx output in SStx %v referenced "+ + "by SSGen %v should have been OP_SSTX tagged, but it was "+ + "not", sstxHash, txHash) + return 0, ruleError(ErrInvalidSSRtxInput, errStr) + } + + // 3. Check to ensure that ticket maturity number of blocks have passed + // between the block the SSRtx plans to go into and the block in which + // the SStx was originally found in. + originHeight := originTx.BlockHeight + blocksSincePrev := txHeight - originHeight + + // NOTE: You can only spend an OP_SSTX tagged output on the block AFTER + // the entire range of ticketMaturity has passed, hence <= instead of <. + // Also note that for OP_SSRTX spending, the ticket needs to have been + // missed, and this can't possibly happen until reaching ticketMaturity + + // 2. + if blocksSincePrev <= ticketMaturity+1 { + errStr := fmt.Sprintf("tried to spend sstx output from "+ + "transaction %v from height %v at height %v before "+ + "required ticket maturity of %v+1 blocks", sstxHash, originHeight, + txHeight, ticketMaturity) + return 0, ruleError(ErrSStxInImmature, errStr) + } + } + + // ---------------------------------------------------------------------------- + // Decred general transaction testing (and a few stake exceptions). + // ---------------------------------------------------------------------------- + + for idx, txIn := range tx.MsgTx().TxIn { // Ensure the input is available. txInHash := &txIn.PreviousOutPoint.Hash originTx, exists := txStore[*txInHash] + + // Inputs won't exist for stakebase tx, so ignore them. + if isSSGen && idx == 0 { + // However, do add the reward amount. + _, heightVotingOn, _ := stake.GetSSGenBlockVotedOn(tx) + stakeVoteSubsidy := CalcStakeVoteSubsidy(int64(heightVotingOn), + chainParams) + totalAtomIn += stakeVoteSubsidy + continue + } + if !exists || originTx.Err != nil || originTx.Tx == nil { str := fmt.Sprintf("unable to find input transaction "+ "%v for transaction %v", txInHash, txHash) return 0, ruleError(ErrMissingTx, str) } + // Check fraud proof witness data. + originTxIndex := txIn.PreviousOutPoint.Index + + // Using zero value outputs as inputs is banned. + if originTx.Tx.MsgTx().TxOut[originTxIndex].Value == 0 { + str := fmt.Sprintf("tried to spend zero value output from input %v,"+ + " idx %v", + originTx.Tx.Sha(), + originTxIndex) + return 0, ruleError(ErrZeroValueOutputSpend, str) + } + + if checkFraudProof { + if txIn.ValueIn != + originTx.Tx.MsgTx().TxOut[originTxIndex].Value { + str := fmt.Sprintf("bad fraud check value in (expected %v, "+ + "given %v) for txIn %v", + originTx.Tx.MsgTx().TxOut[originTxIndex].Value, + txIn.ValueIn, idx) + return 0, ruleError(ErrFraudAmountIn, str) + } + + if int64(txIn.BlockHeight) != originTx.BlockHeight { + str := fmt.Sprintf("bad fraud check block height (expected %v, "+ + "given %v) for txIn %v", originTx.BlockHeight, + txIn.BlockHeight, idx) + return 0, ruleError(ErrFraudBlockHeight, str) + } + + if txIn.BlockIndex != originTx.BlockIndex { + str := fmt.Sprintf("bad fraud check block index (expected %v, "+ + "given %v) for txIn %v", originTx.BlockIndex, txIn.BlockIndex, + idx) + return 0, ruleError(ErrFraudBlockIndex, str) + } + } + // Ensure the transaction is not spending coins which have not // yet reached the required coinbase maturity. 
+ coinbaseMaturity := int64(chainParams.CoinbaseMaturity) if IsCoinBase(originTx.Tx) { originHeight := originTx.BlockHeight blocksSincePrev := txHeight - originHeight if blocksSincePrev < coinbaseMaturity { - str := fmt.Sprintf("tried to spend coinbase "+ + str := fmt.Sprintf("tx %v tried to spend coinbase "+ "transaction %v from height %v at "+ "height %v before required maturity "+ + "of %v blocks", txHash, txInHash, originHeight, + txHeight, coinbaseMaturity) + return 0, ruleError(ErrImmatureSpend, str) + } + } + + // Ensure that the transaction is not spending coins from a + // transaction that included an expiry but which has not yet + // reached coinbase maturity many blocks. + if originTx.Tx.MsgTx().Expiry != wire.NoExpiryValue { + originHeight := originTx.BlockHeight + blocksSincePrev := txHeight - originHeight + if blocksSincePrev < coinbaseMaturity { + str := fmt.Sprintf("tx %v tried to spend "+ + "transaction %v including an expiry "+ + "from height %v at height %v before "+ + "required maturity of %v blocks", + txHash, txInHash, originHeight, + txHeight, coinbaseMaturity) + return 0, ruleError(ErrExpiryTxSpentEarly, str) + } + } + + // Ensure the transaction is not double spending coins. + if originTxIndex >= uint32(len(originTx.Spent)) { + str := fmt.Sprintf("out of bounds input index %d in "+ + "transaction %v referenced from transaction %v", + originTxIndex, txInHash, txHash) + return 0, ruleError(ErrBadTxInput, str) + } + if originTx.Spent[originTxIndex] && !(isSSGen || isSSRtx) { + str := fmt.Sprintf("transaction %v tried to double "+ + "spend coins from transaction %v", txHash, + txInHash) + return 0, ruleError(ErrDoubleSpend, str) + } + + // Ensure that the outpoint's tx tree makes sense. + originTxOPTree := txIn.PreviousOutPoint.Tree + originTxType := stake.DetermineTxType(originTx.Tx) + indicatedTree := dcrutil.TxTreeRegular + if originTxType != stake.TxTypeRegular { + indicatedTree = dcrutil.TxTreeStake + } + if indicatedTree != originTxOPTree { + errStr := fmt.Sprintf("Tx %v attempted to spend from a %v "+ + "tx tree, yet the outpoint specified a %v tx tree "+ + "instead", + txHash, + indicatedTree, + originTxOPTree) + return 0, ruleError(ErrDiscordantTxTree, errStr) + } + + // The only transaction types that are allowed to spend from OP_SSTX + // tagged outputs are SSGen or SSRtx tx. + // So, check all the inputs from non SSGen or SSRtx and make sure that + // they spend no OP_SSTX tagged outputs. + originTxMsgTx := originTx.Tx.MsgTx() + + if !(isSSGen || isSSRtx) { + if txscript.GetScriptClass(originTxMsgTx.TxOut[originTxIndex].Version, + originTxMsgTx.TxOut[originTxIndex].PkScript) == + txscript.StakeSubmissionTy { + _, errIsSSGen := stake.IsSSGen(tx) + _, errIsSSRtx := stake.IsSSRtx(tx) + errStr := fmt.Sprintf("Tx %v attempted to spend an OP_SSTX "+ + "tagged output, however it was not an SSGen or SSRtx tx"+ + "; IsSSGen err: %v, isSSRtx err: %v", + txHash, + errIsSSGen.Error(), + errIsSSRtx.Error()) + return 0, ruleError(ErrTxSStxOutSpend, errStr) + } + } + + // OP_SSGEN and OP_SSRTX tagged outputs can only be spent after + // coinbase maturity many blocks. 
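// A minimal sketch of the spend restriction checked above, with simple
// booleans standing in for the txscript and stake package calls: only votes
// (SSGen) and revocations (SSRtx) may consume an OP_SSTX-tagged ticket
// submission output.
func maySpendTicketOutput(spenderIsVote, spenderIsRevocation,
	referencedOutputIsTicketSubmission bool) bool {

	if !referencedOutputIsTicketSubmission {
		// No extra restriction for ordinary outputs.
		return true
	}
	// Ticket submission outputs are reserved for the stake system.
	return spenderIsVote || spenderIsRevocation
}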
+ scriptClass := txscript.GetScriptClass( + originTxMsgTx.TxOut[originTxIndex].Version, + originTxMsgTx.TxOut[originTxIndex].PkScript) + if scriptClass == txscript.OP_SSGEN || + scriptClass == txscript.OP_SSRTX { + + originHeight := originTx.BlockHeight + blocksSincePrev := txHeight - originHeight + if blocksSincePrev < int64(chainParams.SStxChangeMaturity) { + str := fmt.Sprintf("tried to spend OP_SSGEN or "+ + "OP_SSRTX output from tx %v from height %v at "+ + "height %v before required maturity "+ "of %v blocks", txInHash, originHeight, txHeight, coinbaseMaturity) return 0, ruleError(ErrImmatureSpend, str) } } - // Ensure the transaction is not double spending coins. - originTxIndex := txIn.PreviousOutPoint.Index - if originTxIndex >= uint32(len(originTx.Spent)) { - str := fmt.Sprintf("out of bounds input index %d in "+ - "transaction %v referenced from transaction %v", - originTxIndex, txInHash, txHash) - return 0, ruleError(ErrBadTxInput, str) - } - if originTx.Spent[originTxIndex] { - str := fmt.Sprintf("transaction %v tried to double "+ - "spend output %v", txHash, txIn.PreviousOutPoint) - return 0, ruleError(ErrDoubleSpend, str) + // SStx change outputs may only be spent after sstx change maturity many + // blocks. + if scriptClass == txscript.StakeSubChangeTy { + originHeight := originTx.BlockHeight + blocksSincePrev := txHeight - originHeight + if blocksSincePrev < int64(chainParams.SStxChangeMaturity) { + str := fmt.Sprintf("tried to spend SStx change "+ + "output from tx %v from height %v at "+ + "height %v before required maturity "+ + "of %v blocks", txInHash, originHeight, + txHeight, chainParams.SStxChangeMaturity) + return 0, ruleError(ErrImmatureSpend, str) + } } // Ensure the transaction amounts are in range. Each of the // output values of the input transactions must not be negative // or more than the max allowed per transaction. All amounts in - // a transaction are in a unit value known as a satoshi. One - // bitcoin is a quantity of satoshi as defined by the - // SatoshiPerBitcoin constant. - originTxSatoshi := originTx.Tx.MsgTx().TxOut[originTxIndex].Value - if originTxSatoshi < 0 { + // a transaction are in a unit value known as a atom. One + // decred is a quantity of atoms as defined by the + // AtomPerCoin constant. + originTxAtom := originTx.Tx.MsgTx().TxOut[originTxIndex].Value + if originTxAtom < 0 { str := fmt.Sprintf("transaction output has negative "+ - "value of %v", originTxSatoshi) + "value of %v", originTxAtom) return 0, ruleError(ErrBadTxOutValue, str) } - if originTxSatoshi > btcutil.MaxSatoshi { + if originTxAtom > dcrutil.MaxAmount { str := fmt.Sprintf("transaction output value of %v is "+ "higher than max allowed value of %v", - originTxSatoshi, btcutil.MaxSatoshi) + originTxAtom, dcrutil.MaxAmount) return 0, ruleError(ErrBadTxOutValue, str) } // The total of all outputs must not be more than the max // allowed per transaction. Also, we could potentially overflow // the accumulator so check for overflow. 
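// A minimal sketch of the per-output range rule applied above, with a plain
// maxAtoms parameter standing in for dcrutil.MaxAmount: every referenced
// output value must be a non-negative number of atoms and no larger than the
// maximum allowed in a single transaction.
func outputValueInRange(atoms, maxAtoms int64) bool {
	return atoms >= 0 && atoms <= maxAtoms
}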
- lastSatoshiIn := totalSatoshiIn - totalSatoshiIn += originTxSatoshi - if totalSatoshiIn < lastSatoshiIn || - totalSatoshiIn > btcutil.MaxSatoshi { + lastAtomIn := totalAtomIn + totalAtomIn += originTxAtom + if totalAtomIn < lastAtomIn || + totalAtomIn > dcrutil.MaxAmount { str := fmt.Sprintf("total value of all transaction "+ "inputs is %v which is higher than max "+ - "allowed value of %v", totalSatoshiIn, - btcutil.MaxSatoshi) + "allowed value of %v", totalAtomIn, + dcrutil.MaxAmount) return 0, ruleError(ErrBadTxOutValue, str) } @@ -943,109 +1953,79 @@ func CheckTransactionInputs(tx *btcutil.Tx, txHeight int64, txStore TxStore) (in // Calculate the total output amount for this transaction. It is safe // to ignore overflow and out of range errors here because those error // conditions would have already been caught by checkTransactionSanity. - var totalSatoshiOut int64 - for _, txOut := range tx.MsgTx().TxOut { - totalSatoshiOut += txOut.Value + var totalAtomOut int64 + for i, txOut := range tx.MsgTx().TxOut { + totalAtomOut += txOut.Value + + // Double check and make sure that, if this is not a stake transaction, + // that no outputs have OP code tags OP_SSTX, OP_SSRTX, OP_SSGEN, or + // OP_SSTX_CHANGE. + if !isSStx && !isSSGen && !isSSRtx { + scriptClass := txscript.GetScriptClass(txOut.Version, txOut.PkScript) + if (scriptClass == txscript.StakeSubmissionTy) || + (scriptClass == txscript.StakeGenTy) || + (scriptClass == txscript.StakeRevocationTy) || + (scriptClass == txscript.StakeSubChangeTy) { + errStr := fmt.Sprintf("Non-stake tx %v included stake output "+ + "type %v at in txout at position %v", txHash, scriptClass, i) + return 0, ruleError(ErrRegTxSpendStakeOut, errStr) + } + + // Check to make sure that non-stake transactions also are not + // using stake tagging OP codes anywhere else in their output + // pkScripts. + hasStakeOpCodes, err := txscript.ContainsStakeOpCodes(txOut.PkScript) + if err != nil { + return 0, ruleError(ErrScriptMalformed, err.Error()) + } + if hasStakeOpCodes { + errStr := fmt.Sprintf("Non-stake tx %v included stake OP code "+ + "in txout at position %v", txHash, i) + return 0, ruleError(ErrScriptMalformed, errStr) + } + } } // Ensure the transaction does not spend more than its inputs. - if totalSatoshiIn < totalSatoshiOut { + if totalAtomIn < totalAtomOut { str := fmt.Sprintf("total value of all transaction inputs for "+ "transaction %v is %v which is less than the amount "+ - "spent of %v", txHash, totalSatoshiIn, totalSatoshiOut) + "spent of %v", txHash, totalAtomIn, totalAtomOut) return 0, ruleError(ErrSpendTooHigh, str) } // NOTE: bitcoind checks if the transaction fees are < 0 here, but that // is an impossible condition because of the check above that ensures // the inputs are >= the outputs. - txFeeInSatoshi := totalSatoshiIn - totalSatoshiOut - return txFeeInSatoshi, nil + txFeeInAtom := totalAtomIn - totalAtomOut + + return txFeeInAtom, nil } -// checkConnectBlock performs several checks to confirm connecting the passed -// block to the main chain (including whatever reorganization might be necessary -// to get this node to the main chain) does not violate any rules. -// -// The CheckConnectBlock function makes use of this function to perform the -// bulk of its work. 
The only difference is this function accepts a node which -// may or may not require reorganization to connect it to the main chain whereas -// CheckConnectBlock creates a new node which specifically connects to the end -// of the current main chain and then calls this function with that node. -// -// See the comments for CheckConnectBlock for some examples of the type of -// checks performed by this function. -func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block) error { - // If the side chain blocks end up in the database, a call to - // CheckBlockSanity should be done here in case a previous version - // allowed a block that is no longer valid. However, since the - // implementation only currently uses memory for the side chain blocks, - // it isn't currently necessary. +// checkP2SHNumSigOps Checks the number of P2SH signature operations to make +// sure they don't overflow the limits. +// TxTree true == Regular, false == Stake +func checkP2SHNumSigOps(txs []*dcrutil.Tx, txInputStore TxStore, + txTree bool) error { + totalSigOps := 0 + for i, tx := range txs { + isSSGen, _ := stake.IsSSGen(tx) + numsigOps := CountSigOps(tx, (i == 0) && txTree, isSSGen) - // The coinbase for the Genesis block is not spendable, so just return - // now. - if node.hash.IsEqual(b.chainParams.GenesisHash) && b.bestChain == nil { - return nil - } - - // BIP0030 added a rule to prevent blocks which contain duplicate - // transactions that 'overwrite' older transactions which are not fully - // spent. See the documentation for checkBIP0030 for more details. - // - // There are two blocks in the chain which violate this - // rule, so the check must be skipped for those blocks. The - // isBIP0030Node function is used to determine if this block is one - // of the two blocks that must be skipped. - enforceBIP0030 := !isBIP0030Node(node) - if enforceBIP0030 { - err := b.checkBIP0030(node, block) + // Since the first (and only the first) transaction has + // already been verified to be a coinbase transaction, + // use (i == 0) && TxTree as an optimization for the + // flag to countP2SHSigOps for whether or not the + // transaction is a coinbase transaction rather than + // having to do a full coinbase check again. + numP2SHSigOps, err := CountP2SHSigOps(tx, (i == 0) && txTree, isSSGen, + txInputStore) if err != nil { + log.Tracef("CountP2SHSigOps failed; error "+ + "returned %v", err.Error()) return err } - } - - // Request a map that contains all input transactions for the block from - // the point of view of its position within the block chain. These - // transactions are needed for verification of things such as - // transaction inputs, counting pay-to-script-hashes, and scripts. - txInputStore, err := b.fetchInputTransactions(node, block) - if err != nil { - return err - } - - // BIP0016 describes a pay-to-script-hash type that is considered a - // "standard" type. The rules for this BIP only apply to transactions - // after the timestamp defined by txscript.Bip16Activation. See - // https://en.bitcoin.it/wiki/BIP_0016 for more details. - enforceBIP0016 := false - if node.timestamp.After(txscript.Bip16Activation) { - enforceBIP0016 = true - } - - // The number of signature operations must be less than the maximum - // allowed per block. 
Note that the preliminary sanity checks on a - // block also include a check similar to this one, but this check - // expands the count to include a precise count of pay-to-script-hash - // signature operations in each of the input transaction public key - // scripts. - transactions := block.Transactions() - totalSigOps := 0 - for i, tx := range transactions { - numsigOps := CountSigOps(tx) - if enforceBIP0016 { - // Since the first (and only the first) transaction has - // already been verified to be a coinbase transaction, - // use i == 0 as an optimization for the flag to - // countP2SHSigOps for whether or not the transaction is - // a coinbase transaction rather than having to do a - // full coinbase check again. - numP2SHSigOps, err := CountP2SHSigOps(tx, i == 0, - txInputStore) - if err != nil { - return err - } - numsigOps += numP2SHSigOps - } + numsigOps += numP2SHSigOps // Check for overflow or going over the limits. We have to do // this on every loop iteration to avoid overflow. @@ -1059,6 +2039,144 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block) er } } + return nil +} + +// checkStakeBaseAmounts calculates the total amount given as subsidy from +// single stakebase transactions (votes) within a block. This function skips a +// ton of checks already performed by CheckTransactionInputs. +func checkStakeBaseAmounts(height int64, params *chaincfg.Params, + txs []*dcrutil.Tx, txStore TxStore) error { + for _, tx := range txs { + if is, _ := stake.IsSSGen(tx); is { + // Ensure the input is available. + txInHash := &tx.MsgTx().TxIn[1].PreviousOutPoint.Hash + originTx, exists := txStore[*txInHash] + if !exists || originTx.Err != nil || originTx.Tx == nil { + str := fmt.Sprintf("couldn't find input tx %v for stakebase "+ + "amounts check", txInHash) + return ruleError(ErrTicketUnavailable, str) + } + + originTxIndex := tx.MsgTx().TxIn[1].PreviousOutPoint.Index + originTxAtom := originTx.Tx.MsgTx().TxOut[originTxIndex].Value + + totalOutputs := int64(0) + // Sum up the outputs. + for _, out := range tx.MsgTx().TxOut { + totalOutputs += out.Value + } + + difference := totalOutputs - originTxAtom + + // Subsidy aligns with the height we're voting on, not with the + // height of the current block. + calcSubsidy := CalcStakeVoteSubsidy(height-1, params) + + if difference > calcSubsidy { + str := fmt.Sprintf("ssgen tx %v spent more than allowed "+ + "(spent %v, allowed %v)", tx.Sha(), difference, calcSubsidy) + return ruleError(ErrSSGenSubsidy, str) + } + } + } + + return nil +} + +// getStakeBaseAmounts calculates the total amount given as subsidy from +// the collective stakebase transactions (votes) within a block. This +// function skips a ton of checks already performed by +// CheckTransactionInputs. +func getStakeBaseAmounts(txs []*dcrutil.Tx, txStore TxStore) (int64, error) { + totalInputs := int64(0) + totalOutputs := int64(0) + for _, tx := range txs { + if is, _ := stake.IsSSGen(tx); is { + // Ensure the input is available. + txInHash := &tx.MsgTx().TxIn[1].PreviousOutPoint.Hash + originTx, exists := txStore[*txInHash] + if !exists || originTx.Err != nil || originTx.Tx == nil { + str := fmt.Sprintf("couldn't find input tx %v for stakebase "+ + "amounts get", + txInHash) + return 0, ruleError(ErrTicketUnavailable, str) + } + + originTxIndex := tx.MsgTx().TxIn[1].PreviousOutPoint.Index + originTxAtom := originTx.Tx.MsgTx().TxOut[originTxIndex].Value + + totalInputs += originTxAtom + + // Sum up the outputs. 
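// A minimal sketch of the stakebase amount rule enforced by
// checkStakeBaseAmounts above, with plain integers standing in for the
// transaction lookups: the amount a vote creates out of thin air (its output
// total minus the ticket price it consumes) may not exceed the stake vote
// subsidy for the height being voted on.
func stakebaseWithinSubsidy(voteOutputTotal, ticketPrice, voteSubsidy int64) bool {
	created := voteOutputTotal - ticketPrice
	return created <= voteSubsidy
}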
+ for _, out := range tx.MsgTx().TxOut { + totalOutputs += out.Value + } + } + } + + return totalOutputs - totalInputs, nil +} + +// getStakeTreeFees determines the amount of fees for in the stake tx tree +// of some node given a transaction store. +func getStakeTreeFees(height int64, params *chaincfg.Params, + txs []*dcrutil.Tx, txStore TxStore) (dcrutil.Amount, error) { + totalInputs := int64(0) + totalOutputs := int64(0) + for _, tx := range txs { + isSSGen, _ := stake.IsSSGen(tx) + + for i, in := range tx.MsgTx().TxIn { + // Ignore stakebases. + if isSSGen && i == 0 { + continue + } + + txInHash := &in.PreviousOutPoint.Hash + originTx, exists := txStore[*txInHash] + if !exists || originTx.Err != nil || originTx.Tx == nil { + str := fmt.Sprintf("couldn't find input tx %v for stake "+ + "tree fee calculation", txInHash) + return 0, ruleError(ErrTicketUnavailable, str) + } + + originTxIndex := in.PreviousOutPoint.Index + originTxAtom := originTx.Tx.MsgTx().TxOut[originTxIndex].Value + + totalInputs += originTxAtom + } + + for _, out := range tx.MsgTx().TxOut { + totalOutputs += out.Value + } + + // For votes, subtract the subsidy to determine actual + // fees. + if isSSGen { + // Subsidy aligns with the height we're voting on, not with the + // height of the current block. + totalOutputs -= CalcStakeVoteSubsidy(height-1, params) + } + } + + if totalInputs < totalOutputs { + str := fmt.Sprintf("negative cumulative fees found in stake tx tree") + return 0, ruleError(ErrStakeFees, str) + } + + return dcrutil.Amount(totalInputs - totalOutputs), nil +} + +// checkTransactionInputs is the local function used to check the transaction +// inputs for a transaction list given a predetermined TxStore. +// TxTree true == Regular, false == Stake +func (b *BlockChain) checkTransactionInputs( + inputFees dcrutil.Amount, + node *blockNode, + txs []*dcrutil.Tx, + txStore TxStore, + txTree bool) error { // Perform several checks on the inputs for each transaction. Also // accumulate the total fees. This could technically be combined with // the loop above instead of running another loop over the transactions, @@ -1066,10 +2184,57 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block) er // still relatively cheap as compared to running the scripts) checks // against all the inputs when the signature operations are out of // bounds. - var totalFees int64 - for _, tx := range transactions { - txFee, err := CheckTransactionInputs(tx, node.height, txInputStore) + totalFees := int64(inputFees) // Stake tx tree carry forward + for _, tx := range txs { + // Check double spending for some stake types, because + // checkTransactionInputs doesn't do this for SSGens or + // SSRtxs. + isSSGen, _ := stake.IsSSGen(tx) + isSSRtx, _ := stake.IsSSRtx(tx) + if isSSGen || isSSRtx { + for i, txIn := range tx.MsgTx().TxIn { + // Stakebase handling. + if isSSGen && i == 0 { + continue + } + + // Ensure the input is available. + txInHash := &txIn.PreviousOutPoint.Hash + originTx, exists := txStore[*txInHash] + originTxIndex := txIn.PreviousOutPoint.Index + if !exists { + str := fmt.Sprintf("missing input tx for index %d in "+ + "transaction %v referenced from stake transaction %v", + originTxIndex, txInHash, tx.Sha()) + return ruleError(ErrBadTxInput, str) + } + + // Ensure the transaction is not double spending coins. 
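// A minimal sketch of the stake-tree fee calculation performed by
// getStakeTreeFees above, assuming the caller has already summed the tree's
// non-stakebase input values, its output values, and the total vote subsidy
// credited to SSGen transactions; fees are what remains after the subsidy is
// backed out, and a negative result is a consensus violation.
func stakeTreeFeesSketch(totalInputs, totalOutputs, totalVoteSubsidy int64) (int64, bool) {
	outputsLessSubsidy := totalOutputs - totalVoteSubsidy
	if totalInputs < outputsLessSubsidy {
		// Negative cumulative fees in the stake tree are not allowed.
		return 0, false
	}
	return totalInputs - outputsLessSubsidy, true
}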
+ if originTxIndex >= uint32(len(originTx.Spent)) { + str := fmt.Sprintf("out of bounds input index %d in "+ + "transaction %v referenced from stake transaction %v", + originTxIndex, txInHash, tx.Sha()) + return ruleError(ErrBadTxInput, str) + } + if originTx.Spent[originTxIndex] { + str := fmt.Sprintf("stake transaction %v tried to double "+ + "spend coins from transaction %v", tx.Sha(), + txInHash) + return ruleError(ErrDoubleSpend, str) + } + } + } + + // This step modifies the txStore and marks the tx outs used + // spent, so be aware of this. + txFee, err := CheckTransactionInputs(tx, + node.height, + txStore, + true, // Check fraud proofs + b.chainParams) if err != nil { + log.Tracef("CheckTransactionInputs failed; error "+ + "returned: %v", err) return err } @@ -1088,17 +2253,309 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block) er // mining the block. It is safe to ignore overflow and out of range // errors here because those error conditions would have already been // caught by checkTransactionSanity. - var totalSatoshiOut int64 - for _, txOut := range transactions[0].MsgTx().TxOut { - totalSatoshiOut += txOut.Value + if txTree { //TxTreeRegular + // Apply penalty to fees if we're at stake validation height. + if node.height >= b.chainParams.StakeValidationHeight { + totalFees *= int64(node.header.Voters) + totalFees /= int64(b.chainParams.TicketsPerBlock) + } + + var totalAtomOutRegular int64 + + for _, txOut := range txs[0].MsgTx().TxOut { + totalAtomOutRegular += txOut.Value + } + + var expectedAtomOut int64 + if node.height == 1 { + expectedAtomOut = calcBlockSubsidy(node.height, b.chainParams) + } else { + subsidyWork := CalcBlockWorkSubsidy(node.height, node.header.Voters, + b.chainParams) + subsidyTax := CalcBlockTaxSubsidy(node.height, node.header.Voters, + b.chainParams) + expectedAtomOut = subsidyWork + subsidyTax + totalFees + } + + // AmountIn for the input should be equal to the subsidy. + coinbaseIn := txs[0].MsgTx().TxIn[0] + subsidyWithoutFees := expectedAtomOut - totalFees + if (coinbaseIn.ValueIn != subsidyWithoutFees) && (node.height > 0) { + errStr := fmt.Sprintf("bad coinbase subsidy in input; got %v, "+ + "expect %v", coinbaseIn.ValueIn, subsidyWithoutFees) + return ruleError(ErrBadCoinbaseAmountIn, errStr) + } + + if totalAtomOutRegular > expectedAtomOut { + str := fmt.Sprintf("coinbase transaction for block %v pays %v "+ + "which is more than expected value of %v", + node.hash, totalAtomOutRegular, expectedAtomOut) + return ruleError(ErrBadCoinbaseValue, str) + } + } else { // TxTreeStake + if len(txs) == 0 && node.height < b.chainParams.StakeValidationHeight { + return nil + } + if len(txs) == 0 && node.height >= b.chainParams.StakeValidationHeight { + str := fmt.Sprintf("empty tx tree stake in block after " + + "stake validation height") + return ruleError(ErrNoStakeTx, str) + } + + err := checkStakeBaseAmounts(node.height, b.chainParams, txs, txStore) + if err != nil { + return err + } + + totalAtomOutStake, err := getStakeBaseAmounts(txs, txStore) + if err != nil { + return err + } + + expectedAtomOut := int64(0) + if node.height >= b.chainParams.StakeValidationHeight { + // Subsidy aligns with the height we're voting on, not with the + // height of the current block. 
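// A minimal sketch of the regular-tree coinbase ceiling applied above,
// assuming plain integer inputs: once stake validation height is reached,
// the fees the miner may claim are scaled by the fraction of votes actually
// included, and the coinbase outputs may not exceed
// work subsidy + tax subsidy + (scaled) fees.
func maxCoinbaseOut(workSubsidy, taxSubsidy, fees int64,
	voters, ticketsPerBlock int64, afterStakeValidation bool) int64 {

	if afterStakeValidation {
		// For example, with 3 of 5 possible votes present, only 3/5 of the
		// fees are payable to the coinbase.
		fees = fees * voters / ticketsPerBlock
	}
	return workSubsidy + taxSubsidy + fees
}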
+ expectedAtomOut = CalcStakeVoteSubsidy(node.height-1, + b.chainParams) * int64(node.header.Voters) + } else { + expectedAtomOut = totalFees + } + + if totalAtomOutStake > expectedAtomOut { + str := fmt.Sprintf("stakebase transactions for block pays %v "+ + "which is more than expected value of %v", + totalAtomOutStake, expectedAtomOut) + return ruleError(ErrBadStakebaseValue, str) + } } - expectedSatoshiOut := CalcBlockSubsidy(node.height, b.chainParams) + - totalFees - if totalSatoshiOut > expectedSatoshiOut { - str := fmt.Sprintf("coinbase transaction for block pays %v "+ - "which is more than expected value of %v", - totalSatoshiOut, expectedSatoshiOut) - return ruleError(ErrBadCoinbaseValue, str) + + return nil +} + +// checkConnectBlock performs several checks to confirm connecting the passed +// block to the main chain (including whatever reorganization might be necessary +// to get this node to the main chain) does not violate any rules. +// +// The CheckConnectBlock function makes use of this function to perform the +// bulk of its work. The only difference is this function accepts a node which +// may or may not require reorganization to connect it to the main chain whereas +// CheckConnectBlock creates a new node which specifically connects to the end +// of the current main chain and then calls this function with that node. +// +// See the comments for CheckConnectBlock for some examples of the type of +// checks performed by this function. +func (b *BlockChain) checkConnectBlock(node *blockNode, + block *dcrutil.Block) error { + // If the side chain blocks end up in the database, a call to + // CheckBlockSanity should be done here in case a previous version + // allowed a block that is no longer valid. However, since the + // implementation only currently uses memory for the side chain blocks, + // it isn't currently necessary. + parent, err := b.getPrevNodeFromNode(node) + if err != nil { + return err + } + parentBlock, err := b.getBlockFromHash(node.parentHash) + if err != nil { + return ruleError(ErrMissingParent, err.Error()) + } + + // The coinbase for the Genesis block is not spendable, so just return + // now. + if node.hash.IsEqual(b.chainParams.GenesisHash) && b.bestChain == nil { + return nil + } + + err = b.checkDupTxs(node, parent, block, parentBlock) + if err != nil { + errStr := fmt.Sprintf("checkDupTxs failed for incoming "+ + "node %v; error given: %v", node.hash, err) + return ruleError(ErrBIP0030, errStr) + } + + // Request a map that contains all input transactions for the following + // TxTrees: + // 1) Parent TxTreeRegular (if validated) + // 2) Current TxTreeStake + // 3) Current TxTreeRegular + // These transactions are needed for verification of things such as + // transaction inputs, counting pay-to-script-hashes, and scripts. + // + // TODO This is very slow. Ideally, we should fetch once, check + // what we need to for this state, then update to the next state + // with the connect block function for a single tx store. + // Additionally, txStores for sidechains being evaluated should + // be cached so that if another block is added on top of the side + // chain, we don't have to recalculate the entire ticket store + // again resulting in O(n) behaviour per block check connect + // that can easily be O(1). + // The same going for ticket lookup, and would offer a dramatic + // improvement there as well. Blocks on extremely long side + // chains may take a very long time to validate with the current + // code, with hundreds of blocks taking hours. 
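// A minimal sketch, with plain integers in place of the block lookups, of
// the stake-tree payout ceiling applied near the top of this hunk: the net
// amount created by the block's votes (their outputs minus the ticket prices
// they consume) must not exceed the per-vote subsidy, computed for the
// height being voted on, multiplied by the number of voters in the header.
func stakeCreationWithinLimit(netCreatedByVotes, perVoteSubsidy int64,
	voters uint16) bool {

	return netCreatedByVotes <= perVoteSubsidy*int64(voters)
}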
+ regularTxTreeValid := dcrutil.IsFlagSet16(node.header.VoteBits, + dcrutil.BlockValid) + thisNodeStakeViewpoint := ViewpointPrevInvalidStake + thisNodeRegularViewpoint := ViewpointPrevInvalidRegular + var txInputStoreInitial TxStore + + // TxStore at blockchain HEAD. + if regularTxTreeValid { + txInputStoreInitial, err = b.fetchInputTransactions(node, block, + ViewpointPrevValidInitial) + if err != nil { + log.Tracef("fetchInputTransactions failed for incoming "+ + "node %v; error given: %v", node.hash, err) + return err + } + + thisNodeStakeViewpoint = ViewpointPrevValidStake + thisNodeRegularViewpoint = ViewpointPrevValidRegular + } + + // TxStore at blockchain HEAD + TxTreeRegular of prevBlock (if + // validated). + txInputStoreStake, err := b.fetchInputTransactions(node, block, + thisNodeStakeViewpoint) + if err != nil { + log.Tracef("fetchInputTransactions failed for incoming "+ + "node %v; error given: %v", node.hash, err) + return err + } + + // TxStore at blockchain HEAD + TxTreeRegular of prevBlock (if + // validated) + TxTreeStake of current block. + txInputStoreRegular, err := b.fetchInputTransactions(node, block, + thisNodeRegularViewpoint) + if err != nil { + log.Tracef("fetchInputTransactions failed for incoming "+ + "node %v; error given: %v", node.hash, err) + return err + } + + // Check to ensure consensus via the PoS ticketing system versus the + // informations stored in the header. + ticketStore, err := b.fetchTicketStore(node) + if err != nil { + log.Tracef("Failed to generate ticket store for incoming "+ + "node %v; error given: %v", node.hash, err) + return err + } + + err = b.CheckBlockStakeSanity(ticketStore, + b.stakeValidationHeight, + node, + block, + parentBlock, + b.chainParams) + if err != nil { + log.Tracef("CheckBlockStakeSanity failed for incoming "+ + "node %v; error given: %v", node.hash, err) + return err + } + + // The number of signature operations must be less than the maximum + // allowed per block. Note that the preliminary sanity checks on a + // block also include a check similar to this one, but this check + // expands the count to include a precise count of pay-to-script-hash + // signature operations in each of the input transaction public key + // scripts. + // Do this for all TxTrees. + if regularTxTreeValid { + err = checkP2SHNumSigOps(parentBlock.Transactions(), + txInputStoreInitial, true) + if err != nil { + return err + } + } + + err = checkP2SHNumSigOps(block.STransactions(), + txInputStoreStake, false) + if err != nil { + return err + } + + err = checkP2SHNumSigOps(block.Transactions(), + txInputStoreRegular, true) + if err != nil { + return err + } + + // Perform several checks on the inputs for each transaction. Also + // accumulate the total fees. This could technically be combined with + // the loop above instead of running another loop over the transactions, + // but by separating it we can avoid running the more expensive (though + // still relatively cheap as compared to running the scripts) checks + // against all the inputs when the signature operations are out of + // bounds. + // TxTreeRegular of previous block. + if regularTxTreeValid { + // TODO when validating the previous block, cache the stake + // fees in a node so you don't have to redo these expensive + // lookups. 
+ parentTxTreeValid := dcrutil.IsFlagSet16(parent.header.VoteBits, + dcrutil.BlockValid) + thisParentNodeStakeViewpoint := ViewpointPrevInvalidStake + beforeSVH := node.height < b.chainParams.StakeValidationHeight + firstBlock := node.height == 1 + if parentTxTreeValid || (beforeSVH && !firstBlock) { + thisParentNodeStakeViewpoint = ViewpointPrevValidStake + } + + parentTxInputStoreStake, err := b.fetchInputTransactions(parent, + parentBlock, thisParentNodeStakeViewpoint) + if err != nil { + log.Tracef("fetchInputTransactions failed for incoming "+ + "parent node %v stake tree; error given: %v", node.hash, err) + return err + } + + stakeTreeFees, err := getStakeTreeFees(parent.height, + b.chainParams, + parentBlock.STransactions(), + parentTxInputStoreStake) + if err != nil { + log.Tracef("getStakeTreeFees failed for prev "+ + "TxTreeStake: %v", err.Error()) + return err + } + + err = b.checkTransactionInputs(stakeTreeFees, parent, + parentBlock.Transactions(), txInputStoreInitial, true) + if err != nil { + log.Tracef("checkTransactionInputs failed for prev "+ + "TxTreeRegular: %v", err.Error()) + return err + } + } + + // TxTreeStake of current block. + err = b.checkTransactionInputs(0, node, block.STransactions(), + txInputStoreStake, false) + if err != nil { + log.Tracef("checkTransactionInputs failed for cur "+ + "TxTreeStake: %v", err.Error()) + return err + } + + stakeTreeFees, err := getStakeTreeFees(node.height, + b.chainParams, + block.STransactions(), + txInputStoreStake) + if err != nil { + log.Tracef("getStakeTreeFees failed for cur "+ + "TxTreeStake: %v", err.Error()) + return err + } + + // TxTreeRegular of current block. + err = b.checkTransactionInputs(stakeTreeFees, node, block.Transactions(), + txInputStoreRegular, true) + if err != nil { + log.Tracef("checkTransactionInputs failed for cur "+ + "TxTreeRegular: %v", err.Error()) + return err } // Don't run scripts if this node is before the latest known good @@ -1113,39 +2570,52 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block) er runScripts = false } - // Get the previous block node. This function is used over simply - // accessing node.parent directly as it will dynamically create previous - // block nodes as needed. This helps allow only the pieces of the chain - // that are needed to remain in memory. - prevNode, err := b.getPrevNodeFromNode(node) - if err != nil { - log.Errorf("getPrevNodeFromNode: %v", err) - return err - } - - // Blocks created after the BIP0016 activation time need to have the - // pay-to-script-hash checks enabled. - var scriptFlags txscript.ScriptFlags - if block.MsgBlock().Header.Timestamp.After(txscript.Bip16Activation) { - scriptFlags |= txscript.ScriptBip16 - } - - // Enforce DER signatures for block versions 3+ once the majority of the - // network has upgraded to the enforcement threshold. This is part of - // BIP0066. - blockHeader := &block.MsgBlock().Header - if blockHeader.Version >= 3 && b.isMajorityVersion(3, prevNode, - b.chainParams.BlockEnforceNumRequired) { - - scriptFlags |= txscript.ScriptVerifyDERSignatures - } - // Now that the inexpensive checks are done and have passed, verify the // transactions are actually allowed to spend the coins by running the // expensive ECDSA signature check scripts. Doing this last helps // prevent CPU exhaustion attacks. 
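// A minimal sketch of the fee carry-forward ordering used above, with
// hypothetical closures standing in for the real checkTransactionInputs
// calls: the parent's regular tree is checked with the parent's stake-tree
// fees carried in (only when voters approved that tree), the current stake
// tree is checked with zero carried fees, and the current regular tree is
// checked with the current stake-tree fees carried in so the coinbase may
// claim them.
func checkAllTreesSketch(
	parentRegularValid bool,
	parentStakeFees, curStakeFees int64,
	checkParentRegular, checkCurStake, checkCurRegular func(carriedFees int64) error,
) error {
	if parentRegularValid {
		if err := checkParentRegular(parentStakeFees); err != nil {
			return err
		}
	}
	if err := checkCurStake(0); err != nil {
		return err
	}
	return checkCurRegular(curStakeFees)
}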
if runScripts { - err := checkBlockScripts(block, txInputStore, scriptFlags) + var scriptFlags txscript.ScriptFlags + scriptFlags |= txscript.ScriptBip16 + scriptFlags |= txscript.ScriptVerifyDERSignatures + scriptFlags |= txscript.ScriptVerifyStrictEncoding + scriptFlags |= txscript.ScriptVerifyMinimalData + scriptFlags |= txscript.ScriptVerifyCleanStack + scriptFlags |= txscript.ScriptVerifyCheckLockTimeVerify + + if regularTxTreeValid { + // TxTreeRegular of previous block. + err = checkBlockScripts(parentBlock, txInputStoreInitial, true, + scriptFlags) + if err != nil { + log.Tracef("checkBlockScripts failed; error "+ + "returned on txtreeregular of prev block: %v", + err.Error()) + return err + } + } + + // TxTreeStake of current block. + err = checkBlockScripts(block, txInputStoreStake, false, scriptFlags) + if err != nil { + log.Tracef("checkBlockScripts failed; error "+ + "returned on txtreestake of cur block: %v", err.Error()) + return err + } + + // TxTreeRegular of current block. + err = checkBlockScripts(block, txInputStoreRegular, true, scriptFlags) + if err != nil { + log.Tracef("checkBlockScripts failed; error "+ + "returned on txtreeregular of cur block: %v", err.Error()) + return err + } + } + + // First block has special rules concerning the ledger. + if node.height == 1 { + err := BlockOneCoinbasePaysTokens(block.Transactions()[0], + b.chainParams) if err != nil { return err } @@ -1163,10 +2633,23 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block) er // transaction script validation. // // This function is NOT safe for concurrent access. -func (b *BlockChain) CheckConnectBlock(block *btcutil.Block) error { - prevNode := b.bestChain +func (b *BlockChain) CheckConnectBlock(block *dcrutil.Block) error { + parentHash := block.MsgBlock().Header.PrevBlock + prevNode, err := b.findNode(&parentHash) + if err != nil { + return ruleError(ErrMissingParent, err.Error()) + } + + voteBitsStake := make([]uint16, 0) + for _, stx := range block.STransactions() { + if is, _ := stake.IsSSGen(stx); is { + vb := stake.GetSSGenVoteBits(stx) + voteBitsStake = append(voteBitsStake, vb) + } + } + newNode := newBlockNode(&block.MsgBlock().Header, block.Sha(), - block.Height()) + block.Height(), voteBitsStake) if prevNode != nil { newNode.parent = prevNode newNode.workSum.Add(prevNode.workSum, newNode.workSum) diff --git a/blockchain/validate_test.go b/blockchain/validate_test.go index 2589f1ca..5c670fbc 100644 --- a/blockchain/validate_test.go +++ b/blockchain/validate_test.go @@ -1,26 +1,65 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
package blockchain_test import ( - "math" - "reflect" + "bytes" + "compress/bzip2" + "encoding/gob" + "encoding/hex" + "math/big" + "os" + "path/filepath" "testing" "time" - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/blockchain" + // "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) -// TestCheckConnectBlock tests the CheckConnectBlock function to ensure it -// fails -func TestCheckConnectBlock(t *testing.T) { +// recalculateMsgBlockMerkleRootsSize recalculates the merkle roots for a msgBlock, +// then stores them in the msgBlock's header. It also updates the block size. +func recalculateMsgBlockMerkleRootsSize(msgBlock *wire.MsgBlock) { + tempBlock := dcrutil.NewBlock(msgBlock) + + merkles := blockchain.BuildMerkleTreeStore(tempBlock.Transactions()) + merklesStake := blockchain.BuildMerkleTreeStore(tempBlock.STransactions()) + + msgBlock.Header.MerkleRoot = *merkles[len(merkles)-1] + msgBlock.Header.StakeRoot = *merklesStake[len(merklesStake)-1] + msgBlock.Header.Size = uint32(msgBlock.SerializeSize()) +} + +// TestBlockValidationRules unit tests various block validation rules. +// It checks the following: +// 1. ProcessBlock +// 2. CheckWorklessBlockSanity +// 3. CheckConnectBlock +// +// The tests are done with a pregenerated simnet blockchain with two wallets +// running on it: +// +// 1: erase exodus rhythm paragraph cleanup company quiver opulent crusade +// Ohio merit recipe spheroid Pandora stairway disbelief framework component +// newborn monument tumor supportive wallet sensation standard frequency accrue +// customer stapler Burlington klaxon Medusa retouch +// +// 2: indulge hazardous bombast tobacco tunnel Pandora hockey whimsical choking +// Wilmington jawbone revival beaming Capricorn gazelle armistice beaming company +// scenic pedigree quadrant hamburger Algol Yucatan erase impetus seabird +// hemisphere drunken vacancy uncut caretaker Dupont +func TestBlockValidationRules(t *testing.T) { // Create a new database and chain instance to run tests against. - chain, teardownFunc, err := chainSetup("checkconnectblock") + chain, teardownFunc, err := chainSetup("validateunittests", + simNetParams) if err != nil { t.Errorf("Failed to setup chain instance: %v", err) return @@ -34,348 +73,1922 @@ func TestCheckConnectBlock(t *testing.T) { // The genesis block should fail to connect since it's already // inserted. - genesisBlock := chaincfg.MainNetParams.GenesisBlock - err = chain.CheckConnectBlock(btcutil.NewBlock(genesisBlock)) + genesisBlock := simNetParams.GenesisBlock + err = chain.CheckConnectBlock(dcrutil.NewBlock(genesisBlock)) if err == nil { - t.Errorf("CheckConnectBlock: Did not received expected error") + t.Errorf("CheckConnectBlock: Did not receive expected error") } -} -// TestCheckBlockSanity tests the CheckBlockSanity function to ensure it works -// as expected. -func TestCheckBlockSanity(t *testing.T) { - powLimit := chaincfg.MainNetParams.PowLimit - block := btcutil.NewBlock(&Block100000) + // Load up the rest of the blocks up to HEAD~1. 
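// A hypothetical helper sketching the mutate-and-revalidate pattern the
// tests below follow, assuming this file's imports and the
// recalculateMsgBlockMerkleRootsSize helper defined above (the
// blockchain.MedianTimeSource parameter type is an assumption): corrupt a
// copy of a serialized block, repair its merkle roots and size so only the
// intended rule is violated, then run the sanity and connect checks.
func corruptAndCheck(chain *blockchain.BlockChain, params *chaincfg.Params,
	timeSource blockchain.MedianTimeSource, blockBytes []byte, height int64,
	mutate func(*wire.MsgBlock)) error {

	mutated := new(wire.MsgBlock)
	mutated.FromBytes(blockBytes) // decode error ignored, as in the tests below
	mutate(mutated)               // apply the single rule violation under test
	recalculateMsgBlockMerkleRootsSize(mutated)

	bl := dcrutil.NewBlock(mutated)
	bl.SetHeight(height)
	if err := blockchain.CheckWorklessBlockSanity(bl, timeSource, params); err != nil {
		return err
	}
	return chain.CheckConnectBlock(bl)
}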
+ filename := filepath.Join("testdata/", "blocks0to168.bz2") + fi, err := os.Open(filename) + bcStream := bzip2.NewReader(fi) + defer fi.Close() + + // Create a buffer of the read file + bcBuf := new(bytes.Buffer) + bcBuf.ReadFrom(bcStream) + + // Create decoder from the buffer and a map to store the data + bcDecoder := gob.NewDecoder(bcBuf) + blockChain := make(map[int64][]byte) + + // Decode the blockchain into the map + if err := bcDecoder.Decode(&blockChain); err != nil { + t.Errorf("error decoding test blockchain: %v", err.Error()) + } + + // Insert blocks 1 to 142 and perform various test. Block 1 has + // special properties, so make sure those validate correctly first. + block1Bytes := blockChain[int64(1)] timeSource := blockchain.NewMedianTime() - err := blockchain.CheckBlockSanity(block, powLimit, timeSource) + + // ---------------------------------------------------------------------------- + // ErrBlockOneOutputs 1 + // No coinbase outputs check can't trigger because it throws an error + // elsewhere. + + // ---------------------------------------------------------------------------- + // ErrBlockOneOutputs 2 + // Remove one of the premine outputs and make sure it fails. + noCoinbaseOuts1 := new(wire.MsgBlock) + noCoinbaseOuts1.FromBytes(block1Bytes) + noCoinbaseOuts1.Transactions[0].TxOut = + noCoinbaseOuts1.Transactions[0].TxOut[:2] + + recalculateMsgBlockMerkleRootsSize(noCoinbaseOuts1) + b1test := dcrutil.NewBlock(noCoinbaseOuts1) + b1test.SetHeight(int64(1)) + + err = blockchain.CheckWorklessBlockSanity(b1test, timeSource, simNetParams) if err != nil { - t.Errorf("CheckBlockSanity: %v", err) + t.Errorf("Got unexpected error for ErrBlockOneOutputs test 2: %v", err) } - // Ensure a block that has a timestamp with a precision higher than one - // second fails. - timestamp := block.MsgBlock().Header.Timestamp - block.MsgBlock().Header.Timestamp = timestamp.Add(time.Nanosecond) - err = blockchain.CheckBlockSanity(block, powLimit, timeSource) - if err == nil { - t.Errorf("CheckBlockSanity: error is nil when it shouldn't be") - } -} - -// TestCheckSerializedHeight tests the checkSerializedHeight function with -// various serialized heights and also does negative tests to ensure errors -// and handled properly. -func TestCheckSerializedHeight(t *testing.T) { - // Create an empty coinbase template to be used in the tests below. - coinbaseOutpoint := wire.NewOutPoint(&wire.ShaHash{}, math.MaxUint32) - coinbaseTx := wire.NewMsgTx() - coinbaseTx.Version = 2 - coinbaseTx.AddTxIn(wire.NewTxIn(coinbaseOutpoint, nil)) - - // Expected rule errors. - missingHeightError := blockchain.RuleError{ - ErrorCode: blockchain.ErrMissingCoinbaseHeight, - } - badHeightError := blockchain.RuleError{ - ErrorCode: blockchain.ErrBadCoinbaseHeight, + err = chain.CheckConnectBlock(b1test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrBlockOneOutputs { + t.Errorf("Got no error or unexpected error for ErrBlockOneOutputs "+ + "test 2: %v", err) } - tests := []struct { - sigScript []byte // Serialized data - wantHeight int64 // Expected height - err error // Expected error type - }{ - // No serialized height length. - {[]byte{}, 0, missingHeightError}, - // Serialized height length with no height bytes. - {[]byte{0x02}, 0, missingHeightError}, - // Serialized height length with too few height bytes. - {[]byte{0x02, 0x4a}, 0, missingHeightError}, - // Serialized height that needs 2 bytes to encode. 
- {[]byte{0x02, 0x4a, 0x52}, 21066, nil}, - // Serialized height that needs 2 bytes to encode, but backwards - // endianness. - {[]byte{0x02, 0x4a, 0x52}, 19026, badHeightError}, - // Serialized height that needs 3 bytes to encode. - {[]byte{0x03, 0x40, 0x0d, 0x03}, 200000, nil}, - // Serialized height that needs 3 bytes to encode, but backwards - // endianness. - {[]byte{0x03, 0x40, 0x0d, 0x03}, 1074594560, badHeightError}, + // ---------------------------------------------------------------------------- + // ErrBlockOneOutputs 3 + // Bad pay to hash. + noCoinbaseOuts1 = new(wire.MsgBlock) + noCoinbaseOuts1.FromBytes(block1Bytes) + noCoinbaseOuts1.Transactions[0].TxOut[0].PkScript[8] ^= 0x01 + + recalculateMsgBlockMerkleRootsSize(noCoinbaseOuts1) + b1test = dcrutil.NewBlock(noCoinbaseOuts1) + b1test.SetHeight(int64(1)) + + err = blockchain.CheckWorklessBlockSanity(b1test, timeSource, simNetParams) + if err != nil { + t.Errorf("Got unexpected error for ErrBlockOneOutputs test 3: %v", err) } - t.Logf("Running %d tests", len(tests)) - for i, test := range tests { - msgTx := coinbaseTx.Copy() - msgTx.TxIn[0].SignatureScript = test.sigScript - tx := btcutil.NewTx(msgTx) + err = chain.CheckConnectBlock(b1test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrBlockOneOutputs { + t.Errorf("Got no error or unexpected error for ErrBlockOneOutputs "+ + "test 3: %v", err) + } - err := blockchain.TstCheckSerializedHeight(tx, test.wantHeight) - if reflect.TypeOf(err) != reflect.TypeOf(test.err) { - t.Errorf("checkSerializedHeight #%d wrong error type "+ - "got: %v <%T>, want: %T", i, err, err, test.err) - continue + // ---------------------------------------------------------------------------- + // ErrBlockOneOutputs 4 + // Bad pay to amount. + noCoinbaseOuts1 = new(wire.MsgBlock) + noCoinbaseOuts1.FromBytes(block1Bytes) + noCoinbaseOuts1.Transactions[0].TxOut[0].Value-- + + recalculateMsgBlockMerkleRootsSize(noCoinbaseOuts1) + b1test = dcrutil.NewBlock(noCoinbaseOuts1) + b1test.SetHeight(int64(1)) + + err = blockchain.CheckWorklessBlockSanity(b1test, timeSource, simNetParams) + if err != nil { + t.Errorf("Got unexpected error for ErrBlockOneOutputs test 4: %v", err) + } + + err = chain.CheckConnectBlock(b1test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrBlockOneOutputs { + t.Errorf("Got no error or unexpected error for ErrBlockOneOutputs "+ + "test 4: %v", err) + } + + // ---------------------------------------------------------------------------- + // Add the rest of the blocks up to the stake early test block. + stakeEarlyTest := 142 + for i := 1; i < stakeEarlyTest; i++ { + bl, err := dcrutil.NewBlockFromBytes(blockChain[int64(i)]) + if err != nil { + t.Errorf("NewBlockFromBytes error: %v", err.Error()) } + bl.SetHeight(int64(i)) - if rerr, ok := err.(blockchain.RuleError); ok { - trerr := test.err.(blockchain.RuleError) - if rerr.ErrorCode != trerr.ErrorCode { - t.Errorf("checkSerializedHeight #%d wrong "+ - "error code got: %v, want: %v", i, - rerr.ErrorCode, trerr.ErrorCode) - continue + _, _, err = chain.ProcessBlock(bl, timeSource, blockchain.BFNone) + if err != nil { + t.Errorf("ProcessBlock error: %v", err.Error()) + } + } + + // ---------------------------------------------------------------------------- + // ErrInvalidEarlyStakeTx + // There are multiple paths to this error, but here we try an early SSGen. 
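// A minimal sketch, with plain integers, of the kind of height gate behind
// ErrInvalidEarlyStakeTx exercised below; the revocation variant shown
// earlier in this diff requires the spending height to be at least stake
// enabled height + 1, since a vote must have been missed before a ticket can
// be revoked.
func revocationHeightAllowed(txHeight, stakeEnabledHeight int64) bool {
	return txHeight >= stakeEnabledHeight+1
}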
+ block142Bytes := blockChain[int64(stakeEarlyTest)] + earlySSGen142 := new(wire.MsgBlock) + earlySSGen142.FromBytes(block142Bytes) + + ssgenTx, _ := hex.DecodeString("01000000020000000000000000000000000000000" + + "000000000000000000000000000000000ffffffff00ffffffff76dfeab65ad4ca743" + + "4d5455e824c3871ed0b23ba967de53e417d9bdd7a6e42a60000000001ffffffff030" + + "00000000000000000002a6a28c5fca7895d9e1eeb7cf05755dfb5a7aa7b80b3fa8c6" + + "8c77ea3ae0dc5cd0fab198f0000000000000000000000000000000000046a02ffffe" + + "5700bb10000000000001abb76a91442f39dc794d4c68529baf41ffbd0613c16fddef" + + "a88ac000000000000000002c5220bb10000000000000000ffffffff04deadbeef204" + + "e00000000000019000000120000006b483045022100e0e8ffe608bdc274ac5aad896" + + "5faaa1a56341896fddf6470db4ea2509c71be1302207661b453473f3ce2e7b3311ef" + + "a0097d6fdbc5217e171cca04202b40d00b39e0f012103c8cbbf90d716d4840f05aef" + + "7b0232fd0dc3276219574a4919f0b26f62e3365e3") + mtxFromB := new(wire.MsgTx) + mtxFromB.FromBytes(ssgenTx) + earlySSGen142.AddSTransaction(mtxFromB) + recalculateMsgBlockMerkleRootsSize(earlySSGen142) + b142test := dcrutil.NewBlock(earlySSGen142) + b142test.SetHeight(int64(stakeEarlyTest)) + + err = blockchain.CheckWorklessBlockSanity(b142test, timeSource, simNetParams) + if err == nil { + t.Errorf("got no error for ErrInvalidEarlyStakeTx test") + } + + // Hits error here. + err = chain.CheckConnectBlock(b142test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrInvalidEarlyStakeTx { + t.Errorf("Got unexpected no error or wrong error for "+ + "ErrInvalidEarlyStakeTx test: %v", err) + } + + // ---------------------------------------------------------------------------- + // ErrInvalidEarlyVoteBits + earlyBadVoteBits42 := new(wire.MsgBlock) + earlyBadVoteBits42.FromBytes(block142Bytes) + earlyBadVoteBits42.Header.VoteBits ^= 0x80 + b142test = dcrutil.NewBlock(earlyBadVoteBits42) + b142test.SetHeight(int64(stakeEarlyTest)) + + err = blockchain.CheckWorklessBlockSanity(b142test, timeSource, simNetParams) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrInvalidEarlyVoteBits { + t.Errorf("Got unexpected no error or wrong error for "+ + "ErrInvalidEarlyVoteBits test: %v", err) + } + + // Hits error here. + err = chain.CheckConnectBlock(b142test) + if err != nil { + t.Errorf("got unexpected error for ErrInvalidEarlyVoteBits test %v", err) + } + + // ---------------------------------------------------------------------------- + // Add blocks up to the first stage of testing. + testsIdx1 := 153 + testsIdx2 := 154 + testsIdx3 := 166 + for i := stakeEarlyTest; i < testsIdx1; i++ { + bl, err := dcrutil.NewBlockFromBytes(blockChain[int64(i)]) + if err != nil { + t.Errorf("NewBlockFromBytes error: %v", err.Error()) + } + bl.SetHeight(int64(i)) + + _, _, err = chain.ProcessBlock(bl, timeSource, blockchain.BFNone) + if err != nil { + t.Errorf("ProcessBlock error: %v", err.Error()) + } + } + + // Make sure the last block validates. 
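// The checks below repeat one assertion shape many times: a particular rule
// error code is, or is not, expected from the sanity or connect checks. A
// hypothetical helper capturing that shape, assuming the rule-error code
// type is exported as blockchain.ErrorCode as in upstream btcd:
func isRuleError(err error, want blockchain.ErrorCode) bool {
	rerr, ok := err.(blockchain.RuleError)
	return ok && rerr.GetCode() == want
}

// Example of how the assertions below could then read:
//	if !isRuleError(err, blockchain.ErrBadMerkleRoot) {
//		t.Errorf("expected ErrBadMerkleRoot, got %v", err)
//	}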
+ block153, err := dcrutil.NewBlockFromBytes(blockChain[int64(testsIdx1)]) + if err != nil { + t.Errorf("NewBlockFromBytes error: %v", err.Error()) + } + block153.SetHeight(int64(testsIdx1)) + err = chain.CheckConnectBlock(block153) + if err != nil { + t.Errorf("CheckConnectBlock error: %v", err.Error()) + } + block153Bytes := blockChain[int64(testsIdx1)] + + // ---------------------------------------------------------------------------- + // ErrBadMerkleRoot 1 + // Corrupt the merkle root in tx tree regular + badMerkleRoot153 := new(wire.MsgBlock) + badMerkleRoot153.FromBytes(block153Bytes) + badMerkleRoot153.Header.MerkleRoot[0] ^= 0x01 + b153test := dcrutil.NewBlock(badMerkleRoot153) + b153test.SetHeight(int64(testsIdx1)) + + err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrBadMerkleRoot { + t.Errorf("Failed to get error or correct error for ErrBadMerkleRoot 1"+ + "test (err: %v)", err) + } + + // It hits another error on checkConnectBlock. + err = chain.CheckConnectBlock(b153test) + if err != nil { + t.Errorf("Got unexpected error for ErrBadMerkleRoot 1 test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrBadMerkleRoot 2 + // Corrupt the merkle root in tx tree stake + badMerkleRoot153 = new(wire.MsgBlock) + badMerkleRoot153.FromBytes(block153Bytes) + badMerkleRoot153.Header.StakeRoot[0] ^= 0x01 + b153test = dcrutil.NewBlock(badMerkleRoot153) + b153test.SetHeight(int64(testsIdx1)) + + err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrBadMerkleRoot { + t.Errorf("Failed to get error or correct error for ErrBadMerkleRoot 2"+ + "test (err: %v)", err) + } + + // It hits another error on checkConnectBlock. 
+ err = chain.CheckConnectBlock(b153test) + if err != nil { + t.Errorf("Got unexpected error for ErrBadMerkleRoot 2 test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrUnexpectedDifficulty + badDifficulty153 := new(wire.MsgBlock) + badDifficulty153.FromBytes(block153Bytes) + badDifficulty153.Header.Bits = 0x207ffffe + b153test = dcrutil.NewBlock(badDifficulty153) + b153test.SetHeight(int64(testsIdx1)) + + _, _, err = chain.ProcessBlock(b153test, timeSource, blockchain.BFNone) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrUnexpectedDifficulty { + t.Errorf("Failed to get error or correct error for "+ + "ErrUnexpectedDifficulty test (err: %v)", err) + } + + // ---------------------------------------------------------------------------- + // ErrWrongBlockSize + badBlockSize153 := new(wire.MsgBlock) + badBlockSize153.FromBytes(block153Bytes) + badBlockSize153.Header.Size = 0x20ffff71 + b153test = dcrutil.NewBlock(badBlockSize153) + b153test.SetHeight(int64(testsIdx1)) + + _, _, err = chain.ProcessBlock(b153test, timeSource, blockchain.BFNoPoWCheck) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrWrongBlockSize { + t.Errorf("Failed to get error or correct error for "+ + "ErrWrongBlockSize test (err: %v)", err) + } + + // ---------------------------------------------------------------------------- + // ErrHighHash + badHash153 := new(wire.MsgBlock) + badHash153.FromBytes(block153Bytes) + badHash153.Header.Size = 0x20ffff70 + b153test = dcrutil.NewBlock(badHash153) + b153test.SetHeight(int64(testsIdx1)) + + _, _, err = chain.ProcessBlock(b153test, timeSource, blockchain.BFNone) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrHighHash { + t.Errorf("Failed to get error or correct error for "+ + "ErrHighHash test (err: %v)", err) + } + + // ---------------------------------------------------------------------------- + // ErrMissingParent + missingParent153 := new(wire.MsgBlock) + missingParent153.FromBytes(block153Bytes) + missingParent153.Header.PrevBlock[8] ^= 0x01 + b153test = dcrutil.NewBlock(missingParent153) + b153test.SetHeight(int64(testsIdx1)) + + err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) + if err != nil { + t.Errorf("Got unexpected sanity error for ErrMissingParent test: %v", + err) + } + + err = chain.CheckConnectBlock(b153test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrMissingParent { + t.Errorf("Got no or unexpected error for ErrMissingParent test %v", err) + } + + // ---------------------------------------------------------------------------- + // ErrBadCoinbaseValue + badSubsidy153 := new(wire.MsgBlock) + badSubsidy153.FromBytes(block153Bytes) + badSubsidy153.Transactions[0].TxOut[2].Value++ + recalculateMsgBlockMerkleRootsSize(badSubsidy153) + b153test = dcrutil.NewBlock(badSubsidy153) + b153test.SetHeight(int64(testsIdx1)) + + err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) + if err != nil { + t.Errorf("Got unexpected sanity error for ErrBadCoinbaseValue test: %v", + err) + } + + err = chain.CheckConnectBlock(b153test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrBadCoinbaseValue { + t.Errorf("Got no or unexpected error for ErrBadCoinbaseValue test %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrBadCoinbaseOutpoint/ErrFirstTxNotCoinbase + // Seems 
impossible to hit this because ErrFirstTxNotCoinbase is hit first. + badCBOutpoint153 := new(wire.MsgBlock) + badCBOutpoint153.FromBytes(block153Bytes) + badCBOutpoint153.Transactions[0].TxIn[0].PreviousOutPoint.Hash[0] ^= 0x01 + recalculateMsgBlockMerkleRootsSize(badCBOutpoint153) + b153test = dcrutil.NewBlock(badCBOutpoint153) + b153test.SetHeight(int64(testsIdx1)) + + err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrFirstTxNotCoinbase { + t.Errorf("Got no or unexpected sanity error for "+ + "ErrBadCoinbaseOutpoint test: %v", err) + } + + err = chain.CheckConnectBlock(b153test) + if err == nil { + t.Errorf("Got unexpected no error for ErrBadCoinbaseOutpoint test") + } + + // ---------------------------------------------------------------------------- + // ErrBadCoinbaseFraudProof + badCBFraudProof153 := new(wire.MsgBlock) + badCBFraudProof153.FromBytes(block153Bytes) + badCBFraudProof153.Transactions[0].TxIn[0].BlockHeight = 0x12345678 + recalculateMsgBlockMerkleRootsSize(badCBFraudProof153) + b153test = dcrutil.NewBlock(badCBFraudProof153) + b153test.SetHeight(int64(testsIdx1)) + + err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrBadCoinbaseFraudProof { + t.Errorf("Got no or unexpected sanity error for "+ + "ErrBadCoinbaseFraudProof test: %v", err) + } + + err = chain.CheckConnectBlock(b153test) + if err != nil { + t.Errorf("Got unexpected error for ErrBadCoinbaseFraudProof test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrBadCoinbaseAmountIn + badCBAmountIn153 := new(wire.MsgBlock) + badCBAmountIn153.FromBytes(block153Bytes) + badCBAmountIn153.Transactions[0].TxIn[0].ValueIn = 0x1234567890123456 + recalculateMsgBlockMerkleRootsSize(badCBAmountIn153) + b153test = dcrutil.NewBlock(badCBAmountIn153) + b153test.SetHeight(int64(testsIdx1)) + + err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) + if err != nil { + t.Errorf("Got unexpected error for ErrBadCoinbaseFraudProof test: %v", + err) + } + + err = chain.CheckConnectBlock(b153test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrBadCoinbaseAmountIn { + t.Errorf("Got no or unexpected sanity error for "+ + "ErrBadCoinbaseAmountIn test: %v", err) + } + + // ---------------------------------------------------------------------------- + // ErrBadStakebaseAmountIn + badSBAmountIn153 := new(wire.MsgBlock) + badSBAmountIn153.FromBytes(block153Bytes) + badSBAmountIn153.STransactions[0].TxIn[0].ValueIn = 0x1234567890123456 + recalculateMsgBlockMerkleRootsSize(badSBAmountIn153) + b153test = dcrutil.NewBlock(badSBAmountIn153) + b153test.SetHeight(int64(testsIdx1)) + + err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) + if err != nil { + t.Errorf("Got unexpected error for ErrBadCoinbaseFraudProof test: %v", + err) + } + + err = chain.CheckConnectBlock(b153test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrBadStakebaseAmountIn { + t.Errorf("Got no or unexpected sanity error for "+ + "ErrBadCoinbaseAmountIn test: %v", err) + } + + // ---------------------------------------------------------------------------- + // ErrRegTxInStakeTree + // Break an SSGen by giving it a non-null outpoint. 
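// A minimal sketch, assuming the conventional null-outpoint encoding and
// with a plain byte array standing in for the chainhash type, of what
// "non-null outpoint" means in the ErrRegTxInStakeTree test below: a
// stakebase input, like a coinbase input, is expected to reference an
// all-zero previous hash and the maximal output index, so flipping any byte
// of that hash stops the vote from being recognized as a stake transaction.
func isNullOutpointSketch(prevHash [32]byte, prevIndex uint32) bool {
	for _, b := range prevHash {
		if b != 0 {
			return false
		}
	}
	return prevIndex == 0xffffffff // i.e. math.MaxUint32
}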
+ badStakebaseOutpoint153 := new(wire.MsgBlock) + badStakebaseOutpoint153.FromBytes(block153Bytes) + badOPHash, _ := chainhash.NewHash(bytes.Repeat([]byte{0x01}, 32)) + badStakebaseOutpoint153.STransactions[0].TxIn[0].PreviousOutPoint.Hash = + *badOPHash + + recalculateMsgBlockMerkleRootsSize(badStakebaseOutpoint153) + badStakebaseOutpoint153.Header.Voters-- + b153test = dcrutil.NewBlock(badStakebaseOutpoint153) + b153test.SetHeight(int64(testsIdx1)) + + err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrRegTxInStakeTree { + t.Errorf("Failed to get error or correct error for ErrRegTxInStakeTree "+ + "test (err: %v)", err) + } + + // It hits another error on checkConnectBlock. + err = chain.CheckConnectBlock(b153test) + if err == nil { + t.Errorf("Got unexpected no error for ErrRegTxInStakeTree test") + } + + // ---------------------------------------------------------------------------- + // ErrStakeTxInRegularTree + // Stick an SSGen in TxTreeRegular. + ssgenInRegular153 := new(wire.MsgBlock) + ssgenInRegular153.FromBytes(block153Bytes) + ssgenInRegular153.AddTransaction(ssgenInRegular153.STransactions[4]) + ssgenInRegular153.STransactions = ssgenInRegular153.STransactions[0:3] + ssgenInRegular153.Header.Voters -= 2 + + recalculateMsgBlockMerkleRootsSize(ssgenInRegular153) + b153test = dcrutil.NewBlock(ssgenInRegular153) + b153test.SetHeight(int64(testsIdx1)) + + err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrStakeTxInRegularTree { + t.Errorf("Failed to get error or correct error for ErrRegTxInStakeTree "+ + "test (err: %v)", err) + } + + // Throws bad subsidy error too. + err = chain.CheckConnectBlock(b153test) + if err == nil { + t.Errorf("Got unexpected no error for ErrStakeTxInRegularTree test") + } + + // ---------------------------------------------------------------------------- + // ErrBadStakebaseScriptLen + badStakebaseSS153 := new(wire.MsgBlock) + badStakebaseSS153.FromBytes(block153Bytes) + badStakebaseSS := bytes.Repeat([]byte{0x01}, 256) + badStakebaseSS153.STransactions[0].TxIn[0].SignatureScript = + badStakebaseSS + recalculateMsgBlockMerkleRootsSize(badStakebaseSS153) + b153test = dcrutil.NewBlock(badStakebaseSS153) + b153test.SetHeight(int64(testsIdx1)) + + err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrBadStakebaseScriptLen { + t.Errorf("Failed to get error or correct error for bad stakebase "+ + "script len test (err: %v)", err) + } + + // This otherwise passes the checks. 
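+	// The oversized stakebase script is only rejected by the sanity check
+	// above; the contextual connect check below is expected to pass.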
+ err = chain.CheckConnectBlock(b153test) + if err != nil { + t.Errorf("Unexpected error for bad stakebase script len test: %v", + err.Error()) + } + + // ---------------------------------------------------------------------------- + // ErrBadStakevaseScrVal + badStakebaseScr153 := new(wire.MsgBlock) + badStakebaseScr153.FromBytes(block153Bytes) + badStakebaseScr153.STransactions[0].TxIn[0].SignatureScript[0] ^= 0x01 + recalculateMsgBlockMerkleRootsSize(badStakebaseScr153) + b153test = dcrutil.NewBlock(badStakebaseScr153) + b153test.SetHeight(int64(testsIdx1)) + + err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrBadStakevaseScrVal { + t.Errorf("Failed to get error or correct error for bad stakebase "+ + "script test (err: %v)", err) + } + + // This otherwise passes the checks. + err = chain.CheckConnectBlock(b153test) + if err != nil { + t.Errorf("Unexpected error for bad stakebase script test: %v", + err.Error()) + } + + // ---------------------------------------------------------------------------- + // ErrInvalidRevocations + badSSRtxNum153 := new(wire.MsgBlock) + badSSRtxNum153.FromBytes(block153Bytes) + badSSRtxNum153.Header.Revocations = 2 + + b153test = dcrutil.NewBlock(badSSRtxNum153) + b153test.SetHeight(int64(testsIdx1)) + + err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrInvalidRevocations sanity check: %v", + err) + } + + // Fails and hits ErrInvalidRevocations. + err = chain.CheckConnectBlock(b153test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrInvalidRevNum { + t.Errorf("Unexpected no or wrong error for ErrInvalidRevocations test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrSSRtxPayeesMismatch + // Add an extra txout to the revocation. + ssrtxPayeesMismatch153 := new(wire.MsgBlock) + ssrtxPayeesMismatch153.FromBytes(block153Bytes) + ssrtxPayeesMismatch153.STransactions[5].TxOut = append( + ssrtxPayeesMismatch153.STransactions[5].TxOut, + ssrtxPayeesMismatch153.STransactions[5].TxOut[0]) + + recalculateMsgBlockMerkleRootsSize(ssrtxPayeesMismatch153) + b153test = dcrutil.NewBlock(ssrtxPayeesMismatch153) + b153test.SetHeight(int64(testsIdx1)) + + err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrSSRtxPayeesMismatch sanity "+ + "check: %v", err) + } + + // Fails and hits ErrSSRtxPayeesMismatch. + err = chain.CheckConnectBlock(b153test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrSSRtxPayeesMismatch { + t.Errorf("Unexpected no or wrong error for ErrSSRtxPayeesMismatch "+ + "test: %v", err) + } + + // ---------------------------------------------------------------------------- + // ErrSSRtxPayees 1 + // Corrupt the PKH it pays out to. + badSSRtxPayee153 := new(wire.MsgBlock) + badSSRtxPayee153.FromBytes(block153Bytes) + badSSRtxPayee153.STransactions[5].TxOut[0].PkScript[8] ^= 0x01 + + recalculateMsgBlockMerkleRootsSize(badSSRtxPayee153) + b153test = dcrutil.NewBlock(badSSRtxPayee153) + b153test.SetHeight(int64(testsIdx1)) + + err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrSSRtxPayees sanity "+ + "check 1: %v", err) + } + + // Fails and hits ErrSSRtxPayees. 
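+	// Revocation outputs must pay back to the addresses committed to by the
+	// original ticket purchase, so the corrupted payee hash no longer
+	// matches the commitment.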
+	err = chain.CheckConnectBlock(b153test)
+	if err == nil || err.(blockchain.RuleError).GetCode() !=
+		blockchain.ErrSSRtxPayees {
+		t.Errorf("Unexpected no or wrong error for ErrSSRtxPayees "+
+			"test 1: %v", err)
+	}
+
+	// ----------------------------------------------------------------------------
+	// ErrSSRtxPayees 2
+	// Corrupt the amount. The transaction can pay (0 ... 20000) and still be
+	// valid because with the sstxOut.Version set to 0x5400 we can have fees up
+	// to 2^20 for any SSRtx output.
+	badSSRtxPayee153 = new(wire.MsgBlock)
+	badSSRtxPayee153.FromBytes(block153Bytes)
+	badSSRtxPayee153.STransactions[5].TxOut[0].Value = 20001
+
+	recalculateMsgBlockMerkleRootsSize(badSSRtxPayee153)
+	b153test = dcrutil.NewBlock(badSSRtxPayee153)
+	b153test.SetHeight(int64(testsIdx1))
+
+	err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams)
+	if err != nil {
+		t.Errorf("got unexpected error for ErrSSRtxPayees sanity "+
+			"check 2: %v", err)
+	}
+
+	// Fails and hits ErrSSRtxPayees.
+	err = chain.CheckConnectBlock(b153test)
+	if err == nil || err.(blockchain.RuleError).GetCode() !=
+		blockchain.ErrSSRtxPayees {
+		t.Errorf("Unexpected no or wrong error for ErrSSRtxPayees "+
+			"test 2: %v", err)
+	}
+
+	// ----------------------------------------------------------------------------
+	// ErrInvalidSSRtx
+	invalidSSRtxFor153, _ := hex.DecodeString("0100000001e081ca7481ed46de39e528" +
+		"8a45b6a3f86c478a6ebc60a4b701c75c1bc900ea8a0000000001ffffffff01db040100" +
+		"0000000000001abc76a914a495e69ddfe8b9770b823314ba66d4ca0620131088ac0000" +
+		"00000000000001542c79000000000076000000010000006b483045022100d5b06e2f35" +
+		"b73eeed8331a482c0b45ab3dc1bd98574ae79afbb80853bdac4735022012ea4ce6177c" +
+		"76e4d7e9aca0d06978cdbcbed163a89d7fffa5297968227914e90121033147afc0d065" +
+		"9798f602c92aef634aaffc0a82759b9d0654a5d04c28f3451f76")
+	mtxFromB = new(wire.MsgTx)
+	mtxFromB.FromBytes(invalidSSRtxFor153)
+
+	badSSRtx153 := new(wire.MsgBlock)
+	badSSRtx153.FromBytes(block153Bytes)
+	badSSRtx153.AddSTransaction(mtxFromB)
+	badSSRtx153.Header.Revocations = 1
+
+	recalculateMsgBlockMerkleRootsSize(badSSRtx153)
+	b153test = dcrutil.NewBlock(badSSRtx153)
+	b153test.SetHeight(int64(testsIdx1))
+
+	err = blockchain.CheckWorklessBlockSanity(b153test, timeSource, simNetParams)
+	if err != nil {
+		t.Errorf("got unexpected error for ErrInvalidSSRtx sanity check: %v",
+			err)
+	}
+
+	// Fails and hits ErrInvalidSSRtx.
+	err = chain.CheckConnectBlock(b153test)
+	if err == nil || err.(blockchain.RuleError).GetCode() !=
+		blockchain.ErrInvalidSSRtx {
+		t.Errorf("Unexpected no or wrong error for ErrInvalidSSRtx test: %v",
+			err)
+	}
+
+	// ----------------------------------------------------------------------------
+	// Insert block 154 and continue testing
+	block153MsgBlock := new(wire.MsgBlock)
+	block153MsgBlock.FromBytes(block153Bytes)
+	b153test = dcrutil.NewBlock(block153MsgBlock)
+	b153test.SetHeight(int64(testsIdx1))
+	_, _, err = chain.ProcessBlock(b153test, timeSource, blockchain.BFNone)
+	if err != nil {
+		t.Errorf("Got unexpected error processing block 153: %v", err)
+	}
+	block154Bytes := blockChain[int64(testsIdx2)]
+	block154MsgBlock := new(wire.MsgBlock)
+	block154MsgBlock.FromBytes(block154Bytes)
+	b154test := dcrutil.NewBlock(block154MsgBlock)
+	b154test.SetHeight(int64(testsIdx2))
+
+	// The incoming block should pass fine.
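+	// Block 154 is first validated unmodified to establish a known-good
+	// baseline before the mutated variants below are tested against it.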
+	err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams)
+	if err != nil {
+		t.Errorf("Unexpected error for check block 154 sanity: %v", err.Error())
+	}
+
+	err = chain.CheckConnectBlock(b154test)
+	if err != nil {
+		t.Errorf("Unexpected error for check block 154 connect: %v", err.Error())
+	}
+
+	// ----------------------------------------------------------------------------
+	// ErrNotEnoughStake
+	notEnoughStake154 := new(wire.MsgBlock)
+	notEnoughStake154.FromBytes(block154Bytes)
+	notEnoughStake154.STransactions[5].TxOut[0].Value--
+	notEnoughStake154.AddSTransaction(mtxFromB)
+	recalculateMsgBlockMerkleRootsSize(notEnoughStake154)
+	b154test = dcrutil.NewBlock(notEnoughStake154)
+	b154test.SetHeight(int64(testsIdx2))
+
+	// This fails both checks.
+	err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams)
+	if err == nil || err.(blockchain.RuleError).GetCode() !=
+		blockchain.ErrNotEnoughStake {
+		t.Errorf("Failed to get error or correct error for low stake amt "+
+			"test (err: %v)", err)
+	}
+
+	// Throws an error in stake consensus.
+	err = chain.CheckConnectBlock(b154test)
+	if err == nil {
+		t.Errorf("Unexpected lack of error for low stake amt test")
+	}
+
+	// ----------------------------------------------------------------------------
+	// ErrFreshStakeMismatch
+	badFreshStake154 := new(wire.MsgBlock)
+	badFreshStake154.FromBytes(block154Bytes)
+	badFreshStake154.Header.FreshStake++
+	recalculateMsgBlockMerkleRootsSize(badFreshStake154)
+	b154test = dcrutil.NewBlock(badFreshStake154)
+	b154test.SetHeight(int64(testsIdx2))
+
+	// This passes.
+	err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams)
+	if err != nil {
+		t.Errorf("Unexpected error for ErrFreshStakeMismatch test: %v",
+			err.Error())
+	}
+
+	// Throws an error in stake consensus.
+	err = chain.CheckConnectBlock(b154test)
+	if err == nil || err.(blockchain.RuleError).GetCode() !=
+		blockchain.ErrFreshStakeMismatch {
+		t.Errorf("Unexpected no or wrong error for ErrFreshStakeMismatch "+
+			"test: %v", err)
+	}
+
+	// ----------------------------------------------------------------------------
+	// ErrStakeBelowMinimum still needs to be tested. It can't be hit on this
+	// blockchain because the stake is above the minimum and failure will
+	// always be triggered on that condition first.
+
+	// ----------------------------------------------------------------------------
+	// ErrNotEnoughVotes
+	notEnoughVotes154 := new(wire.MsgBlock)
+	notEnoughVotes154.FromBytes(block154Bytes)
+	notEnoughVotes154.STransactions = notEnoughVotes154.STransactions[0:2]
+	recalculateMsgBlockMerkleRootsSize(notEnoughVotes154)
+	b154test = dcrutil.NewBlock(notEnoughVotes154)
+	b154test.SetHeight(int64(testsIdx2))
+
+	err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams)
+	if err != nil {
+		t.Errorf("Got unexpected block sanity err for "+
+			"not enough votes (err: %v)", err)
+	}
+
+	// Fails and hits ErrNotEnoughVotes.
+ err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrNotEnoughVotes { + t.Errorf("Unexpected no or wrong error for not enough votes test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrTooManyVotes + invalidSSGenFor154, _ := hex.DecodeString("0100000002000000000000000000000" + + "0000000000000000000000000000000000000000000ffffffff00ffffffff9a4fc238" + + "0060cd86a65620f43af5d641a15c11cba8a3b41cb0f87c2e5795ef590000000001fff" + + "fffff0300000000000000000000266a241cf1d119f9443cd651ef6ff263b561d77b27" + + "426e6767f3a853a2370d588ccf119800000000000000000000000000046a02ffffe57" + + "00bb10000000000001abb76a914e9c66c96902aa5ea1dae549e8bdc01ebc8ff7ae488" + + "ac000000000000000002c5220bb10000000000000000ffffffff04deadbeef204e000" + + "00000000037000000020000006a4730440220329517d0216a0825843e41030f40167e" + + "1a71f7b23986eedab83ad6eaa9aec07f022029c6c808dc18ad59454985108dfeef1c1" + + "a1f1753d07bc5041bb133d0400d294e0121032e1e80b402627c3d60789e8b52d20ae6" + + "c05768c9c8d0a296b4ae6043a1e6a0c1") + mtxFromB = new(wire.MsgTx) + mtxFromB.FromBytes(invalidSSGenFor154) + + tooManyVotes154 := new(wire.MsgBlock) + tooManyVotes154.FromBytes(block154Bytes) + tooManyVotes154.AddSTransaction(mtxFromB) + tooManyVotes154.Header.Voters = 6 + + recalculateMsgBlockMerkleRootsSize(tooManyVotes154) + b154test = dcrutil.NewBlock(tooManyVotes154) + b154test.SetHeight(int64(testsIdx2)) + + // Fails tax amount test. + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err == nil { + t.Errorf("got unexpected no error for ErrTooManyVotes sanity check") + } + + // Fails and hits ErrTooManyVotes. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrTooManyVotes { + t.Errorf("Unexpected no or wrong error for too many votes test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrTicketUnavailable + nonChosenTicket154 := new(wire.MsgBlock) + nonChosenTicket154.FromBytes(block154Bytes) + nonChosenTicket154.STransactions[4] = mtxFromB + + recalculateMsgBlockMerkleRootsSize(nonChosenTicket154) + b154test = dcrutil.NewBlock(nonChosenTicket154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrTicketUnavailable sanity check"+ + ": %v", + err) + } + + // Fails and hits ErrTooManyVotes. 
+ err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrTicketUnavailable { + t.Errorf("Unexpected no or wrong error for ErrTicketUnavailable test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrVotesOnWrongBlock + wrongBlockVote154 := new(wire.MsgBlock) + wrongBlockVote154.FromBytes(block154Bytes) + wrongBlockScript, _ := hex.DecodeString("6a24008e029f92ae880d45ae61a5366b" + + "b81d9903c5e61045c5b17f1bc97260f8e54497000000") + wrongBlockVote154.STransactions[0].TxOut[0].PkScript = wrongBlockScript + + recalculateMsgBlockMerkleRootsSize(wrongBlockVote154) + b154test = dcrutil.NewBlock(wrongBlockVote154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrVotesOnWrongBlock sanity check: %v", + err) + } + + // Fails and hits ErrTooManyVotes. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrVotesOnWrongBlock { + t.Errorf("Unexpected no or wrong error for ErrVotesOnWrongBlock test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrVotesMismatch + votesMismatch154 := new(wire.MsgBlock) + votesMismatch154.FromBytes(block154Bytes) + sstxsIn154 := votesMismatch154.STransactions[5:] + votesMismatch154.STransactions = votesMismatch154.STransactions[0:4] // 4 Votes + votesMismatch154.STransactions = append(votesMismatch154.STransactions, + sstxsIn154...) + recalculateMsgBlockMerkleRootsSize(votesMismatch154) + b154test = dcrutil.NewBlock(votesMismatch154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrVotesMismatch sanity check: %v", + err) + } + + // Fails and hits ErrVotesMismatch. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrVotesMismatch { + t.Errorf("Unexpected no or wrong error for ErrVotesMismatch test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrIncongruentVotebit 1 + // Everyone votes Yea, but block header says Nay + badVoteBit154 := new(wire.MsgBlock) + badVoteBit154.FromBytes(block154Bytes) + badVoteBit154.Header.VoteBits &= 0xFFFE // Zero critical voteBit + b154test = dcrutil.NewBlock(badVoteBit154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrIncongruentVotebit 2 sanity "+ + "check: %v", err) + } + + // Fails and hits ErrIncongruentVotebit. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrIncongruentVotebit { + t.Errorf("Unexpected no or wrong error for ErrIncongruentVotebit "+ + "test 1: %v", err) + } + + // ---------------------------------------------------------------------------- + // ErrIncongruentVotebit 2 + // Everyone votes Nay, but block header says Yea + badVoteBit154 = new(wire.MsgBlock) + badVoteBit154.FromBytes(block154Bytes) + badVoteBit154.Header.VoteBits = 0x0001 + for i, stx := range badVoteBit154.STransactions { + if i < 5 { + // VoteBits is encoded little endian. 
+ stx.TxOut[1].PkScript[2] = 0x00 + } + } + recalculateMsgBlockMerkleRootsSize(badVoteBit154) + b154test = dcrutil.NewBlock(badVoteBit154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrIncongruentVotebit 2 sanity "+ + "check: %v", err) + } + + // Fails and hits ErrIncongruentVotebit. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrIncongruentVotebit { + t.Errorf("Unexpected no or wrong error for ErrIncongruentVotebit "+ + "test 2: %v", err) + } + + // ---------------------------------------------------------------------------- + // ErrIncongruentVotebit 3 + // 3x Nay 2x Yea, but block header says Yea + badVoteBit154 = new(wire.MsgBlock) + badVoteBit154.FromBytes(block154Bytes) + badVoteBit154.Header.VoteBits = 0x0001 + for i, stx := range badVoteBit154.STransactions { + if i < 3 { + // VoteBits is encoded little endian. + stx.TxOut[1].PkScript[2] = 0x00 + } + } + recalculateMsgBlockMerkleRootsSize(badVoteBit154) + b154test = dcrutil.NewBlock(badVoteBit154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrIncongruentVotebit 3 sanity "+ + "check: %v", err) + } + + // Fails and hits ErrIncongruentVotebit. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrIncongruentVotebit { + t.Errorf("Unexpected no or wrong error for ErrIncongruentVotebit "+ + "test 3: %v", err) + } + + // ---------------------------------------------------------------------------- + // ErrIncongruentVotebit 4 + // 2x Nay 3x Yea, but block header says Nay + badVoteBit154 = new(wire.MsgBlock) + badVoteBit154.FromBytes(block154Bytes) + badVoteBit154.Header.VoteBits = 0x0000 + for i, stx := range badVoteBit154.STransactions { + if i < 2 { + // VoteBits is encoded little endian. + stx.TxOut[1].PkScript[2] = 0x00 + } + } + recalculateMsgBlockMerkleRootsSize(badVoteBit154) + b154test = dcrutil.NewBlock(badVoteBit154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrIncongruentVotebit 4 sanity "+ + "check: %v", err) + } + + // Fails and hits ErrIncongruentVotebit. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrIncongruentVotebit { + t.Errorf("Unexpected no or wrong error for ErrIncongruentVotebit "+ + "test 4: %v", err) + } + + // ---------------------------------------------------------------------------- + // ErrIncongruentVotebit 5 + // 4x Voters + // 2x Nay 2x Yea, but block header says Yea + badVoteBit154 = new(wire.MsgBlock) + badVoteBit154.FromBytes(block154Bytes) + badVoteBit154.STransactions = badVoteBit154.STransactions[0:4] // 4 Votes + badVoteBit154.Header.FreshStake = 0 + badVoteBit154.Header.VoteBits = 0x0001 + badVoteBit154.Header.Voters = 4 + badVoteBit154.Transactions[0].TxOut[0].Value = 3960396039 + for i, stx := range badVoteBit154.STransactions { + if i < 2 { + // VoteBits is encoded little endian. 
+ stx.TxOut[1].PkScript[2] = 0x00 + } + } + recalculateMsgBlockMerkleRootsSize(badVoteBit154) + b154test = dcrutil.NewBlock(badVoteBit154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrIncongruentVotebit 5 sanity "+ + "check: %v", err) + } + + // Fails and hits ErrIncongruentVotebit. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrIncongruentVotebit { + t.Errorf("Unexpected no or wrong error for ErrIncongruentVotebit "+ + "test 5: %v", err) + } + + // ---------------------------------------------------------------------------- + // ErrIncongruentVotebit 6 + // 3x Voters + // 2x Nay 1x Yea, but block header says Yea + badVoteBit154 = new(wire.MsgBlock) + badVoteBit154.FromBytes(block154Bytes) + badVoteBit154.STransactions = badVoteBit154.STransactions[0:3] + badVoteBit154.Header.FreshStake = 0 + badVoteBit154.Header.VoteBits = 0x0001 + badVoteBit154.Header.Voters = 3 + badVoteBit154.Transactions[0].TxOut[0].Value = 2970297029 + for i, stx := range badVoteBit154.STransactions { + if i < 2 { + // VoteBits is encoded little endian. + stx.TxOut[1].PkScript[2] = 0x00 + } + } + recalculateMsgBlockMerkleRootsSize(badVoteBit154) + b154test = dcrutil.NewBlock(badVoteBit154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrIncongruentVotebit 6 sanity "+ + "check: %v", err) + } + + // Fails and hits ErrIncongruentVotebit. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrIncongruentVotebit { + t.Errorf("Unexpected no or wrong error for ErrIncongruentVotebit "+ + "test 6: %v", err) + } + + // ---------------------------------------------------------------------------- + // ErrIncongruentVotebit 7 + // 3x Voters + // 1x Nay 2x Yea, but block header says Nay + badVoteBit154 = new(wire.MsgBlock) + badVoteBit154.FromBytes(block154Bytes) + badVoteBit154.STransactions = badVoteBit154.STransactions[0:3] + badVoteBit154.Header.FreshStake = 0 + badVoteBit154.Header.VoteBits = 0x0000 + badVoteBit154.Header.Voters = 3 + badVoteBit154.Transactions[0].TxOut[0].Value = 2970297029 + for i, stx := range badVoteBit154.STransactions { + if i < 1 { + // VoteBits is encoded little endian. + stx.TxOut[1].PkScript[2] = 0x00 + } + } + recalculateMsgBlockMerkleRootsSize(badVoteBit154) + b154test = dcrutil.NewBlock(badVoteBit154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrIncongruentVotebit 7 sanity "+ + "check: %v", err) + } + + // Fails and hits ErrIncongruentVotebit. 
+ err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrIncongruentVotebit { + t.Errorf("Unexpected no or wrong error for ErrIncongruentVotebit "+ + "test 7: %v", err) + } + + // ---------------------------------------------------------------------------- + // ErrSStxCommitment + badCommitScrB, _ := hex.DecodeString("6a1ea495e69ddfe8b9770b823314ba66d4ca0" + + "6201310540cce08000000001234") + + badSStxCommit154 := new(wire.MsgBlock) + badSStxCommit154.FromBytes(block154Bytes) + badSStxCommit154.STransactions[5].TxOut[1].PkScript = badCommitScrB + + recalculateMsgBlockMerkleRootsSize(badSStxCommit154) + b154test = dcrutil.NewBlock(badSStxCommit154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrSStxCommitment sanity check: %v", + err) + } + + // Fails and hits ErrSStxCommitment. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrSStxCommitment { + t.Errorf("Unexpected no or wrong error for ErrSStxCommitment test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrUnparseableSSGen + // This should be impossible to hit unless there's a local memory failure. + + // ---------------------------------------------------------------------------- + // ErrInvalidSSGenInput + // It doesn't look like this one can actually be hit since checking if + // IsSSGen should fail first. + + // ---------------------------------------------------------------------------- + // ErrSSGenPayeeOuts 1 + // Corrupt the payee + badSSGenPayee154 := new(wire.MsgBlock) + badSSGenPayee154.FromBytes(block154Bytes) + badSSGenPayee154.STransactions[0].TxOut[2].PkScript[8] ^= 0x01 + + recalculateMsgBlockMerkleRootsSize(badSSGenPayee154) + b154test = dcrutil.NewBlock(badSSGenPayee154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrSSGenPayeeOuts sanity "+ + "check: %v", err) + } + + // Fails and hits ErrSSGenPayeeOuts. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrSSGenPayeeOuts { + t.Errorf("Unexpected no or wrong error for ErrSSGenPayeeOuts "+ + "test: %v", err) + } + + // ---------------------------------------------------------------------------- + // ErrSSGenPayeeOuts 2 + // Corrupt the amount + badSSGenPayee154 = new(wire.MsgBlock) + badSSGenPayee154.FromBytes(block154Bytes) + badSSGenPayee154.STransactions[0].TxOut[2].Value += 1 + + recalculateMsgBlockMerkleRootsSize(badSSGenPayee154) + b154test = dcrutil.NewBlock(badSSGenPayee154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrSSGenPayeeOuts sanity "+ + "check2 : %v", err) + } + + // Fails and hits ErrSSGenPayeeOuts. 
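+	// The vote's outputs must pay the amounts implied by the original
+	// ticket commitments plus the vote subsidy, so even a one-atom increase
+	// breaks the expected payees.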
+ err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrSSGenPayeeOuts { + t.Errorf("Unexpected no or wrong error for ErrSSGenPayeeOuts "+ + "test 2: %v", err) + } + + // ---------------------------------------------------------------------------- + // ErrSSGenSubsidy + // It appears that ErrSSGenSubsidy is impossible to hit due to the + // check above that returns ErrSSGenPayeeOuts. + + // ---------------------------------------------------------------------------- + // ErrSStxInImmature + // This is impossible to hit from a block's perspective because the + // ticket isn't in the ticket database. So it fails prematurely. + + // ---------------------------------------------------------------------------- + // ErrSStxInScrType + // The testbed blockchain doesn't have any non-P2PKH or non-P2SH outputs + // so we can't test this. Independently tested and verified, but should + // eventually get its own unit test. + + // ---------------------------------------------------------------------------- + // ErrInvalidSSRtxInput + // It seems impossible to hit this from a block test because it fails when + // it can't detect the relevant tickets in the missed ticket database + // bucket. + + // ---------------------------------------------------------------------------- + // ErrTxSStxOutSpend + // Try to spend a ticket output as a regular transaction. + spendTaggedIn154 := new(wire.MsgBlock) + spendTaggedIn154.FromBytes(block154Bytes) + regularTx154, _ := spendTaggedIn154.Transactions[11].Bytes() + mtxFromB = new(wire.MsgTx) + mtxFromB.FromBytes(regularTx154) + sstxTaggedInH, _ := chainhash.NewHashFromStr("83a562e29aad50b8aacb816914da" + + "92a3fa46bea9e8f30b69efc6e64b455f0436") + sstxTaggedIn := new(wire.TxIn) + sstxTaggedIn.BlockHeight = 71 + sstxTaggedIn.BlockIndex = 1 + sstxTaggedIn.ValueIn = 20000 + sstxTaggedIn.SignatureScript = []byte{0x51, 0x51} + sstxTaggedIn.Sequence = 0xffffffff + sstxTaggedIn.PreviousOutPoint.Hash = *sstxTaggedInH + sstxTaggedIn.PreviousOutPoint.Index = 0 + sstxTaggedIn.PreviousOutPoint.Tree = 1 + mtxFromB.AddTxIn(sstxTaggedIn) + + spendTaggedIn154.Transactions[11] = mtxFromB + recalculateMsgBlockMerkleRootsSize(spendTaggedIn154) + b154test = dcrutil.NewBlock(spendTaggedIn154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrTxSStxOutSpend sanity check: %v", + err) + } + + // Fails and hits ErrTxSStxOutSpend. 
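+	// Ticket (SStx) outputs carry stake opcodes and may only be consumed by
+	// a vote or a revocation, never by a transaction in the regular tree.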
+ err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrTxSStxOutSpend { + t.Errorf("Unexpected no or wrong error for ErrTxSStxOutSpend test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrRegTxSpendStakeOut + mtxFromB = new(wire.MsgTx) + mtxFromB.FromBytes(regularTx154) + scrWithStakeOPCode, _ := hex.DecodeString("ba76a9149fe1d1f7ed3b1d0be66c4b3c" + + "4981ca48b810e9bb88ac") + mtxFromB.TxOut[0].PkScript = scrWithStakeOPCode + + spendTaggedOut154 := new(wire.MsgBlock) + spendTaggedOut154.FromBytes(block154Bytes) + spendTaggedOut154.Transactions[11] = mtxFromB + recalculateMsgBlockMerkleRootsSize(spendTaggedOut154) + b154test = dcrutil.NewBlock(spendTaggedOut154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrRegTxSpendStakeOut sanity check: %v", + err) + } + + // Fails and hits ErrRegTxSpendStakeOut. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrRegTxSpendStakeOut { + t.Errorf("Unexpected no or wrong error for ErrRegTxSpendStakeOut test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrInvalidFinalState + badFinalState154 := new(wire.MsgBlock) + badFinalState154.FromBytes(block154Bytes) + badFinalState154.Header.FinalState[0] ^= 0x01 + b154test = dcrutil.NewBlock(badFinalState154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrInvalidFinalState sanity check: %v", + err) + } + + // Fails and hits ErrInvalidFinalState. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrInvalidFinalState { + t.Errorf("Unexpected no or wrong error for ErrInvalidFinalState test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrPoolSize + badPoolSize154 := new(wire.MsgBlock) + badPoolSize154.FromBytes(block154Bytes) + badPoolSize154.Header.PoolSize++ + b154test = dcrutil.NewBlock(badPoolSize154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrPoolSize sanity check: %v", + err) + } + + // Fails and hits ErrPoolSize. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrPoolSize { + t.Errorf("Unexpected no or wrong error for ErrPoolSize test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrBadStakebaseValue doesn't seem be be able to be hit because + // ErrSSGenPayeeOuts is hit first. The code should be kept in in case + // the first check somehow fails to catch inflation. 
+ + // ---------------------------------------------------------------------------- + // ErrDiscordantTxTree + mtxFromB = new(wire.MsgTx) + mtxFromB.FromBytes(regularTx154) + mtxFromB.TxIn[0].PreviousOutPoint.Tree = dcrutil.TxTreeStake + + errTxTreeIn154 := new(wire.MsgBlock) + errTxTreeIn154.FromBytes(block154Bytes) + errTxTreeIn154.Transactions[11] = mtxFromB + recalculateMsgBlockMerkleRootsSize(errTxTreeIn154) + b154test = dcrutil.NewBlock(errTxTreeIn154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrDiscordantTxTree sanity check: %v", + err) + } + + // Fails and hits ErrDiscordantTxTree. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrDiscordantTxTree { + t.Errorf("Unexpected no or wrong error for ErrDiscordantTxTree test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrStakeFees + // It should be impossible for this to ever be triggered because of the + // paranoid around transaction inflation, but leave it in anyway just + // in case there is database corruption etc. + + // ---------------------------------------------------------------------------- + // ErrBadBlockHeight + badBlockHeight154 := new(wire.MsgBlock) + badBlockHeight154.FromBytes(block154Bytes) + badBlockHeight154.Header.Height++ + b154test = dcrutil.NewBlock(badBlockHeight154) + b154test.SetHeight(int64(testsIdx2)) + + // Throws ProcessBlock error through checkBlockContext. + _, _, err = chain.ProcessBlock(b154test, timeSource, blockchain.BFNoPoWCheck) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrBadBlockHeight { + t.Errorf("ProcessBlock ErrBadBlockHeight test no or unexpected "+ + "error: %v", err) + } + + // ---------------------------------------------------------------------------- + // ErrNoTax 1 + // Tax output missing + taxMissing154 := new(wire.MsgBlock) + taxMissing154.FromBytes(block154Bytes) + taxMissing154.Transactions[0].TxOut = taxMissing154.Transactions[0].TxOut[1:] + + recalculateMsgBlockMerkleRootsSize(taxMissing154) + b154test = dcrutil.NewBlock(taxMissing154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrNoTax { + t.Errorf("Got no error or unexpected error for ErrNoTax "+ + "test 1: %v", err) + } + + err = chain.CheckConnectBlock(b154test) + if err != nil { + t.Errorf("Got unexpected error for ErrNoTax test 1: %v", err) + } + + // ErrNoTax 2 + // Wrong hash paid to + taxMissing154 = new(wire.MsgBlock) + taxMissing154.FromBytes(block154Bytes) + taxMissing154.Transactions[0].TxOut[0].PkScript[8] ^= 0x01 + + recalculateMsgBlockMerkleRootsSize(taxMissing154) + b154test = dcrutil.NewBlock(taxMissing154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrNoTax { + t.Errorf("Got no error or unexpected error for ErrNoTax "+ + "test 2: %v", err) + } + + err = chain.CheckConnectBlock(b154test) + if err != nil { + t.Errorf("Got unexpected error for ErrNoTax test 2: %v", err) + } + + // ErrNoTax 3 + // Wrong amount paid + taxMissing154 = new(wire.MsgBlock) + taxMissing154.FromBytes(block154Bytes) + 
taxMissing154.Transactions[0].TxOut[0].Value-- + + recalculateMsgBlockMerkleRootsSize(taxMissing154) + b154test = dcrutil.NewBlock(taxMissing154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrNoTax { + t.Errorf("Got no error or unexpected error for ErrNoTax "+ + "test 3: %v", err) + } + + err = chain.CheckConnectBlock(b154test) + if err != nil { + t.Errorf("Got unexpected error for ErrNoTax test 3: %v", err) + } + + // ---------------------------------------------------------------------------- + // ErrExpiredTx + mtxFromB = new(wire.MsgTx) + mtxFromB.FromBytes(regularTx154) + mtxFromB.Expiry = 154 + + expiredTx154 := new(wire.MsgBlock) + expiredTx154.FromBytes(block154Bytes) + expiredTx154.AddTransaction(mtxFromB) + recalculateMsgBlockMerkleRootsSize(expiredTx154) + b154test = dcrutil.NewBlock(expiredTx154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrExpiredTx sanity check: %v", + err) + } + + // Fails and hits ErrExpiredTx. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrExpiredTx { + t.Errorf("Unexpected no or wrong error for ErrExpiredTx test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrFraudAmountIn + mtxFromB = new(wire.MsgTx) + mtxFromB.FromBytes(regularTx154) + mtxFromB.TxIn[0].ValueIn-- + + badValueIn154 := new(wire.MsgBlock) + badValueIn154.FromBytes(block154Bytes) + badValueIn154.Transactions[11] = mtxFromB + recalculateMsgBlockMerkleRootsSize(badValueIn154) + b154test = dcrutil.NewBlock(badValueIn154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrFraudAmountIn sanity check: %v", + err) + } + + // Fails and hits ErrFraudAmountIn. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrFraudAmountIn { + t.Errorf("Unexpected no or wrong error for ErrFraudAmountIn test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrFraudBlockHeight + mtxFromB = new(wire.MsgTx) + mtxFromB.FromBytes(regularTx154) + mtxFromB.TxIn[0].BlockHeight++ + + badHeightProof154 := new(wire.MsgBlock) + badHeightProof154.FromBytes(block154Bytes) + badHeightProof154.Transactions[11] = mtxFromB + recalculateMsgBlockMerkleRootsSize(badHeightProof154) + b154test = dcrutil.NewBlock(badHeightProof154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrFraudBlockHeight sanity check: %v", + err) + } + + // Fails and hits ErrFraudBlockHeight. 
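+	// Each input carries a fraud proof (the value, block height, and block
+	// index of the output being spent); the mutated height no longer
+	// matches the referenced output and is rejected at connect time.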
+ err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrFraudBlockHeight { + t.Errorf("Unexpected no or wrong error for ErrFraudBlockHeight test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrFraudBlockIndex + mtxFromB = new(wire.MsgTx) + mtxFromB.FromBytes(regularTx154) + mtxFromB.TxIn[0].BlockIndex++ + + badIndexProof154 := new(wire.MsgBlock) + badIndexProof154.FromBytes(block154Bytes) + badIndexProof154.Transactions[11] = mtxFromB + recalculateMsgBlockMerkleRootsSize(badIndexProof154) + b154test = dcrutil.NewBlock(badIndexProof154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrFraudBlockIndex sanity check: %v", + err) + } + + // Fails and hits ErrFraudBlockIndex. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrFraudBlockIndex { + t.Errorf("Unexpected no or wrong error for ErrFraudBlockIndex test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrScriptValidation Reg Tree + mtxFromB = new(wire.MsgTx) + mtxFromB.FromBytes(regularTx154) + mtxFromB.TxOut[0].Value-- + + badScrVal154 := new(wire.MsgBlock) + badScrVal154.FromBytes(block154Bytes) + badScrVal154.Transactions[11] = mtxFromB + recalculateMsgBlockMerkleRootsSize(badScrVal154) + b154test = dcrutil.NewBlock(badScrVal154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrScriptValidation sanity check: %v", + err) + } + + // Fails and hits ErrScriptValidation. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrScriptValidation { + t.Errorf("Unexpected no or wrong error for ErrScriptValidation test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrScriptValidation Stake Tree + badScrValS154 := new(wire.MsgBlock) + badScrValS154.FromBytes(block154Bytes) + badScrValS154.STransactions[5].TxIn[0].SignatureScript[6] ^= 0x01 + recalculateMsgBlockMerkleRootsSize(badScrValS154) + b154test = dcrutil.NewBlock(badScrValS154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrScriptValidation sanity check: %v", + err) + } + + // Fails and hits ErrScriptValidation. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrScriptValidation { + t.Errorf("Unexpected no or wrong error for ErrScriptValidation test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // Invalidate the previous block transaction tree. All the tickets in + // this block reference the previous transaction tree regular, and so + // all should be invalid by missing the tx if the header invalidates the + // previous block. 
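+	// Flipping every vote to disapprove and clearing the header vote bits
+	// is what invalidates the previous regular transaction tree here.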
+ invalMissingInsS154 := new(wire.MsgBlock) + invalMissingInsS154.FromBytes(block154Bytes) + for i := 0; i < int(invalMissingInsS154.Header.Voters); i++ { + invalMissingInsS154.STransactions[i].TxOut[1].PkScript[2] = 0x00 + } + invalMissingInsS154.Header.VoteBits = 0x0000 + + recalculateMsgBlockMerkleRootsSize(invalMissingInsS154) + b154test = dcrutil.NewBlock(invalMissingInsS154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for invalMissingInsS154 sanity check: %v", + err) + } + + // Fails and hits ErrMissingTx. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrMissingTx { + t.Errorf("Unexpected no or wrong error for invalMissingInsS154 test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrScriptMalformed + mtxFromB = new(wire.MsgTx) + mtxFromB.FromBytes(regularTx154) + mtxFromB.TxOut[0].PkScript = []byte{0x01, 0x02, 0x03, 0x04} + + malformedScr154 := new(wire.MsgBlock) + malformedScr154.FromBytes(block154Bytes) + malformedScr154.Transactions[11] = mtxFromB + recalculateMsgBlockMerkleRootsSize(malformedScr154) + b154test = dcrutil.NewBlock(malformedScr154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrScriptValidation sanity check: %v", + err) + } + + // Fails and hits ErrScriptMalformed. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrScriptMalformed { + t.Errorf("Unexpected no or wrong error for ErrScriptMalformed test: %v", + err) + } + + // ---------------------------------------------------------------------------- + // ErrZeroValueOutputSpend + mtxFromB = new(wire.MsgTx) + mtxFromB.FromBytes(regularTx154) + + zeroValueTxH, _ := chainhash.NewHashFromStr("9432be62a2c664ad021fc3567c" + + "700239067cfaa59be5b67b5808b158dfaed060") + zvi := new(wire.TxIn) + zvi.BlockHeight = 83 + zvi.BlockIndex = 0 + zvi.ValueIn = 0 + zvi.SignatureScript = []byte{0x51} + zvi.Sequence = 0xffffffff + zvi.PreviousOutPoint.Hash = *zeroValueTxH + zvi.PreviousOutPoint.Index = 1 + zvi.PreviousOutPoint.Tree = 1 + mtxFromB.AddTxIn(zvi) + spendZeroValueIn154 := new(wire.MsgBlock) + spendZeroValueIn154.FromBytes(block154Bytes) + spendZeroValueIn154.Transactions[11] = mtxFromB + + recalculateMsgBlockMerkleRootsSize(spendZeroValueIn154) + b154test = dcrutil.NewBlock(spendZeroValueIn154) + b154test.SetHeight(int64(testsIdx2)) + + err = blockchain.CheckWorklessBlockSanity(b154test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrZeroValueOutputSpend sanity "+ + "check: %v", err) + } + + // Fails and hits ErrZeroValueOutputSpend. + err = chain.CheckConnectBlock(b154test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrZeroValueOutputSpend { + t.Errorf("Unexpected no or wrong error for "+ + "ErrZeroValueOutputSpend test: %v", err) + } + + // ---------------------------------------------------------------------------- + // DoubleSpend/TxTree invalidation edge case testing + // + // Load up to block 166. 165 invalidates its previous tx tree, making + // it good for testing. 
+ for i := testsIdx2; i < testsIdx3; i++ { + bl, err := dcrutil.NewBlockFromBytes(blockChain[int64(i)]) + if err != nil { + t.Errorf("NewBlockFromBytes error: %v", err.Error()) + } + bl.SetHeight(int64(i)) + + // Double check and ensure there's no cross tree spending in + // block 164. + if i == 164 { + for _, stx := range bl.MsgBlock().STransactions { + for j, sTxIn := range stx.TxIn { + for _, tx := range bl.MsgBlock().Transactions { + h := tx.TxSha() + if h == sTxIn.PreviousOutPoint.Hash { + t.Errorf("Illegal cross tree reference ("+ + "stx %v references tx %v in input %v)", + stx.TxSha(), h, j) + } + } + } } } + + _, _, err = chain.ProcessBlock(bl, timeSource, blockchain.BFNone) + if err != nil { + t.Errorf("ProcessBlock error: %v", err.Error()) + } + } + block166Bytes := blockChain[int64(testsIdx3)] + + // ---------------------------------------------------------------------------- + // Attempt to spend from TxTreeRegular of block 164, which should never + // have existed. + spendFrom164RegB, _ := hex.DecodeString("01000000016a7a4928f20fbdeca6c0dd534" + + "8110d26e7abb91549d846638db6379ecae300f70500000000ffffffff01c095a9" + + "050000000000001976a91487bd9a1466619fa8253baa37ffca87bb5b1892da88a" + + "c000000000000000001ffffffffffffffff00000000ffffffff00") + mtxFromB = new(wire.MsgTx) + mtxFromB.FromBytes(spendFrom164RegB) + spendInvalid166 := new(wire.MsgBlock) + spendInvalid166.FromBytes(block166Bytes) + spendInvalid166.AddTransaction(mtxFromB) + + recalculateMsgBlockMerkleRootsSize(spendInvalid166) + b166test := dcrutil.NewBlock(spendInvalid166) + b166test.SetHeight(int64(testsIdx3)) + + err = blockchain.CheckWorklessBlockSanity(b166test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrMissingTx test 1 sanity "+ + "check: %v", err) + } + + // Fails and hits ErrMissingTx. + err = chain.CheckConnectBlock(b166test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrMissingTx { + t.Errorf("Unexpected no or wrong error for "+ + "ErrMissingTx test 1: %v", err) + } + + // ---------------------------------------------------------------------------- + // Try to buy a ticket with this block's coinbase transaction, which + // should not be allowed because it doesn't yet exist. + sstxSpendInvalid166 := new(wire.MsgBlock) + sstxSpendInvalid166.FromBytes(block166Bytes) + sstxToUse166 := sstxSpendInvalid166.STransactions[5] + + // Craft an otherwise valid sstx. 
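+	// The ticket purchase below funds itself from output 2 of this same
+	// block's coinbase, which is not yet spendable because the block has
+	// not been connected.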
+ coinbaseHash := spendInvalid166.Transactions[0].TxSha() + sstxCBIn := new(wire.TxIn) + sstxCBIn.ValueIn = 29702992297 + sstxCBIn.PreviousOutPoint.Hash = coinbaseHash + sstxCBIn.PreviousOutPoint.Index = 2 + sstxCBIn.PreviousOutPoint.Tree = 0 + sstxCBIn.BlockHeight = 166 + sstxCBIn.BlockIndex = 0 + sstxCBIn.Sequence = 4294967295 + sstxCBIn.SignatureScript = []byte{0x51, 0x51} + sstxToUse166.AddTxIn(sstxCBIn) + + orgAddr, _ := dcrutil.DecodeAddress(simNetParams.OrganizationAddress, + simNetParams) + pkScript, _ := txscript.GenerateSStxAddrPush(orgAddr, + dcrutil.Amount(29702992297), 0x0000) + txOut := wire.NewTxOut(int64(0), pkScript) + sstxToUse166.AddTxOut(txOut) + pkScript, _ = txscript.PayToSStxChange(orgAddr) + txOut = wire.NewTxOut(0, pkScript) + sstxToUse166.AddTxOut(txOut) + + recalculateMsgBlockMerkleRootsSize(sstxSpendInvalid166) + b166test = dcrutil.NewBlock(sstxSpendInvalid166) + b166test.SetHeight(int64(testsIdx3)) + + err = blockchain.CheckWorklessBlockSanity(b166test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrMissingTx test 2 sanity "+ + "check: %v", err) + } + + // Fails and hits ErrMissingTx. + err = chain.CheckConnectBlock(b166test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrMissingTx { + t.Errorf("Unexpected no or wrong error for "+ + "ErrMissingTx test 2: %v", err) + } + + // ---------------------------------------------------------------------------- + // Try to spend immature change from one SStx in another SStx. + sstxSpend2Invalid166 := new(wire.MsgBlock) + sstxSpend2Invalid166.FromBytes(block166Bytes) + sstxToUse166 = sstxSpend2Invalid166.STransactions[6] + sstxChangeHash := spendInvalid166.STransactions[5].TxSha() + sstxChangeIn := new(wire.TxIn) + sstxChangeIn.ValueIn = 2345438298 + sstxChangeIn.PreviousOutPoint.Hash = sstxChangeHash + sstxChangeIn.PreviousOutPoint.Index = 2 + sstxChangeIn.PreviousOutPoint.Tree = 1 + sstxChangeIn.BlockHeight = 166 + sstxChangeIn.BlockIndex = 5 + sstxChangeIn.Sequence = 4294967295 + sstxChangeIn.SignatureScript = []byte{0x51, 0x51} + sstxToUse166.AddTxIn(sstxChangeIn) + + pkScript, _ = txscript.GenerateSStxAddrPush(orgAddr, + dcrutil.Amount(2345438298), 0x0000) + txOut = wire.NewTxOut(int64(0), pkScript) + sstxToUse166.AddTxOut(txOut) + pkScript, _ = txscript.PayToSStxChange(orgAddr) + txOut = wire.NewTxOut(0, pkScript) + sstxToUse166.AddTxOut(txOut) + + recalculateMsgBlockMerkleRootsSize(sstxSpend2Invalid166) + b166test = dcrutil.NewBlock(sstxSpend2Invalid166) + b166test.SetHeight(int64(testsIdx3)) + + err = blockchain.CheckWorklessBlockSanity(b166test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrMissingTx test 3 sanity "+ + "check: %v", err) + } + + // Fails and hits ErrMissingTx. It may not be immediately clear + // why this happens, but in the case of the stake transaction + // tree, because you can't spend in chains, the txlookup code + // doesn't even bother to populate the spent list in the txlookup + // and instead just writes the transaction hash as being missing. + // This output doesn't become legal to spend until the next block. + err = chain.CheckConnectBlock(b166test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrMissingTx { + t.Errorf("Unexpected no or wrong error for "+ + "ErrMissingTx test 3: %v", err) + } + + // ---------------------------------------------------------------------------- + // Try to double spend the same input in the stake transaction tree. 
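+	// Re-adding the funding input of the ticket at index 5 to the ticket at
+	// index 6 makes two stake-tree transactions spend the same outpoint in
+	// a single block.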
+ sstxSpend3Invalid166 := new(wire.MsgBlock) + sstxSpend3Invalid166.FromBytes(block166Bytes) + sstxToUse166 = sstxSpend3Invalid166.STransactions[6] + sstxToUse166.AddTxIn(sstxSpend3Invalid166.STransactions[5].TxIn[0]) + + sstxToUse166.AddTxOut(sstxSpend3Invalid166.STransactions[5].TxOut[1]) + sstxToUse166.AddTxOut(sstxSpend3Invalid166.STransactions[5].TxOut[2]) + + recalculateMsgBlockMerkleRootsSize(sstxSpend3Invalid166) + b166test = dcrutil.NewBlock(sstxSpend3Invalid166) + b166test.SetHeight(int64(testsIdx3)) + + err = blockchain.CheckWorklessBlockSanity(b166test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrDoubleSpend test 1 sanity "+ + "check: %v", err) + } + + // Fails and hits ErrDoubleSpend. + err = chain.CheckConnectBlock(b166test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrDoubleSpend { + t.Errorf("Unexpected no or wrong error for "+ + "ErrDoubleSpend test 1: %v", err) + } + + // ---------------------------------------------------------------------------- + // Try to double spend an input in the unconfirmed tx tree regular + // that's already spent in the stake tree. + regTxSpendStakeIn166 := new(wire.MsgBlock) + regTxSpendStakeIn166.FromBytes(block166Bytes) + sstxIn := regTxSpendStakeIn166.STransactions[5].TxIn[0] + regTxSpendStakeIn166.Transactions[2].AddTxIn(sstxIn) + + recalculateMsgBlockMerkleRootsSize(regTxSpendStakeIn166) + b166test = dcrutil.NewBlock(regTxSpendStakeIn166) + b166test.SetHeight(int64(testsIdx3)) + + err = blockchain.CheckWorklessBlockSanity(b166test, timeSource, simNetParams) + if err != nil { + t.Errorf("got unexpected error for ErrDoubleSpend test 2 sanity "+ + "check: %v", err) + } + + // Fails and hits ErrDoubleSpend. + err = chain.CheckConnectBlock(b166test) + if err == nil || err.(blockchain.RuleError).GetCode() != + blockchain.ErrDoubleSpend { + t.Errorf("Unexpected no or wrong error for "+ + "ErrDoubleSpend test 2: %v", err) } } - -// Block100000 defines block 100,000 of the block chain. It is used to -// test Block operations. -var Block100000 = wire.MsgBlock{ - Header: wire.BlockHeader{ - Version: 1, - PrevBlock: wire.ShaHash([32]byte{ // Make go vet happy. - 0x50, 0x12, 0x01, 0x19, 0x17, 0x2a, 0x61, 0x04, - 0x21, 0xa6, 0xc3, 0x01, 0x1d, 0xd3, 0x30, 0xd9, - 0xdf, 0x07, 0xb6, 0x36, 0x16, 0xc2, 0xcc, 0x1f, - 0x1c, 0xd0, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, - }), // 000000000002d01c1fccc21636b607dfd930d31d01c3a62104612a1719011250 - MerkleRoot: wire.ShaHash([32]byte{ // Make go vet happy. 
- 0x66, 0x57, 0xa9, 0x25, 0x2a, 0xac, 0xd5, 0xc0, - 0xb2, 0x94, 0x09, 0x96, 0xec, 0xff, 0x95, 0x22, - 0x28, 0xc3, 0x06, 0x7c, 0xc3, 0x8d, 0x48, 0x85, - 0xef, 0xb5, 0xa4, 0xac, 0x42, 0x47, 0xe9, 0xf3, - }), // f3e94742aca4b5ef85488dc37c06c3282295ffec960994b2c0d5ac2a25a95766 - Timestamp: time.Unix(1293623863, 0), // 2010-12-29 11:57:43 +0000 UTC - Bits: 0x1b04864c, // 453281356 - Nonce: 0x10572b0f, // 274148111 - }, - Transactions: []*wire.MsgTx{ - { - Version: 1, - TxIn: []*wire.TxIn{ - { - PreviousOutPoint: wire.OutPoint{ - Hash: wire.ShaHash{}, - Index: 0xffffffff, - }, - SignatureScript: []byte{ - 0x04, 0x4c, 0x86, 0x04, 0x1b, 0x02, 0x06, 0x02, - }, - Sequence: 0xffffffff, - }, - }, - TxOut: []*wire.TxOut{ - { - Value: 0x12a05f200, // 5000000000 - PkScript: []byte{ - 0x41, // OP_DATA_65 - 0x04, 0x1b, 0x0e, 0x8c, 0x25, 0x67, 0xc1, 0x25, - 0x36, 0xaa, 0x13, 0x35, 0x7b, 0x79, 0xa0, 0x73, - 0xdc, 0x44, 0x44, 0xac, 0xb8, 0x3c, 0x4e, 0xc7, - 0xa0, 0xe2, 0xf9, 0x9d, 0xd7, 0x45, 0x75, 0x16, - 0xc5, 0x81, 0x72, 0x42, 0xda, 0x79, 0x69, 0x24, - 0xca, 0x4e, 0x99, 0x94, 0x7d, 0x08, 0x7f, 0xed, - 0xf9, 0xce, 0x46, 0x7c, 0xb9, 0xf7, 0xc6, 0x28, - 0x70, 0x78, 0xf8, 0x01, 0xdf, 0x27, 0x6f, 0xdf, - 0x84, // 65-byte signature - 0xac, // OP_CHECKSIG - }, - }, - }, - LockTime: 0, - }, - { - Version: 1, - TxIn: []*wire.TxIn{ - { - PreviousOutPoint: wire.OutPoint{ - Hash: wire.ShaHash([32]byte{ // Make go vet happy. - 0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60, - 0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac, - 0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07, - 0x79, 0xac, 0x88, 0xfd, 0xf3, 0x57, 0xa1, 0x87, - }), // 87a157f3fd88ac7907c05fc55e271dc4acdc5605d187d646604ca8c0e9382e03 - Index: 0, - }, - SignatureScript: []byte{ - 0x49, // OP_DATA_73 - 0x30, 0x46, 0x02, 0x21, 0x00, 0xc3, 0x52, 0xd3, - 0xdd, 0x99, 0x3a, 0x98, 0x1b, 0xeb, 0xa4, 0xa6, - 0x3a, 0xd1, 0x5c, 0x20, 0x92, 0x75, 0xca, 0x94, - 0x70, 0xab, 0xfc, 0xd5, 0x7d, 0xa9, 0x3b, 0x58, - 0xe4, 0xeb, 0x5d, 0xce, 0x82, 0x02, 0x21, 0x00, - 0x84, 0x07, 0x92, 0xbc, 0x1f, 0x45, 0x60, 0x62, - 0x81, 0x9f, 0x15, 0xd3, 0x3e, 0xe7, 0x05, 0x5c, - 0xf7, 0xb5, 0xee, 0x1a, 0xf1, 0xeb, 0xcc, 0x60, - 0x28, 0xd9, 0xcd, 0xb1, 0xc3, 0xaf, 0x77, 0x48, - 0x01, // 73-byte signature - 0x41, // OP_DATA_65 - 0x04, 0xf4, 0x6d, 0xb5, 0xe9, 0xd6, 0x1a, 0x9d, - 0xc2, 0x7b, 0x8d, 0x64, 0xad, 0x23, 0xe7, 0x38, - 0x3a, 0x4e, 0x6c, 0xa1, 0x64, 0x59, 0x3c, 0x25, - 0x27, 0xc0, 0x38, 0xc0, 0x85, 0x7e, 0xb6, 0x7e, - 0xe8, 0xe8, 0x25, 0xdc, 0xa6, 0x50, 0x46, 0xb8, - 0x2c, 0x93, 0x31, 0x58, 0x6c, 0x82, 0xe0, 0xfd, - 0x1f, 0x63, 0x3f, 0x25, 0xf8, 0x7c, 0x16, 0x1b, - 0xc6, 0xf8, 0xa6, 0x30, 0x12, 0x1d, 0xf2, 0xb3, - 0xd3, // 65-byte pubkey - }, - Sequence: 0xffffffff, - }, - }, - TxOut: []*wire.TxOut{ - { - Value: 0x2123e300, // 556000000 - PkScript: []byte{ - 0x76, // OP_DUP - 0xa9, // OP_HASH160 - 0x14, // OP_DATA_20 - 0xc3, 0x98, 0xef, 0xa9, 0xc3, 0x92, 0xba, 0x60, - 0x13, 0xc5, 0xe0, 0x4e, 0xe7, 0x29, 0x75, 0x5e, - 0xf7, 0xf5, 0x8b, 0x32, - 0x88, // OP_EQUALVERIFY - 0xac, // OP_CHECKSIG - }, - }, - { - Value: 0x108e20f00, // 4444000000 - PkScript: []byte{ - 0x76, // OP_DUP - 0xa9, // OP_HASH160 - 0x14, // OP_DATA_20 - 0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, 0x3f, - 0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, 0x6b, - 0x52, 0xde, 0x3d, 0x7c, - 0x88, // OP_EQUALVERIFY - 0xac, // OP_CHECKSIG - }, - }, - }, - LockTime: 0, - }, - { - Version: 1, - TxIn: []*wire.TxIn{ - { - PreviousOutPoint: wire.OutPoint{ - Hash: wire.ShaHash([32]byte{ // Make go vet happy. 
- 0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d, - 0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27, - 0x86, 0xaf, 0x7d, 0x7e, 0x2d, 0xe0, 0x92, 0x65, - 0xe4, 0x1c, 0x61, 0xd0, 0x78, 0x29, 0x4e, 0xcf, - }), // cf4e2978d0611ce46592e02d7e7daf8627a316ab69759a9f3df109a7f2bf3ec3 - Index: 1, - }, - SignatureScript: []byte{ - 0x47, // OP_DATA_71 - 0x30, 0x44, 0x02, 0x20, 0x03, 0x2d, 0x30, 0xdf, - 0x5e, 0xe6, 0xf5, 0x7f, 0xa4, 0x6c, 0xdd, 0xb5, - 0xeb, 0x8d, 0x0d, 0x9f, 0xe8, 0xde, 0x6b, 0x34, - 0x2d, 0x27, 0x94, 0x2a, 0xe9, 0x0a, 0x32, 0x31, - 0xe0, 0xba, 0x33, 0x3e, 0x02, 0x20, 0x3d, 0xee, - 0xe8, 0x06, 0x0f, 0xdc, 0x70, 0x23, 0x0a, 0x7f, - 0x5b, 0x4a, 0xd7, 0xd7, 0xbc, 0x3e, 0x62, 0x8c, - 0xbe, 0x21, 0x9a, 0x88, 0x6b, 0x84, 0x26, 0x9e, - 0xae, 0xb8, 0x1e, 0x26, 0xb4, 0xfe, 0x01, - 0x41, // OP_DATA_65 - 0x04, 0xae, 0x31, 0xc3, 0x1b, 0xf9, 0x12, 0x78, - 0xd9, 0x9b, 0x83, 0x77, 0xa3, 0x5b, 0xbc, 0xe5, - 0xb2, 0x7d, 0x9f, 0xff, 0x15, 0x45, 0x68, 0x39, - 0xe9, 0x19, 0x45, 0x3f, 0xc7, 0xb3, 0xf7, 0x21, - 0xf0, 0xba, 0x40, 0x3f, 0xf9, 0x6c, 0x9d, 0xee, - 0xb6, 0x80, 0xe5, 0xfd, 0x34, 0x1c, 0x0f, 0xc3, - 0xa7, 0xb9, 0x0d, 0xa4, 0x63, 0x1e, 0xe3, 0x95, - 0x60, 0x63, 0x9d, 0xb4, 0x62, 0xe9, 0xcb, 0x85, - 0x0f, // 65-byte pubkey - }, - Sequence: 0xffffffff, - }, - }, - TxOut: []*wire.TxOut{ - { - Value: 0xf4240, // 1000000 - PkScript: []byte{ - 0x76, // OP_DUP - 0xa9, // OP_HASH160 - 0x14, // OP_DATA_20 - 0xb0, 0xdc, 0xbf, 0x97, 0xea, 0xbf, 0x44, 0x04, - 0xe3, 0x1d, 0x95, 0x24, 0x77, 0xce, 0x82, 0x2d, - 0xad, 0xbe, 0x7e, 0x10, - 0x88, // OP_EQUALVERIFY - 0xac, // OP_CHECKSIG - }, - }, - { - Value: 0x11d260c0, // 299000000 - PkScript: []byte{ - 0x76, // OP_DUP - 0xa9, // OP_HASH160 - 0x14, // OP_DATA_20 - 0x6b, 0x12, 0x81, 0xee, 0xc2, 0x5a, 0xb4, 0xe1, - 0xe0, 0x79, 0x3f, 0xf4, 0xe0, 0x8a, 0xb1, 0xab, - 0xb3, 0x40, 0x9c, 0xd9, - 0x88, // OP_EQUALVERIFY - 0xac, // OP_CHECKSIG - }, - }, - }, - LockTime: 0, - }, - { - Version: 1, - TxIn: []*wire.TxIn{ - { - PreviousOutPoint: wire.OutPoint{ - Hash: wire.ShaHash([32]byte{ // Make go vet happy. 
- 0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73, - 0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac, - 0x3b, 0x24, 0x0c, 0x84, 0xb9, 0x17, 0xa3, 0x90, - 0x9b, 0xa1, 0xc4, 0x3d, 0xed, 0x5f, 0x51, 0xf4, - }), // f4515fed3dc4a19b90a317b9840c243bac26114cf637522373a7d486b372600b - Index: 0, - }, - SignatureScript: []byte{ - 0x49, // OP_DATA_73 - 0x30, 0x46, 0x02, 0x21, 0x00, 0xbb, 0x1a, 0xd2, - 0x6d, 0xf9, 0x30, 0xa5, 0x1c, 0xce, 0x11, 0x0c, - 0xf4, 0x4f, 0x7a, 0x48, 0xc3, 0xc5, 0x61, 0xfd, - 0x97, 0x75, 0x00, 0xb1, 0xae, 0x5d, 0x6b, 0x6f, - 0xd1, 0x3d, 0x0b, 0x3f, 0x4a, 0x02, 0x21, 0x00, - 0xc5, 0xb4, 0x29, 0x51, 0xac, 0xed, 0xff, 0x14, - 0xab, 0xba, 0x27, 0x36, 0xfd, 0x57, 0x4b, 0xdb, - 0x46, 0x5f, 0x3e, 0x6f, 0x8d, 0xa1, 0x2e, 0x2c, - 0x53, 0x03, 0x95, 0x4a, 0xca, 0x7f, 0x78, 0xf3, - 0x01, // 73-byte signature - 0x41, // OP_DATA_65 - 0x04, 0xa7, 0x13, 0x5b, 0xfe, 0x82, 0x4c, 0x97, - 0xec, 0xc0, 0x1e, 0xc7, 0xd7, 0xe3, 0x36, 0x18, - 0x5c, 0x81, 0xe2, 0xaa, 0x2c, 0x41, 0xab, 0x17, - 0x54, 0x07, 0xc0, 0x94, 0x84, 0xce, 0x96, 0x94, - 0xb4, 0x49, 0x53, 0xfc, 0xb7, 0x51, 0x20, 0x65, - 0x64, 0xa9, 0xc2, 0x4d, 0xd0, 0x94, 0xd4, 0x2f, - 0xdb, 0xfd, 0xd5, 0xaa, 0xd3, 0xe0, 0x63, 0xce, - 0x6a, 0xf4, 0xcf, 0xaa, 0xea, 0x4e, 0xa1, 0x4f, - 0xbb, // 65-byte pubkey - }, - Sequence: 0xffffffff, - }, - }, - TxOut: []*wire.TxOut{ - { - Value: 0xf4240, // 1000000 - PkScript: []byte{ - 0x76, // OP_DUP - 0xa9, // OP_HASH160 - 0x14, // OP_DATA_20 - 0x39, 0xaa, 0x3d, 0x56, 0x9e, 0x06, 0xa1, 0xd7, - 0x92, 0x6d, 0xc4, 0xbe, 0x11, 0x93, 0xc9, 0x9b, - 0xf2, 0xeb, 0x9e, 0xe0, - 0x88, // OP_EQUALVERIFY - 0xac, // OP_CHECKSIG - }, - }, - }, - LockTime: 0, - }, - }, -} diff --git a/blocklogger.go b/blocklogger.go index 1901a708..8e9654d0 100644 --- a/blocklogger.go +++ b/blocklogger.go @@ -5,7 +5,7 @@ import ( "time" "github.com/btcsuite/btclog" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrutil" ) // blockProgressLogger provides periodic logging for other services in order @@ -36,7 +36,7 @@ func newBlockProgressLogger(progressMessage string, logger btclog.Logger) *block // LogBlockHeight logs a new block height as an information message to show // progress to the user. In order to prevent spam, it limits logging to one // message every 10 seconds with duration and totals included. -func (b *blockProgressLogger) LogBlockHeight(block *btcutil.Block) { +func (b *blockProgressLogger) LogBlockHeight(block *dcrutil.Block) { b.Lock() defer b.Unlock() diff --git a/blockmanager.go b/blockmanager.go index 1fb90538..7a346f51 100644 --- a/blockmanager.go +++ b/blockmanager.go @@ -1,23 +1,31 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package main import ( + "bytes" "container/list" - "net" + "encoding/gob" + "errors" + "fmt" + "io/ioutil" + "math/rand" "os" "path/filepath" "sync" "sync/atomic" "time" - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + dcrdb "github.com/decred/dcrd/database" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) const ( @@ -32,6 +40,10 @@ const ( // database type is appended to this value to form the full block // database name. 
blockDbNamePrefix = "blocks" + + // maxResendLimit is the maximum number of times a node can resend a + // block or transaction before it is dropped. + maxResendLimit = 3 ) // newPeerMsg signifies a newly connected peer to the block handler. @@ -39,21 +51,21 @@ type newPeerMsg struct { peer *peer } -// blockMsg packages a bitcoin block message and the peer it came from together +// blockMsg packages a decred block message and the peer it came from together // so the block handler has access to that information. type blockMsg struct { - block *btcutil.Block + block *dcrutil.Block peer *peer } -// invMsg packages a bitcoin inv message and the peer it came from together +// invMsg packages a decred inv message and the peer it came from together // so the block handler has access to that information. type invMsg struct { inv *wire.MsgInv peer *peer } -// headersMsg packages a bitcoin headers message and the peer it came from +// headersMsg packages a decred headers message and the peer it came from // together so the block handler has access to that information. type headersMsg struct { headers *wire.MsgHeaders @@ -65,10 +77,10 @@ type donePeerMsg struct { peer *peer } -// txMsg packages a bitcoin tx message and the peer it came from together +// txMsg packages a decred tx message and the peer it came from together // so the block handler has access to that information. type txMsg struct { - tx *btcutil.Tx + tx *dcrutil.Tx peer *peer } @@ -78,11 +90,28 @@ type getSyncPeerMsg struct { reply chan *peer } +// requestFromPeerMsg is a message type to be sent across the message channel +// for requesting either blocks or transactions from a given peer. It routes +// this through the block manager so the block manager doesn't ban the peer +// when it sends this information back. +type requestFromPeerMsg struct { + peer *peer + blocks []*chainhash.Hash + txs []*chainhash.Hash + reply chan requestFromPeerResponse +} + +// requestFromPeerRespons eis a response sent to the reply channel of a +// requestFromPeerMsg query. +type requestFromPeerResponse struct { + err error +} + // checkConnectBlockMsg is a message type to be sent across the message channel // for requesting chain to check if a block connects to the end of the current // main chain. type checkConnectBlockMsg struct { - block *btcutil.Block + block *dcrutil.Block reply chan error } @@ -100,6 +129,122 @@ type calcNextReqDifficultyMsg struct { reply chan calcNextReqDifficultyResponse } +// calcNextReqDiffNodeResponse is a response sent to the reply channel of a +// calcNextReqDiffNodeMsg query. +type calcNextReqDiffNodeResponse struct { + difficulty uint32 + err error +} + +// calcNextReqDiffNodeMsg is a message type to be sent across the message +// channel for requesting the required difficulty for some block building on +// the given block hash. +type calcNextReqDiffNodeMsg struct { + hash *chainhash.Hash + timestamp time.Time + reply chan calcNextReqDifficultyResponse +} + +// calcNextReqStakeDifficultyResponse is a response sent to the reply channel of a +// calcNextReqStakeDifficultyMsg query. +type calcNextReqStakeDifficultyResponse struct { + stakeDifficulty int64 + err error +} + +// calcNextReqStakeDifficultyMsg is a message type to be sent across the message +// channel for requesting the required stake difficulty of the next block. +type calcNextReqStakeDifficultyMsg struct { + reply chan calcNextReqStakeDifficultyResponse +} + +// getBlockFromHashResponse is a response sent to the reply channel of a +// getBlockFromHashMsg query. 
+type getBlockFromHashResponse struct {
+	block *dcrutil.Block
+	err error
+}
+
+// getBlockFromHashMsg is a message type to be sent across the message
+// channel for requesting a given block from the block manager.
+type getBlockFromHashMsg struct {
+	hash chainhash.Hash
+	reply chan getBlockFromHashResponse
+}
+
+// getGenerationResponse is a response sent to the reply channel of a
+// getGenerationMsg query.
+type getGenerationResponse struct {
+	hashes []chainhash.Hash
+	err error
+}
+
+// getGenerationMsg is a message type to be sent across the message
+// channel for requesting the entire generation of a block node.
+type getGenerationMsg struct {
+	hash chainhash.Hash
+	reply chan getGenerationResponse
+}
+
+// forceReorganizationResponse is a response sent to the reply channel of a
+// forceReorganizationMsg query.
+type forceReorganizationResponse struct {
+	err error
+}
+
+// forceReorganizationMsg is a message type to be sent across the message
+// channel for requesting that the block on head be reorganized to one of its
+// adjacent orphans.
+type forceReorganizationMsg struct {
+	formerBest chainhash.Hash
+	newBest chainhash.Hash
+	reply chan forceReorganizationResponse
+}
+
+// getLotterDataResponse is a response sent to the reply channel of a
+// getLotteryDataMsg query.
+type getLotterDataResponse struct {
+	finalState [6]byte
+	poolSize uint32
+	winningTickets []chainhash.Hash
+	err error
+}
+
+// getLotteryDataMsg is a message type to be sent across the message
+// channel for requesting lottery data about some block.
+type getLotteryDataMsg struct {
+	hash chainhash.Hash
+	reply chan getLotterDataResponse
+}
+
+// checkMissedTicketsResponse is a response sent to the reply channel of a
+// checkMissedTicketsMsg query.
+type checkMissedTicketsResponse struct {
+	missedTickets map[chainhash.Hash]bool
+}
+
+// checkMissedTicketsMsg is a message type to be sent across the message
+// channel used for checking whether or not a list of tickets has been missed.
+type checkMissedTicketsMsg struct {
+	tickets []chainhash.Hash
+	reply chan checkMissedTicketsResponse
+}
+
+// getTopBlockResponse is a response to the request for the block at HEAD of the
+// blockchain. We need to be able to obtain this from blockChain for mining
+// purposes.
+type getTopBlockResponse struct {
+	block dcrutil.Block
+	err error
+}
+
+// getTopBlockMsg is a message type to be sent across the message
+// channel for requesting the block at the HEAD of the blockchain.
+type getTopBlockMsg struct {
+	reply chan getTopBlockResponse
+}
+
 // processBlockResponse is a response sent to the reply channel of a
 // processBlockMsg.
 type processBlockResponse struct {
@@ -107,6 +252,33 @@ type processBlockResponse struct {
 	err error
 }
 
+// processBlockMsg is a message type to be sent across the message channel
+// for requesting that a block be processed. Note this call differs from
+// blockMsg above in that blockMsg is intended for blocks that came from peers
+// and have extra handling whereas this message essentially is just a
+// concurrent safe way to call ProcessBlock on the internal block chain
+// instance.
+type processBlockMsg struct {
+	block *dcrutil.Block
+	flags blockchain.BehaviorFlags
+	reply chan processBlockResponse
+}
+
+// processTransactionResponse is a response sent to the reply channel of a
+// processTransactionMsg.
+type processTransactionResponse struct { + err error +} + +// processTransactionMsg is a message type to be sent across the message +// channel for requesting a transaction to be processed through the block +// manager. +type processTransactionMsg struct { + tx *dcrutil.Tx + allowOrphans bool + rateLimit bool + reply chan processTransactionResponse +} + // fetchTransactionStoreResponse is a response sent to the reply channel of a // fetchTransactionStoreMsg. type fetchTransactionStoreResponse struct { @@ -117,19 +289,9 @@ type fetchTransactionStoreResponse struct { // fetchTransactionStoreMsg is a message type to be sent across the message // channel fetching the tx input store for some Tx. type fetchTransactionStoreMsg struct { - tx *btcutil.Tx - reply chan fetchTransactionStoreResponse -} - -// processBlockMsg is a message type to be sent across the message channel -// for requested a block is processed. Note this call differs from blockMsg -// above in that blockMsg is intended for blocks that came from peers and have -// extra handling whereas this message essentially is just a concurrent safe -// way to call ProcessBlock on the internal block chain instance. -type processBlockMsg struct { - block *btcutil.Block - flags blockchain.BehaviorFlags - reply chan processBlockResponse + tx *dcrutil.Tx + isTreeValid bool + reply chan fetchTransactionStoreResponse } // isCurrentMsg is a message type to be sent across the message channel for @@ -139,6 +301,19 @@ type isCurrentMsg struct { reply chan bool } +// missedTicketsMsg handles a request for the list of currently missed tickets +// from the ticket database. +type missedTicketsMsg struct { + reply chan missedTicketsResponse +} + +// missedTicketsResponse is a response sent to the reply channel of a +// ticketBucketsMsg. +type missedTicketsResponse struct { + Tickets stake.SStxMemMap + err error +} + // pauseMsg is a message type to be sent across the message channel for // pausing the block manager. This effectively provides the caller with // exclusive access over the manager until a receive is performed on the @@ -147,23 +322,89 @@ type pauseMsg struct { unpause <-chan struct{} } +// ticketsForAddressMsg handles a request for obtaining all the current +// tickets corresponding to some address. +type ticketsForAddressMsg struct { + Address dcrutil.Address + reply chan ticketsForAddressResponse +} + +// ticketsForAddressResponse is a response to the reply channel of a +// ticketsForAddressMsg. +type ticketsForAddressResponse struct { + Tickets []chainhash.Hash + err error +} + +// getCurrentTemplateMsg handles a request for the current mining block template. +type getCurrentTemplateMsg struct { + reply chan getCurrentTemplateResponse +} + +// getCurrentTemplateResponse is a response sent to the reply channel of a +// getCurrentTemplateMsg. +type getCurrentTemplateResponse struct { + Template *BlockTemplate +} + +// setCurrentTemplateMsg handles a request to change the current mining block +// template. +type setCurrentTemplateMsg struct { + Template *BlockTemplate + reply chan setCurrentTemplateResponse +} + +// setCurrentTemplateResponse is a response sent to the reply channel of a +// setCurrentTemplateMsg. +type setCurrentTemplateResponse struct { +} + +// getParentTemplateMsg handles a request for the current parent mining block +// template. +type getParentTemplateMsg struct { + reply chan getParentTemplateResponse +} + +// getParentTemplateResponse is a response sent to the reply channel of a +// getParentTemplateMsg. 
+type getParentTemplateResponse struct {
+	Template *BlockTemplate
+}
+
+// setParentTemplateMsg handles a request to change the parent mining block
+// template.
+type setParentTemplateMsg struct {
+	Template *BlockTemplate
+	reply chan setParentTemplateResponse
+}
+
+// setParentTemplateResponse is a response sent to the reply channel of a
+// setParentTemplateMsg.
+type setParentTemplateResponse struct {
+}
+
 // headerNode is used as a node in a list of headers that are linked together
 // between checkpoints.
 type headerNode struct {
 	height int64
-	sha *wire.ShaHash
+	sha *chainhash.Hash
 }
 
 // chainState tracks the state of the best chain as blocks are inserted. This
-// is done because btcchain is currently not safe for concurrent access and the
+// is done because blockchain is currently not safe for concurrent access and the
 // block manager is typically quite busy processing block and inventory.
 // Therefore, requesting this information from chain through the block manager
 // would not be anywhere near as efficient as simply updating it as each block
 // is inserted and protecting it with a mutex.
 type chainState struct {
 	sync.Mutex
-	newestHash *wire.ShaHash
+	newestHash *chainhash.Hash
 	newestHeight int64
+	nextFinalState [6]byte
+	nextPoolSize uint32
+	winningTickets []chainhash.Hash
+	missedTickets []chainhash.Hash
+	curBlockHeader *wire.BlockHeader
 	pastMedianTime time.Time
 	pastMedianTimeErr error
 }
@@ -172,42 +413,110 @@ type chainState struct {
 // chain.
 //
 // This function is safe for concurrent access.
-func (c *chainState) Best() (*wire.ShaHash, int64) {
+func (c *chainState) Best() (*chainhash.Hash, int64) {
 	c.Lock()
 	defer c.Unlock()
 
 	return c.newestHash, c.newestHeight
 }
 
+// NextFinalState returns the final state of the lottery for the current top
+// block of the blockchain.
+//
+// This function is safe for concurrent access.
+func (c *chainState) NextFinalState() [6]byte {
+	c.Lock()
+	defer c.Unlock()
+
+	return c.nextFinalState
+}
+
+// NextPoolSize returns the ticket pool size for the current top block of the
+// blockchain.
+//
+// This function is safe for concurrent access.
+func (c *chainState) NextPoolSize() uint32 {
+	c.Lock()
+	defer c.Unlock()
+
+	return c.nextPoolSize
+}
+
+// NextWinners returns the eligible SStx hashes to vote on the
+// next block as inputs for SSGen.
+//
+// This function is safe for concurrent access.
+func (c *chainState) NextWinners() []chainhash.Hash {
+	c.Lock()
+	defer c.Unlock()
+
+	return c.winningTickets
+}
+
+// CurrentlyMissed returns the eligible SStx hashes that can be revoked.
+//
+// This function is safe for concurrent access.
+func (c *chainState) CurrentlyMissed() []chainhash.Hash {
+	c.Lock()
+	defer c.Unlock()
+
+	return c.missedTickets
+}
+
+// GetTopBlockHeader returns the block header of the current top block of the
+// blockchain.
+//
+// This function is safe for concurrent access.
+func (c *chainState) GetTopBlockHeader() *wire.BlockHeader {
+	c.Lock()
+	defer c.Unlock()
+
+	return c.curBlockHeader
+}
+
+// BlockLotteryData refers to cached data that is generated when a block
+// is inserted, so that it doesn't later need to be recalculated.
+type BlockLotteryData struct {
+	ntfnData *WinningTicketsNtfnData
+	poolSize uint32
+	finalState [6]byte
+}
+
 // blockManager provides a concurrency safe block manager for handling all
 // incoming blocks.
type blockManager struct { - server *server - started int32 - shutdown int32 - blockChain *blockchain.BlockChain - requestedTxns map[wire.ShaHash]struct{} - requestedBlocks map[wire.ShaHash]struct{} - progressLogger *blockProgressLogger - receivedLogBlocks int64 - receivedLogTx int64 - processingReqs bool - syncPeer *peer - msgChan chan interface{} - chainState chainState - wg sync.WaitGroup - quit chan struct{} + server *server + started int32 + shutdown int32 + blockChain *blockchain.BlockChain + requestedTxns map[chainhash.Hash]struct{} + requestedEverTxns map[chainhash.Hash]uint8 + requestedBlocks map[chainhash.Hash]struct{} + requestedEverBlocks map[chainhash.Hash]uint8 + progressLogger *blockProgressLogger + receivedLogBlocks int64 + receivedLogTx int64 + lastBlockLogTime time.Time + processingReqs bool + syncPeer *peer + msgChan chan interface{} + chainState chainState + wg sync.WaitGroup + quit chan struct{} + + blockLotteryDataCache map[chainhash.Hash]*BlockLotteryData + blockLotteryDataCacheMutex *sync.Mutex // The following fields are used for headers-first mode. headersFirstMode bool headerList *list.List startHeader *list.Element nextCheckpoint *chaincfg.Checkpoint + + cachedCurrentTemplate *BlockTemplate + cachedParentTemplate *BlockTemplate + AggressiveMining bool } // resetHeaderState sets the headers-first mode state to values appropriate for // syncing from a new peer. -func (b *blockManager) resetHeaderState(newestHash *wire.ShaHash, newestHeight int64) { +func (b *blockManager) resetHeaderState(newestHash *chainhash.Hash, newestHeight int64) { b.headersFirstMode = false b.headerList.Init() b.startHeader = nil @@ -222,10 +531,17 @@ func (b *blockManager) resetHeaderState(newestHash *wire.ShaHash, newestHeight i } // updateChainState updates the chain state associated with the block manager. -// This allows fast access to chain information since btcchain is currently not +// This allows fast access to chain information since blockchain is currently not // safe for concurrent access and the block manager is typically quite busy // processing block and inventory. -func (b *blockManager) updateChainState(newestHash *wire.ShaHash, newestHeight int64) { +func (b *blockManager) updateChainState(newestHash *chainhash.Hash, + newestHeight int64, + finalState [6]byte, + poolSize uint32, + winningTickets []chainhash.Hash, + missedTickets []chainhash.Hash, + curBlockHeader *wire.BlockHeader) { + b.chainState.Lock() defer b.chainState.Unlock() @@ -237,6 +553,12 @@ func (b *blockManager) updateChainState(newestHash *wire.ShaHash, newestHeight i } else { b.chainState.pastMedianTime = medianTime } + + b.chainState.nextFinalState = finalState + b.chainState.nextPoolSize = poolSize + b.chainState.winningTickets = winningTickets + b.chainState.missedTickets = missedTickets + b.chainState.curBlockHeader = curBlockHeader } // findNextHeaderCheckpoint returns the next checkpoint after the passed height. @@ -341,7 +663,7 @@ func (b *blockManager) startSync(peers *list.List) { // not support the headers-first approach so do normal block // downloads when in regression test mode. if b.nextCheckpoint != nil && height < b.nextCheckpoint.Height && - !cfg.RegressionTest && !cfg.DisableCheckpoints { + !cfg.DisableCheckpoints { bestPeer.PushGetHeadersMsg(locator, b.nextCheckpoint.Hash) b.headersFirstMode = true @@ -360,31 +682,33 @@ func (b *blockManager) startSync(peers *list.List) { // isSyncCandidate returns whether or not the peer is a candidate to consider // syncing from. 
 func (b *blockManager) isSyncCandidate(p *peer) bool {
-	// Typically a peer is not a candidate for sync if it's not a full node,
-	// however regression test is special in that the regression tool is
-	// not a full node and still needs to be considered a sync candidate.
-	if cfg.RegressionTest {
-		// The peer is not a candidate if it's not coming from localhost
-		// or the hostname can't be determined for some reason.
-		host, _, err := net.SplitHostPort(p.addr)
-		if err != nil {
-			return false
-		}
-
-		if host != "127.0.0.1" && host != "localhost" {
-			return false
-		}
-	} else {
-		// The peer is not a candidate for sync if it's not a full node.
-		if p.services&wire.SFNodeNetwork != wire.SFNodeNetwork {
-			return false
-		}
+	// The peer is not a candidate for sync if it's not a full node.
+	if p.services&wire.SFNodeNetwork != wire.SFNodeNetwork {
+		return false
 	}
 
 	// Candidate if all checks passed.
 	return true
 }
 
+// syncMiningStateAfterSync polls the block manager for the current sync
+// state; once the manager is synced, it requests the current mining state
+// from the peer.
+func (b *blockManager) syncMiningStateAfterSync(p *peer) {
+	ticker := time.NewTicker(time.Second * 3)
+	go func() {
+		for {
+			select {
+			case <-ticker.C:
+				if b.IsCurrent() {
+					p.PushGetMiningStateMsg()
+					return
+				}
+			}
+		}
+	}()
+}
+
 // handleNewPeerMsg deals with new peers that have signalled they may
 // be considered as a sync peer (they have already successfully negotiated). It
 // also starts syncing if needed. It is invoked from the syncHandler goroutine.
@@ -406,6 +730,11 @@ func (b *blockManager) handleNewPeerMsg(peers *list.List, p *peer) {
 
 	// Start syncing by choosing the best candidate if needed.
 	b.startSync(peers)
+
+	// Grab the mining state from this peer after we're synced.
+	if !cfg.NoMiningStateSync {
+		b.syncMiningStateAfterSync(p)
+	}
 }
 
 // handleDonePeerMsg deals with peers that have signalled they are done. It
@@ -458,6 +787,41 @@ func (b *blockManager) handleDonePeerMsg(peers *list.List, p *peer) {
 	}
 }
 
+// logBlockHeight logs a new block height as an information message to show
+// progress to the user. In order to prevent spam, it limits logging to one
+// message every 10 seconds with duration and totals included.
+func (b *blockManager) logBlockHeight(block *dcrutil.Block) {
+	b.receivedLogBlocks++
+	b.receivedLogTx += int64(len(block.MsgBlock().Transactions))
+
+	now := time.Now()
+	duration := now.Sub(b.lastBlockLogTime)
+	if duration < time.Second*10 {
+		return
+	}
+
+	// Truncate the duration to 10s of milliseconds.
+	durationMillis := int64(duration / time.Millisecond)
+	tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10)
+
+	// Log information about new block height.
+	blockStr := "blocks"
+	if b.receivedLogBlocks == 1 {
+		blockStr = "block"
+	}
+	txStr := "transactions"
+	if b.receivedLogTx == 1 {
+		txStr = "transaction"
+	}
+	bmgrLog.Infof("Processed %d %s in the last %s (%d %s, height %d, %s)",
+		b.receivedLogBlocks, blockStr, tDuration, b.receivedLogTx,
+		txStr, block.Height(), block.MsgBlock().Header.Timestamp)
+
+	b.receivedLogBlocks = 0
+	b.receivedLogTx = 0
+	b.lastBlockLogTime = now
+}
+
 // handleTxMsg handles transaction messages from all peers.
func (b *blockManager) handleTxMsg(tmsg *txMsg) { // NOTE: BitcoinJ, and possibly other wallets, don't follow the spec of @@ -508,8 +872,10 @@ func (b *blockManager) handleTxMsg(tmsg *txMsg) { // current returns true if we believe we are synced with our peers, false if we // still have blocks to check func (b *blockManager) current() bool { - if !b.blockChain.IsCurrent(b.server.timeSource) { - return false + if !cfg.TestNet { + if !b.blockChain.IsCurrent(b.server.timeSource) { + return false + } } // if blockChain thinks we are current and we have no syncPeer it @@ -524,23 +890,157 @@ func (b *blockManager) current() bool { // TODO(oga) we can get chain to return the height of each block when we // parse an orphan, which would allow us to update the height of peers // from what it was at initial handshake. - if err != nil || height < int64(b.syncPeer.lastBlock) { + if err != nil || height < int64(b.syncPeer.startingHeight) { return false } + return true } +// checkBlockForHiddenVotes checks to see if a newly added block contains +// any votes that were previously unknown to our daemon. If it does, it +// adds these votes to the cached parent block template. +// +// This is UNSAFE for concurrent access. +func (bm *blockManager) checkBlockForHiddenVotes(block *dcrutil.Block) { + var votesFromBlock []*dcrutil.Tx + + for _, stx := range block.STransactions() { + isSSGen, _ := stake.IsSSGen(stx) + if isSSGen { + votesFromBlock = append(votesFromBlock, stx) + } + } + + // Identify the cached parent template; it's possible that + // the parent template hasn't yet been updated, so we may + // need to use the current template. + var template *BlockTemplate + + if bm.cachedCurrentTemplate != nil { + if bm.cachedCurrentTemplate.height == + block.Height() { + template = bm.cachedCurrentTemplate + } + } + if template == nil && + bm.cachedParentTemplate != nil { + if bm.cachedParentTemplate.height == + block.Height() { + template = bm.cachedParentTemplate + } + } + + // Now that we have the template, grab the votes and compare + // them with those found in the newly added block. If we don't + // the votes, they will need to be added to our block template. + var updatedTxTreeStake []*dcrutil.Tx + numVotes := 0 + if template != nil { + var newVotes []*dcrutil.Tx + + templateBlock := dcrutil.NewBlock(template.block) + templateBlock.SetHeight(template.height) + for _, vote := range votesFromBlock { + haveIt := false + + for _, stx := range templateBlock.STransactions() { + isSSGen, _ := stake.IsSSGen(stx) + if isSSGen { + if vote.Sha().IsEqual(stx.Sha()) { + haveIt = true + numVotes++ + break + } + } + } + + if !haveIt { + // Jam it directly into the block. + template.block.AddSTransaction(vote.MsgTx()) + newVotes = append(newVotes, vote) + numVotes++ + } + } + + // We have the list of new votes now; append it to the + // list of template stake transactions. + updatedTxTreeStake = append(templateBlock.STransactions(), + newVotes...) + } else { + // We have no template, so nothing to update. + return + } + + // Create a new coinbase. 
+ random, err := wire.RandomUint64() + if err != nil { + return + } + height := block.MsgBlock().Header.Height + opReturnPkScript, err := standardCoinbaseOpReturn(height, + []uint64{0, 0, 0, random}) + if err != nil { + bmgrLog.Warnf("failed to create coinbase OP_RETURN while generating " + + "block with extra found voters") + return + } + coinbase, err := createCoinbaseTx( + template.block.Transactions[0].TxIn[0].SignatureScript, + opReturnPkScript, + int64(template.block.Header.Height), + cfg.miningAddrs[rand.Intn(len(cfg.miningAddrs))], + uint16(numVotes), + bm.server.chainParams) + if err != nil { + bmgrLog.Warnf("failed to create coinbase while generating " + + "block with extra found voters") + return + } + template.block.Transactions[0] = coinbase.MsgTx() + + // Patch the header. First, reconstruct the merkle trees, then + // correct the number of voters, and finally recalculate the size. + var updatedTxTreeRegular []*dcrutil.Tx + updatedTxTreeRegular = append(updatedTxTreeRegular, coinbase) + for i, mtx := range template.block.Transactions { + // Coinbase + if i == 0 { + continue + } + tx := dcrutil.NewTx(mtx) + updatedTxTreeRegular = append(updatedTxTreeRegular, tx) + } + merkles := blockchain.BuildMerkleTreeStore(updatedTxTreeRegular) + template.block.Header.StakeRoot = *merkles[len(merkles)-1] + smerkles := blockchain.BuildMerkleTreeStore(updatedTxTreeStake) + template.block.Header.Voters = uint16(numVotes) + template.block.Header.StakeRoot = *smerkles[len(smerkles)-1] + template.block.Header.Size = uint32(template.block.SerializeSize()) + + return +} + // handleBlockMsg handles block messages from all peers. func (b *blockManager) handleBlockMsg(bmsg *blockMsg) { // If we didn't ask for this block then the peer is misbehaving. blockSha := bmsg.block.Sha() if _, ok := bmsg.peer.requestedBlocks[*blockSha]; !ok { - // The regression test intentionally sends some blocks twice - // to test duplicate block insertion fails. Don't disconnect - // the peer or ignore the block when we're in regression test - // mode in this case so the chain code is actually fed the - // duplicate blocks. - if !cfg.RegressionTest { + // Check to see if we ever requested this block, since it may + // have been accidentally sent in duplicate. If it was, + // increment the counter in the ever requested map and make + // sure that the node isn't spamming us with these blocks. + received, ok := b.requestedEverBlocks[*blockSha] + if ok { + if received > maxResendLimit { + bmgrLog.Warnf("Got duplicate block %v from %s -- "+ + "too many times, disconnecting", blockSha, + bmsg.peer.addr) + bmsg.peer.Disconnect() + return + } + b.requestedEverBlocks[*blockSha]++ + } else { bmgrLog.Warnf("Got unrequested block %v from %s -- "+ "disconnecting", blockSha, bmsg.peer.addr) bmsg.peer.Disconnect() @@ -580,8 +1080,9 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) { // Process the block to include validation, best chain selection, orphan // handling, etc. - isOrphan, err := b.blockChain.ProcessBlock(bmsg.block, + onMainChain, isOrphan, err := b.blockChain.ProcessBlock(bmsg.block, b.server.timeSource, behaviorFlags) + if err != nil { // When the error is a rule error, it means the block was simply // rejected as opposed to something actually going wrong, so log @@ -613,7 +1114,7 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) { // if we are actively syncing while the chain is not yet current or // who may have lost the lock announcment race. 
var heightUpdate int32 - var blkShaUpdate *wire.ShaHash + var blkShaUpdate *chainhash.Hash // Request the parents for the orphan block from the peer that sent it. if isOrphan { @@ -623,19 +1124,9 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) { // Extraction is only attempted if the block's version is // high enough (ver 2+). header := &bmsg.block.MsgBlock().Header - if blockchain.ShouldHaveSerializedBlockHeight(header) { - coinbaseTx := bmsg.block.Transactions()[0] - cbHeight, err := blockchain.ExtractCoinbaseHeight(coinbaseTx) - if err != nil { - bmgrLog.Warnf("Unable to extract height from "+ - "coinbase tx: %v", err) - } else { - bmgrLog.Debugf("Extracted height of %v from "+ - "orphan block", cbHeight) - heightUpdate = int32(cbHeight) - blkShaUpdate = blockSha - } - } + cbHeight := header.Height + heightUpdate = int32(cbHeight) + blkShaUpdate = blockSha orphanRoot := b.blockChain.GetOrphanRoot(blockSha) locator, err := b.blockChain.LatestBlockLocator() @@ -649,24 +1140,114 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) { // When the block is not an orphan, log information about it and // update the chain state. b.progressLogger.LogBlockHeight(bmsg.block) + r := b.server.rpcServer - // Query the db for the latest best block since the block - // that was processed could be on a side chain or have caused - // a reorg. - newestSha, newestHeight, _ := b.server.db.NewestSha() - b.updateChainState(newestSha, newestHeight) + // Query the DB for the winning SStx for the next top block if we've + // reached stake validation height. Broadcast them if this is the first + // time determining them. + b.blockLotteryDataCacheMutex.Lock() + broadcastWinners := false + lotteryData := new(BlockLotteryData) - // Update this peer's latest block height, for future - // potential sync node candidancy. - heightUpdate = int32(newestHeight) - blkShaUpdate = newestSha + _, exists := b.blockLotteryDataCache[*blockSha] + if !exists { + winningTickets, poolSize, finalState, err := + b.blockChain.GetWinningTickets(*blockSha) + if err != nil && int64(bmsg.block.MsgBlock().Header.Height) >= + b.server.chainParams.StakeValidationHeight-1 { + bmgrLog.Errorf("Failed to get next winning tickets: %v", err) - // Allow any clients performing long polling via the - // getblocktemplate RPC to be notified when the new block causes - // their old block template to become stale. - rpcServer := b.server.rpcServer - if rpcServer != nil { - rpcServer.gbtWorkState.NotifyBlockConnected(blockSha) + code, reason := errToRejectErr(err) + bmsg.peer.PushRejectMsg(wire.CmdBlock, code, reason, + blockSha, false) + b.blockLotteryDataCacheMutex.Unlock() + return + } + + winningTicketsNtfn := &WinningTicketsNtfnData{ + *blockSha, + int64(bmsg.block.MsgBlock().Header.Height), + winningTickets} + lotteryData = &BlockLotteryData{ + winningTicketsNtfn, + uint32(poolSize), + finalState, + } + b.blockLotteryDataCache[*blockSha] = lotteryData + broadcastWinners = true + b.blockLotteryDataCacheMutex.Unlock() + } else { + lotteryData, _ = b.blockLotteryDataCache[*blockSha] + b.blockLotteryDataCacheMutex.Unlock() + } + if r != nil && broadcastWinners { + // Rebroadcast the existing data to WS clients. + r.ntfnMgr.NotifyWinningTickets(lotteryData.ntfnData) + } + + if onMainChain { + // A new block is connected, however, this new block may have + // votes in it that were hidden from the network and which + // validate our parent block. We should bolt these new votes + // into the tx tree stake of the old block template on parent. 
+ svl := b.server.chainParams.StakeValidationHeight + if b.AggressiveMining && bmsg.block.Height() >= svl { + b.checkBlockForHiddenVotes(bmsg.block) + } + + // Query the db for the latest best block since the block + // that was processed could be on a side chain or have caused + // a reorg. + newestSha, newestHeight, _ := b.server.db.NewestSha() + + // Query the DB for the missed tickets for the next top block. + missedTickets := b.blockChain.GetMissedTickets() + + // Retrieve the current block header. + curBlockHeader := b.blockChain.GetCurrentBlockHeader() + + if r != nil { + // Update registered websocket clients on the + // current stake difficulty. + nextStakeDiff, err := + b.blockChain.CalcNextRequiredStakeDifficulty() + if err != nil { + bmgrLog.Warnf("Failed to get next stake difficulty "+ + "calculation: %v", err) + + } else { + r.ntfnMgr.NotifyStakeDifficulty( + &StakeDifficultyNtfnData{ + *newestSha, + newestHeight, + nextStakeDiff, + }) + b.server.txMemPool.PruneStakeTx(nextStakeDiff, + b.chainState.newestHeight) + b.server.txMemPool.PruneExpiredTx(b.chainState.newestHeight) + } + } + + b.updateChainState(newestSha, + newestHeight, + lotteryData.finalState, + lotteryData.poolSize, + lotteryData.ntfnData.Tickets, + missedTickets, + curBlockHeader) + + // Update this peer's latest block height, for future + // potential sync node candidancy. + heightUpdate = int32(newestHeight) + blkShaUpdate = newestSha + + // Allow any clients performing long polling via the + // getblocktemplate RPC to be notified when the new block causes + // their old block template to become stale. + rpcServer := b.server.rpcServer + if rpcServer != nil { + rpcServer.gbtWorkState.NotifyBlockConnected(blockSha) + } } } @@ -677,7 +1258,8 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) { if blkShaUpdate != nil && heightUpdate != 0 { bmsg.peer.UpdateLastBlockHeight(heightUpdate) if isOrphan || b.current() { - go b.server.UpdatePeerHeights(blkShaUpdate, int32(heightUpdate), bmsg.peer) + go b.server.UpdatePeerHeights(blkShaUpdate, int32(heightUpdate), + bmsg.peer) } } // Sync the db to disk. @@ -707,7 +1289,7 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) { prevHash := b.nextCheckpoint.Hash b.nextCheckpoint = b.findNextHeaderCheckpoint(prevHeight) if b.nextCheckpoint != nil { - locator := blockchain.BlockLocator([]*wire.ShaHash{prevHash}) + locator := blockchain.BlockLocator([]*chainhash.Hash{prevHash}) err := bmsg.peer.PushGetHeadersMsg(locator, b.nextCheckpoint.Hash) if err != nil { bmgrLog.Warnf("Failed to send getheaders message to "+ @@ -726,7 +1308,7 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) { b.headersFirstMode = false b.headerList.Init() bmgrLog.Infof("Reached the final checkpoint -- switching to normal mode") - locator := blockchain.BlockLocator([]*wire.ShaHash{blockSha}) + locator := blockchain.BlockLocator([]*chainhash.Hash{blockSha}) err = bmsg.peer.PushGetBlocksMsg(locator, &zeroHash) if err != nil { bmgrLog.Warnf("Failed to send getblocks message to peer %s: %v", @@ -765,6 +1347,7 @@ func (b *blockManager) fetchHeaderBlocks() { } if !haveInv { b.requestedBlocks[*node.sha] = struct{}{} + b.requestedEverBlocks[*node.sha] = 0 b.syncPeer.requestedBlocks[*node.sha] = struct{}{} gdmsg.AddInvVect(iv) numRequested++ @@ -799,7 +1382,7 @@ func (b *blockManager) handleHeadersMsg(hmsg *headersMsg) { // Process all of the received headers ensuring each one connects to the // previous and that checkpoints match. 
receivedCheckpoint := false - var finalHash *wire.ShaHash + var finalHash *chainhash.Hash for _, blockHeader := range msg.Headers { blockHash := blockHeader.BlockSha() finalHash = &blockHash @@ -870,7 +1453,7 @@ func (b *blockManager) handleHeadersMsg(hmsg *headersMsg) { // This header is not a checkpoint, so request the next batch of // headers starting from the latest known header and ending with the // next checkpoint. - locator := blockchain.BlockLocator([]*wire.ShaHash{finalHash}) + locator := blockchain.BlockLocator([]*chainhash.Hash{finalHash}) err := hmsg.peer.PushGetHeadersMsg(locator, b.nextCheckpoint.Hash) if err != nil { bmgrLog.Warnf("Failed to send getheaders message to "+ @@ -942,9 +1525,11 @@ func (b *blockManager) handleInvMsg(imsg *invMsg) { if lastBlock != -1 && b.current() { exists, err := b.server.db.ExistsSha(&invVects[lastBlock].Hash) if err == nil && exists { - blkHeight, err := b.server.db.FetchBlockHeightBySha(&invVects[lastBlock].Hash) + blkHeight, err := b.server.db.FetchBlockHeightBySha( + &invVects[lastBlock].Hash) if err != nil { - bmgrLog.Warnf("Unable to fetch block height for block (sha: %v), %v", + bmgrLog.Warnf("Unable to fetch block height for block "+ + "(sha: %v), %v", &invVects[lastBlock].Hash, err) } else { imsg.peer.UpdateLastBlockHeight(int32(blkHeight)) @@ -1043,6 +1628,7 @@ func (b *blockManager) handleInvMsg(imsg *invMsg) { // request. if _, exists := b.requestedBlocks[iv.Hash]; !exists { b.requestedBlocks[iv.Hash] = struct{}{} + b.requestedEverBlocks[iv.Hash] = 0 imsg.peer.requestedBlocks[iv.Hash] = struct{}{} gdmsg.AddInvVect(iv) numRequested++ @@ -1053,6 +1639,7 @@ func (b *blockManager) handleInvMsg(imsg *invMsg) { // pending request. if _, exists := b.requestedTxns[iv.Hash]; !exists { b.requestedTxns[iv.Hash] = struct{}{} + b.requestedEverTxns[iv.Hash] = 0 imsg.peer.requestedTxns[iv.Hash] = struct{}{} gdmsg.AddInvVect(iv) numRequested++ @@ -1105,6 +1692,12 @@ out: case getSyncPeerMsg: msg.reply <- b.syncPeer + case requestFromPeerMsg: + err := b.requestFromPeer(msg.peer, msg.blocks, msg.txs) + msg.reply <- requestFromPeerResponse{ + err: err, + } + case checkConnectBlockMsg: err := b.blockChain.CheckConnectBlock(msg.block) msg.reply <- err @@ -1118,42 +1711,279 @@ out: err: err, } + case calcNextReqDiffNodeMsg: + difficulty, err := + b.blockChain.CalcNextRequiredDiffFromNode(msg.hash, + msg.timestamp) + msg.reply <- calcNextReqDifficultyResponse{ + difficulty: difficulty, + err: err, + } + + case calcNextReqStakeDifficultyMsg: + stakeDiff, err := b.blockChain.CalcNextRequiredStakeDifficulty() + msg.reply <- calcNextReqStakeDifficultyResponse{ + stakeDifficulty: stakeDiff, + err: err, + } + + case forceReorganizationMsg: + err := b.blockChain.ForceHeadReorganization( + msg.formerBest, + msg.newBest, + b.server.timeSource) + + // Reorganizing has succeeded, so we need to + // update the chain state. + if err == nil { + // Query the db for the latest best block since + // the block that was processed could be on a + // side chain or have caused a reorg. + newestSha, newestHeight, _ := b.server.db.NewestSha() + + // Fetch the required lottery data from the cache; + // it must already be there. 
+ b.blockLotteryDataCacheMutex.Lock() + lotteryData, exists := b.blockLotteryDataCache[*newestSha] + if !exists { + b.blockLotteryDataCacheMutex.Unlock() + msg.reply <- forceReorganizationResponse{ + err: fmt.Errorf("Failed to find lottery data in "+ + "cache while attempting reorganize to block %v", + newestSha), + } + continue + } + b.blockLotteryDataCacheMutex.Unlock() + + r := b.server.rpcServer + if r != nil { + // Update registered websocket clients on the + // current stake difficulty. + nextStakeDiff, err := + b.blockChain.CalcNextRequiredStakeDifficulty() + if err != nil { + bmgrLog.Warnf("Failed to get next stake difficulty "+ + "calculation: %v", err) + } else { + r.ntfnMgr.NotifyStakeDifficulty( + &StakeDifficultyNtfnData{ + *newestSha, + newestHeight, + nextStakeDiff, + }) + b.server.txMemPool.PruneStakeTx(nextStakeDiff, + b.chainState.newestHeight) + b.server.txMemPool.PruneExpiredTx( + b.chainState.newestHeight) + } + } + + missedTickets := b.blockChain.GetMissedTickets() + + curBlockHeader := b.blockChain.GetCurrentBlockHeader() + + b.updateChainState(newestSha, + newestHeight, + lotteryData.finalState, + lotteryData.poolSize, + lotteryData.ntfnData.Tickets, + missedTickets, + curBlockHeader) + } + + msg.reply <- forceReorganizationResponse{ + err: err, + } + + case getBlockFromHashMsg: + b, err := b.blockChain.GetBlockFromHash(&msg.hash) + msg.reply <- getBlockFromHashResponse{ + block: b, + err: err, + } + + case getGenerationMsg: + g, err := b.blockChain.GetGeneration(msg.hash) + msg.reply <- getGenerationResponse{ + hashes: g, + err: err, + } + + case getLotteryDataMsg: + winningTickets, poolSize, finalState, err := + b.blockChain.GetWinningTickets(msg.hash) + msg.reply <- getLotterDataResponse{ + finalState: finalState, + poolSize: uint32(poolSize), + winningTickets: winningTickets, + err: err, + } + + case getTopBlockMsg: + b, err := b.blockChain.GetTopBlock() + msg.reply <- getTopBlockResponse{ + block: b, + err: err, + } + case fetchTransactionStoreMsg: - txStore, err := b.blockChain.FetchTransactionStore(msg.tx) + txStore, err := b.blockChain.FetchTransactionStore(msg.tx, + msg.isTreeValid) msg.reply <- fetchTransactionStoreResponse{ TxStore: txStore, err: err, } case processBlockMsg: - isOrphan, err := b.blockChain.ProcessBlock( - msg.block, b.server.timeSource, - msg.flags) + onMainChain, isOrphan, err := b.blockChain.ProcessBlock( + msg.block, b.server.timeSource, msg.flags) if err != nil { msg.reply <- processBlockResponse{ isOrphan: false, err: err, } + continue } - // Query the db for the latest best block since - // the block that was processed could be on a - // side chain or have caused a reorg. - newestSha, newestHeight, _ := b.server.db.NewestSha() - b.updateChainState(newestSha, newestHeight) + // Get the winning tickets. If they've yet to be broadcasted, + // broadcast them. 
+ b.blockLotteryDataCacheMutex.Lock() + broadcastWinners := false + lotteryData := new(BlockLotteryData) + _, exists := b.blockLotteryDataCache[*msg.block.Sha()] + if !exists { + winningTickets, poolSize, finalState, err := + b.blockChain.GetWinningTickets(*msg.block.Sha()) + if err != nil && int64(msg.block.MsgBlock().Header.Height) >= + b.server.chainParams.StakeValidationHeight-1 { + bmgrLog.Warnf("Stake failure in lottery tickets "+ + "calculation: %v", err) + msg.reply <- processBlockResponse{ + isOrphan: false, + err: err, + } + b.blockLotteryDataCacheMutex.Unlock() + continue + } + + lotteryData.poolSize = uint32(poolSize) + lotteryData.finalState = finalState + lotteryData.ntfnData = &WinningTicketsNtfnData{ + *msg.block.Sha(), + int64(msg.block.MsgBlock().Header.Height), + winningTickets} + b.blockLotteryDataCache[*msg.block.Sha()] = lotteryData + broadcastWinners = true + } else { + lotteryData, _ = b.blockLotteryDataCache[*msg.block.Sha()] + } + + r := b.server.rpcServer + if r != nil && !isOrphan && broadcastWinners && + (msg.block.Height() >= + b.server.chainParams.StakeValidationHeight-1) { + // Notify registered websocket clients of newly + // eligible tickets to vote on. + if _, is := b.blockLotteryDataCache[*msg.block.Sha()]; !is { + r.ntfnMgr.NotifyWinningTickets(lotteryData.ntfnData) + } + } + b.blockLotteryDataCacheMutex.Unlock() + + // If the block added to the main chain, then we need to + // update the tip locally on block manager. + if onMainChain { + // Query the db for the latest best block since + // the block that was processed could be on a + // side chain or have caused a reorg. + newestSha, newestHeight, _ := b.server.db.NewestSha() + + // Update registered websocket clients on the + // current stake difficulty. + nextStakeDiff, err := + b.blockChain.CalcNextRequiredStakeDifficulty() + if err != nil { + bmgrLog.Warnf("Failed to get next stake difficulty "+ + "calculation: %v", err) + } else { + r.ntfnMgr.NotifyStakeDifficulty( + &StakeDifficultyNtfnData{ + *newestSha, + newestHeight, + nextStakeDiff, + }) + b.server.txMemPool.PruneStakeTx(nextStakeDiff, + b.chainState.newestHeight) + b.server.txMemPool.PruneExpiredTx( + b.chainState.newestHeight) + } + + missedTickets := b.blockChain.GetMissedTickets() + curBlockHeader := b.blockChain.GetCurrentBlockHeader() + + b.updateChainState(newestSha, + newestHeight, + lotteryData.finalState, + lotteryData.poolSize, + lotteryData.ntfnData.Tickets, + missedTickets, + curBlockHeader) + } msg.reply <- processBlockResponse{ isOrphan: isOrphan, err: nil, } + case processTransactionMsg: + err := b.server.txMemPool.ProcessTransaction(msg.tx, + msg.allowOrphans, msg.rateLimit) + msg.reply <- processTransactionResponse{ + err: err, + } + case isCurrentMsg: msg.reply <- b.current() + case missedTicketsMsg: + tickets, err := b.blockChain.MissedTickets() + msg.reply <- missedTicketsResponse{ + Tickets: tickets, + err: err, + } + case pauseMsg: // Wait until the sender unpauses the manager. 
<-msg.unpause + case ticketsForAddressMsg: + tickets, err := b.blockChain.TicketsWithAddress(msg.Address) + msg.reply <- ticketsForAddressResponse{ + Tickets: tickets, + err: err, + } + + case getCurrentTemplateMsg: + cur := deepCopyBlockTemplate(b.cachedCurrentTemplate) + msg.reply <- getCurrentTemplateResponse{ + Template: cur, + } + + case setCurrentTemplateMsg: + b.cachedCurrentTemplate = deepCopyBlockTemplate(msg.Template) + msg.reply <- setCurrentTemplateResponse{} + + case getParentTemplateMsg: + par := deepCopyBlockTemplate(b.cachedParentTemplate) + msg.reply <- getParentTemplateResponse{ + Template: par, + } + + case setParentTemplateMsg: + b.cachedParentTemplate = deepCopyBlockTemplate(msg.Template) + msg.reply <- setParentTemplateResponse{} + default: bmgrLog.Warnf("Invalid message type in block "+ "handler: %T", msg) @@ -1168,25 +1998,67 @@ out: bmgrLog.Trace("Block handler done") } -// handleNotifyMsg handles notifications from blockchain. It does things such +// handleNotifyMsg handles notifications from blockhain. It does things such // as request orphan block parents and relay accepted blocks to connected peers. func (b *blockManager) handleNotifyMsg(notification *blockchain.Notification) { switch notification.Type { // A block has been accepted into the block chain. Relay it to other // peers. + case blockchain.NTBlockAccepted: // Don't relay if we are not current. Other peers that are // current should already know about it. - if !b.current() { return } - block, ok := notification.Data.(*btcutil.Block) + band, ok := notification.Data.(*blockchain.BlockAcceptedNtfnsData) if !ok { - bmgrLog.Warnf("Chain accepted notification is not a block.") + bmgrLog.Warnf("Chain accepted notification is not " + + "BlockAcceptedNtfnsData.") break } + block := band.Block + r := b.server.rpcServer + + // Determine the winning tickets for this block, if it hasn't + // already been sent out. + if block.Height() >= + b.server.chainParams.StakeValidationHeight-1 && + r != nil { + + hash := block.Sha() + b.blockLotteryDataCacheMutex.Lock() + lotteryData := new(BlockLotteryData) + + _, exists := b.blockLotteryDataCache[*hash] + if !exists { + // Obtain the winning tickets for this block. handleNotifyMsg + // should be safe for concurrent access of things contained + // within blockchain. + wt, ps, fs, err := b.blockChain.GetWinningTickets(*hash) + if err != nil { + b.blockLotteryDataCacheMutex.Unlock() + bmgrLog.Errorf("Couldn't calculate winning tickets for "+ + "accepted block %v: %v", block.Sha(), err.Error()) + } else { + lotteryData.finalState = fs + lotteryData.poolSize = uint32(ps) + + lotteryData.ntfnData = &WinningTicketsNtfnData{ + *hash, + int64(block.MsgBlock().Header.Height), + wt} + b.blockLotteryDataCache[*hash] = lotteryData + + // Notify registered websocket clients of newly + // eligible tickets to vote on. + r.ntfnMgr.NotifyWinningTickets(lotteryData.ntfnData) + b.blockLotteryDataCache[*hash] = lotteryData + b.blockLotteryDataCacheMutex.Unlock() + } + } + } // Generate the inventory vector and relay it. iv := wire.NewInvVect(wire.InvTypeBlock, block.Sha()) @@ -1194,32 +2066,77 @@ func (b *blockManager) handleNotifyMsg(notification *blockchain.Notification) { // A block has been connected to the main block chain. 
 	case blockchain.NTBlockConnected:
-		block, ok := notification.Data.(*btcutil.Block)
+		blockSlice, ok := notification.Data.([]*dcrutil.Block)
 		if !ok {
-			bmgrLog.Warnf("Chain connected notification is not a block.")
+			bmgrLog.Warnf("Chain connected notification is not a block slice.")
 			break
 		}
 
-		// Remove all of the transactions (except the coinbase) in the
-		// connected block from the transaction pool. Secondly, remove any
+		if len(blockSlice) != 2 {
+			bmgrLog.Warnf("Chain connected notification is wrong size slice.")
+			break
+		}
+
+		block := blockSlice[0]
+		parentBlock := blockSlice[1]
+
+		// Check whether the regular tx tree of the previous block was
+		// validated by the vote bits of this block. If it was not, restore
+		// all of the parent block's transactions to the mempool; they may
+		// end up being spent in the regular tx tree of the current block,
+		// for which there is code below.
+		txTreeRegularValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
+			dcrutil.BlockValid)
+
+		if !txTreeRegularValid {
+			for _, tx := range parentBlock.Transactions()[1:] {
+				_, err := b.server.txMemPool.MaybeAcceptTransaction(tx, false,
+					true)
+				if err != nil {
+					// Remove the transaction and all transactions
+					// that depend on it if it wasn't accepted into
+					// the transaction pool. Probably this will mostly
+					// throw errors, as the majority will already be
+					// in the mempool.
+					b.server.txMemPool.RemoveTransaction(tx, true)
+				}
+			}
+		}
+
+		// Remove all of the regular and stake transactions in the
+		// connected block from the transaction pool. Also, remove any
 		// transactions which are now double spends as a result of these
-		// new transactions. Finally, remove any transaction that is
+		// new transactions. Lastly, remove any transaction that is
 		// no longer an orphan. Note that removing a transaction from
-		// pool also removes any transactions which depend on it,
-		// recursively.
-		for _, tx := range block.Transactions()[1:] {
-			b.server.txMemPool.RemoveTransaction(tx)
+		// the pool in this manner does NOT remove its dependents
+		// recursively. Do not remove the coinbase, hence the [1:] slice
+		// of the regular tx tree.
+		for _, tx := range parentBlock.Transactions()[1:] {
+			b.server.txMemPool.RemoveTransaction(tx, false)
 			b.server.txMemPool.RemoveDoubleSpends(tx)
 			b.server.txMemPool.RemoveOrphan(tx.Sha())
 			b.server.txMemPool.ProcessOrphans(tx.Sha())
 		}
 
+		for _, stx := range block.STransactions()[0:] {
+			b.server.txMemPool.RemoveTransaction(stx, false)
+			b.server.txMemPool.RemoveDoubleSpends(stx)
+			b.server.txMemPool.RemoveOrphan(stx.Sha())
+			b.server.txMemPool.ProcessOrphans(stx.Sha())
+		}
+
 		if r := b.server.rpcServer; r != nil {
 			// Now that this block is in the blockchain we can mark
 			// all the transactions (except the coinbase) as no
 			// longer needing rebroadcasting.
-			for _, tx := range block.Transactions()[1:] {
-				iv := wire.NewInvVect(wire.InvTypeTx, tx.Sha())
+			if txTreeRegularValid {
+				for _, tx := range parentBlock.Transactions()[1:] {
+					iv := wire.NewInvVect(wire.InvTypeTx, tx.Sha())
+					b.server.RemoveRebroadcastInventory(iv)
+				}
+			}
+			for _, stx := range block.STransactions()[0:] {
+				iv := wire.NewInvVect(wire.InvTypeTx, stx.Sha())
 				b.server.RemoveRebroadcastInventory(iv)
 			}
 
@@ -1229,28 +2146,89 @@ func (b *blockManager) handleNotifyMsg(notification *blockchain.Notification) {
 
 			// If we're maintaing the address index, and it is up to date
 			// then update it based off this new block.
- if cfg.AddrIndex && b.server.addrIndexer.IsCaughtUp() { - b.server.addrIndexer.UpdateAddressIndex(block) + if !cfg.NoAddrIndex && b.server.addrIndexer.IsCaughtUp() { + err := b.server.addrIndexer.InsertBlock(block, parentBlock) + if err != nil { + bmgrLog.Errorf("AddrIndexManager error: %v", err.Error()) + } + } + + // Stake tickets are spent or missed from the most recently connected block. + case blockchain.NTSpentAndMissedTickets: + tnd, ok := notification.Data.(*blockchain.TicketNotificationsData) + if !ok { + bmgrLog.Warnf("Tickets connected notification is not " + + "TicketNotificationsData") + break + } + + if r := b.server.rpcServer; r != nil { + r.ntfnMgr.NotifySpentAndMissedTickets(tnd) + } + + // Stake tickets are matured from the most recently connected block. + case blockchain.NTNewTickets: + tnd, ok := notification.Data.(*blockchain.TicketNotificationsData) + if !ok { + bmgrLog.Warnf("Tickets connected notification is not " + + "TicketNotificationsData") + break + } + + if r := b.server.rpcServer; r != nil { + r.ntfnMgr.NotifyNewTickets(tnd) } // A block has been disconnected from the main block chain. case blockchain.NTBlockDisconnected: - block, ok := notification.Data.(*btcutil.Block) + blockSlice, ok := notification.Data.([]*dcrutil.Block) if !ok { - bmgrLog.Warnf("Chain disconnected notification is not a block.") + bmgrLog.Warnf("Chain disconnected notification is not a block slice.") break } - // Reinsert all of the transactions (except the coinbase) into - // the transaction pool. - for _, tx := range block.Transactions()[1:] { - _, err := b.server.txMemPool.MaybeAcceptTransaction(tx, - false, false) + if len(blockSlice) != 2 { + bmgrLog.Warnf("Chain disconnected notification is wrong size slice.") + break + } + + block := blockSlice[0] + parentBlock := blockSlice[1] + + // If the parent tx tree was invalidated, we need to remove these + // tx from the mempool as the next incoming block may alternatively + // validate them. + txTreeRegularValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits, + dcrutil.BlockValid) + + if !txTreeRegularValid { + for _, tx := range parentBlock.Transactions()[1:] { + b.server.txMemPool.RemoveTransaction(tx, false) + b.server.txMemPool.RemoveDoubleSpends(tx) + b.server.txMemPool.RemoveOrphan(tx.Sha()) + b.server.txMemPool.ProcessOrphans(tx.Sha()) + } + } + + // Reinsert all of the transactions (except the coinbase) from the parent + // tx tree regular into the transaction pool. + for _, tx := range parentBlock.Transactions()[1:] { + _, err := b.server.txMemPool.MaybeAcceptTransaction(tx, false, true) if err != nil { // Remove the transaction and all transactions // that depend on it if it wasn't accepted into // the transaction pool. - b.server.txMemPool.RemoveTransaction(tx) + b.server.txMemPool.RemoveTransaction(tx, true) + } + } + + for _, tx := range block.STransactions()[0:] { + _, err := b.server.txMemPool.MaybeAcceptTransaction(tx, false, true) + if err != nil { + // Remove the transaction and all transactions + // that depend on it if it wasn't accepted into + // the transaction pool. + b.server.txMemPool.RemoveTransaction(tx, true) } } @@ -1258,6 +2236,28 @@ func (b *blockManager) handleNotifyMsg(notification *blockchain.Notification) { if r := b.server.rpcServer; r != nil { r.ntfnMgr.NotifyBlockDisconnected(block) } + + // If we're maintaing the address index, and it is up to date + // then update it based off this removed block. 
+ if !cfg.NoAddrIndex && b.server.addrIndexer.IsCaughtUp() { + err := b.server.addrIndexer.RemoveBlock(block, parentBlock) + if err != nil { + bmgrLog.Errorf("AddrIndexManager error: %v", err.Error()) + } + } + + // The blockchain is reorganizing. + case blockchain.NTReorganization: + rd, ok := notification.Data.(*blockchain.ReorganizationNtfnsData) + if !ok { + bmgrLog.Warnf("Chain reorganization notification is malformed") + break + } + + // Notify registered websocket clients. + if r := b.server.rpcServer; r != nil { + r.ntfnMgr.NotifyReorganization(rd) + } } } @@ -1273,7 +2273,7 @@ func (b *blockManager) NewPeer(p *peer) { // QueueTx adds the passed transaction message and peer to the block handling // queue. -func (b *blockManager) QueueTx(tx *btcutil.Tx, p *peer) { +func (b *blockManager) QueueTx(tx *dcrutil.Tx, p *peer) { // Don't accept more transactions if we're shutting down. if atomic.LoadInt32(&b.shutdown) != 0 { p.txProcessed <- struct{}{} @@ -1284,7 +2284,7 @@ func (b *blockManager) QueueTx(tx *btcutil.Tx, p *peer) { } // QueueBlock adds the passed block message and peer to the block handling queue. -func (b *blockManager) QueueBlock(block *btcutil.Block, p *peer) { +func (b *blockManager) QueueBlock(block *dcrutil.Block, p *peer) { // Don't accept more blocks if we're shutting down. if atomic.LoadInt32(&b.shutdown) != 0 { p.blockProcessed <- struct{}{} @@ -1361,11 +2361,101 @@ func (b *blockManager) SyncPeer() *peer { return <-reply } +// RequestFromPeer allows an outside caller to request blocks or transactions +// from a peer. The requests are logged in the blockmanager's internal map of +// requests so they do not later ban the peer for sending the respective data. +func (b *blockManager) RequestFromPeer(p *peer, blocks, txs []*chainhash.Hash) error { + reply := make(chan requestFromPeerResponse) + b.msgChan <- requestFromPeerMsg{peer: p, blocks: blocks, txs: txs, + reply: reply} + response := <-reply + + return response.err +} + +func (b *blockManager) requestFromPeer(p *peer, blocks, txs []*chainhash.Hash) error { + msgResp := wire.NewMsgGetData() + + // Add the blocks to the request. + for _, bh := range blocks { + // If we've already requested this block, skip it. + _, alreadyReqP := p.requestedBlocks[*bh] + _, alreadyReqB := b.requestedBlocks[*bh] + + if alreadyReqP || alreadyReqB { + continue + } + + // Check to see if we already have this block, too. + // If so, skip. + exists, err := b.blockChain.HaveBlock(bh) + if err != nil { + return err + } + if exists { + continue + } + + err = msgResp.AddInvVect(wire.NewInvVect(wire.InvTypeBlock, bh)) + if err != nil { + return fmt.Errorf("unexpected error encountered building request "+ + "for mining state block %v: %v", + bh, err.Error()) + } + + p.requestedBlocks[*bh] = struct{}{} + b.requestedBlocks[*bh] = struct{}{} + b.requestedEverBlocks[*bh] = 0 + } + + // Add the vote transactions to the request. + for _, vh := range txs { + // If we've already requested this transaction, skip it. + _, alreadyReqP := p.requestedTxns[*vh] + _, alreadyReqB := b.requestedTxns[*vh] + + if alreadyReqP || alreadyReqB { + continue + } + + // Ask the transaction memory pool if the transaction is known + // to it in any form (main pool or orphan). + if b.server.txMemPool.HaveTransaction(vh) { + continue + } + + // Check if the transaction exists from the point of view of the + // end of the main chain. 
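`requestFromPeer` above skips any hash already present in the per-peer and manager-wide `requested*` maps before adding it to the getdata message, so the same block or vote is never fetched twice. Here is a compact sketch of that set-based de-duplication using plain 32-byte arrays; the tracker and its field names are illustrative, not the actual blockmanager fields, and the real code also consults the chain, database, and mempool before requesting.

```go
package main

import "fmt"

type hash [32]byte

// requestTracker holds the "already asked for" sets kept per peer and for the
// whole manager in this sketch.
type requestTracker struct {
	peerRequested map[hash]struct{}
	mgrRequested  map[hash]struct{}
}

// buildRequest returns only the hashes neither set has seen and records them
// so a later call will skip them, mirroring the dedup in requestFromPeer.
func (t *requestTracker) buildRequest(candidates []hash) []hash {
	var need []hash
	for _, h := range candidates {
		if _, ok := t.peerRequested[h]; ok {
			continue
		}
		if _, ok := t.mgrRequested[h]; ok {
			continue
		}
		t.peerRequested[h] = struct{}{}
		t.mgrRequested[h] = struct{}{}
		need = append(need, h)
	}
	return need
}

func main() {
	t := &requestTracker{
		peerRequested: make(map[hash]struct{}),
		mgrRequested:  make(map[hash]struct{}),
	}
	a, b := hash{1}, hash{2}
	fmt.Println(len(t.buildRequest([]hash{a, b}))) // 2: both new
	fmt.Println(len(t.buildRequest([]hash{a, b}))) // 0: already requested
}
```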
+ exists, err := b.server.db.ExistsTxSha(vh) + if err != nil { + return err + } + if exists { + continue + } + + err = msgResp.AddInvVect(wire.NewInvVect(wire.InvTypeTx, vh)) + if err != nil { + return fmt.Errorf("unexpected error encountered building request "+ + "for mining state vote %v: %v", + vh, err.Error()) + } + + p.requestedTxns[*vh] = struct{}{} + b.requestedTxns[*vh] = struct{}{} + b.requestedEverTxns[*vh] = 0 + } + + p.QueueMessage(msgResp, nil) + + return nil +} + // CheckConnectBlock performs several checks to confirm connecting the passed // block to the main chain does not violate any rules. This function makes use // of CheckConnectBlock on an internal instance of a block chain. It is funneled -// through the block manager since btcchain is not safe for concurrent access. -func (b *blockManager) CheckConnectBlock(block *btcutil.Block) error { +// through the block manager since blockchain is not safe for concurrent access. +func (b *blockManager) CheckConnectBlock(block *dcrutil.Block) error { reply := make(chan error) b.msgChan <- checkConnectBlockMsg{block: block, reply: reply} return <-reply @@ -1374,7 +2464,7 @@ func (b *blockManager) CheckConnectBlock(block *btcutil.Block) error { // CalcNextRequiredDifficulty calculates the required difficulty for the next // block after the current main chain. This function makes use of // CalcNextRequiredDifficulty on an internal instance of a block chain. It is -// funneled through the block manager since btcchain is not safe for concurrent +// funneled through the block manager since blockchain is not safe for concurrent // access. func (b *blockManager) CalcNextRequiredDifficulty(timestamp time.Time) (uint32, error) { reply := make(chan calcNextReqDifficultyResponse) @@ -1383,25 +2473,125 @@ func (b *blockManager) CalcNextRequiredDifficulty(timestamp time.Time) (uint32, return response.difficulty, response.err } -// FetchTransactionStore makes use of FetchTransactionStore on an internal -// instance of a block chain. It is safe for concurrent access. -func (b *blockManager) FetchTransactionStore(tx *btcutil.Tx) (blockchain.TxStore, error) { - reply := make(chan fetchTransactionStoreResponse, 1) - b.msgChan <- fetchTransactionStoreMsg{tx: tx, reply: reply} +// CalcNextRequiredDiffNode calculates the required difficulty for the next +// block after the passed block hash. This function makes use of +// CalcNextRequiredDiffFromNode on an internal instance of a block chain. It is +// funneled through the block manager since blockchain is not safe for concurrent +// access. +func (b *blockManager) CalcNextRequiredDiffNode(hash *chainhash.Hash, + timestamp time.Time) (uint32, error) { + reply := make(chan calcNextReqDifficultyResponse) + b.msgChan <- calcNextReqDiffNodeMsg{ + hash: hash, + timestamp: timestamp, + reply: reply, + } response := <-reply - return response.TxStore, response.err + return response.difficulty, response.err +} + +// CalcNextRequiredStakeDifficulty calculates the required Stake difficulty for +// the next block after the current main chain. This function makes use of +// CalcNextRequiredStakeDifficulty on an internal instance of a block chain. It is +// funneled through the block manager since blockchain is not safe for concurrent +// access. 
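The exported wrappers in this file (CheckConnectBlock, CalcNextRequiredDifficulty, the stake-difficulty variants, and the getters that follow) all share one shape: package a request and a reply channel, push it onto `b.msgChan`, and block on the reply, so the single blockManager goroutine is the only code that ever touches the non-concurrent-safe chain state. The sketch below strips that funneling pattern down to one request type; all names are invented for illustration.

```go
package main

import "fmt"

// calcDiffMsg is a request funneled to the manager goroutine; the caller
// blocks on reply until the owning goroutine has produced an answer.
type calcDiffMsg struct {
	timestamp int64
	reply     chan calcDiffResponse
}

type calcDiffResponse struct {
	difficulty uint32
	err        error
}

type manager struct {
	msgChan chan interface{}
	quit    chan struct{}
}

// start runs the single goroutine that owns all mutable state, in the spirit
// of the block manager's main handler loop.
func (m *manager) start() {
	go func() {
		for {
			select {
			case msg := <-m.msgChan:
				switch req := msg.(type) {
				case calcDiffMsg:
					// Only this goroutine touches chain state,
					// so no locks are needed here.
					req.reply <- calcDiffResponse{difficulty: 0x1d00ffff}
				}
			case <-m.quit:
				return
			}
		}
	}()
}

// CalcNextRequiredDifficulty is the thread-safe wrapper exposed to callers.
func (m *manager) CalcNextRequiredDifficulty(ts int64) (uint32, error) {
	reply := make(chan calcDiffResponse)
	m.msgChan <- calcDiffMsg{timestamp: ts, reply: reply}
	resp := <-reply
	return resp.difficulty, resp.err
}

func main() {
	m := &manager{msgChan: make(chan interface{}), quit: make(chan struct{})}
	m.start()
	diff, err := m.CalcNextRequiredDifficulty(1454954400)
	fmt.Println(diff, err)
	close(m.quit)
}
```

The design trades a little latency per call for complete freedom from locking inside the chain-handling code.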
+func (b *blockManager) CalcNextRequiredStakeDifficulty() (int64, error) { + reply := make(chan calcNextReqStakeDifficultyResponse) + b.msgChan <- calcNextReqStakeDifficultyMsg{reply: reply} + response := <-reply + return response.stakeDifficulty, response.err +} + +// ForceReorganization returns the hashes of all the children of a parent for the +// block hash that is passed to the function. It is funneled through the block +// manager since blockchain is not safe for concurrent access. +func (b *blockManager) ForceReorganization(formerBest, newBest chainhash.Hash) error { + reply := make(chan forceReorganizationResponse) + b.msgChan <- forceReorganizationMsg{ + formerBest: formerBest, + newBest: newBest, + reply: reply} + response := <-reply + return response.err +} + +// GetGeneration returns the hashes of all the children of a parent for the +// block hash that is passed to the function. It is funneled through the block +// manager since blockchain is not safe for concurrent access. +func (b *blockManager) GetGeneration(h chainhash.Hash) ([]chainhash.Hash, error) { + reply := make(chan getGenerationResponse) + b.msgChan <- getGenerationMsg{hash: h, reply: reply} + response := <-reply + return response.hashes, response.err +} + +// GetBlockFromHash returns a block for some hash from the block manager, so +// long as the block exists. It is funneled through the block manager since +// blockchain is not safe for concurrent access. +func (b *blockManager) GetBlockFromHash(h chainhash.Hash) (*dcrutil.Block, error) { + reply := make(chan getBlockFromHashResponse) + b.msgChan <- getBlockFromHashMsg{hash: h, reply: reply} + response := <-reply + return response.block, response.err +} + +// GetLotteryData returns the hashes of all the winning tickets for a given +// orphan block along with the pool size and the final state. It is funneled +// through the block manager since blockchain is not safe for concurrent access. +func (b *blockManager) GetLotteryData(hash chainhash.Hash) ([]chainhash.Hash, + uint32, [6]byte, error) { + reply := make(chan getLotterDataResponse) + b.msgChan <- getLotteryDataMsg{ + hash: hash, + reply: reply} + response := <-reply + return response.winningTickets, response.poolSize, response.finalState, + response.err +} + +// GetTopBlockFromChain obtains the current top block from HEAD of the blockchain. +// Returns a pointer to the cached copy of the block in memory. +func (b *blockManager) GetTopBlockFromChain() (*dcrutil.Block, error) { + reply := make(chan getTopBlockResponse) + b.msgChan <- getTopBlockMsg{reply: reply} + response := <-reply + return &response.block, response.err } // ProcessBlock makes use of ProcessBlock on an internal instance of a block -// chain. It is funneled through the block manager since btcchain is not safe +// chain. It is funneled through the block manager since blockchain is not safe // for concurrent access. -func (b *blockManager) ProcessBlock(block *btcutil.Block, flags blockchain.BehaviorFlags) (bool, error) { +func (b *blockManager) ProcessBlock(block *dcrutil.Block, + flags blockchain.BehaviorFlags) (bool, error) { reply := make(chan processBlockResponse, 1) b.msgChan <- processBlockMsg{block: block, flags: flags, reply: reply} response := <-reply return response.isOrphan, response.err } +// ProcessTransaction makes use of ProcessTransaction on an internal instance of +// a block chain. It is funneled through the block manager since blockchain is +// not safe for concurrent access. 
+func (b *blockManager) ProcessTransaction(tx *dcrutil.Tx, allowOrphans bool, + rateLimit bool) error { + reply := make(chan processTransactionResponse, 1) + b.msgChan <- processTransactionMsg{tx, allowOrphans, rateLimit, reply} + response := <-reply + return response.err +} + +// FetchTransactionStore makes use of FetchTransactionStore on an internal +// instance of a block chain. It is safe for concurrent access. +func (b *blockManager) FetchTransactionStore(tx *dcrutil.Tx, + isTreeValid bool) (blockchain.TxStore, error) { + reply := make(chan fetchTransactionStoreResponse, 1) + b.msgChan <- fetchTransactionStoreMsg{tx: tx, + isTreeValid: isTreeValid, + reply: reply} + response := <-reply + return response.TxStore, response.err +} + // IsCurrent returns whether or not the block manager believes it is synced with // the connected peers. func (b *blockManager) IsCurrent() bool { @@ -1410,6 +2600,14 @@ func (b *blockManager) IsCurrent() bool { return <-reply } +// MissedTickets returns a slice of missed ticket hashes. +func (b *blockManager) MissedTickets() (stake.SStxMemMap, error) { + reply := make(chan missedTicketsResponse) + b.msgChan <- missedTicketsMsg{reply: reply} + response := <-reply + return response.Tickets, response.err +} + // Pause pauses the block manager until the returned channel is closed. // // Note that while paused, all peer and block processing is halted. The @@ -1420,7 +2618,48 @@ func (b *blockManager) Pause() chan<- struct{} { return c } -// newBlockManager returns a new bitcoin block manager. +// TicketsForAddress returns a list of ticket hashes owned by the address. +func (b *blockManager) TicketsForAddress(address dcrutil.Address) ( + []chainhash.Hash, error) { + reply := make(chan ticketsForAddressResponse) + b.msgChan <- ticketsForAddressMsg{Address: address, reply: reply} + response := <-reply + return response.Tickets, response.err +} + +// GetCurrentTemplate gets the current block template for mining. +func (b *blockManager) GetCurrentTemplate() *BlockTemplate { + reply := make(chan getCurrentTemplateResponse) + b.msgChan <- getCurrentTemplateMsg{reply: reply} + response := <-reply + return response.Template +} + +// SetCurrentTemplate sets the current block template for mining. +func (b *blockManager) SetCurrentTemplate(bt *BlockTemplate) { + reply := make(chan setCurrentTemplateResponse) + b.msgChan <- setCurrentTemplateMsg{Template: bt, reply: reply} + <-reply + return +} + +// GetParentTemplate gets the current parent block template for mining. +func (b *blockManager) GetParentTemplate() *BlockTemplate { + reply := make(chan getParentTemplateResponse) + b.msgChan <- getParentTemplateMsg{reply: reply} + response := <-reply + return response.Template +} + +// SetParentTemplate sets the current parent block template for mining. +func (b *blockManager) SetParentTemplate(bt *BlockTemplate) { + reply := make(chan setParentTemplateResponse) + b.msgChan <- setParentTemplateMsg{Template: bt, reply: reply} + <-reply + return +} + +// newBlockManager returns a new decred block manager. // Use Start to begin processing asynchronous block and inv updates. 
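The Pause method documented above stops all peer and block processing until the caller closes the returned channel. A self-contained sketch of that idiom follows; the worker and message names are hypothetical, but the mechanism (the event loop blocks on a receive from the unpause channel, which unblocks when the channel is closed) is the one described in the comment.

```go
package main

import (
	"fmt"
	"time"
)

type worker struct {
	msgChan chan interface{}
}

type pauseMsg struct {
	unpause <-chan struct{}
}

// run is the worker's single event loop. When it receives a pauseMsg it stops
// handling anything else until the caller closes the unpause channel.
func (w *worker) run() {
	for msg := range w.msgChan {
		switch m := msg.(type) {
		case pauseMsg:
			fmt.Println("paused")
			<-m.unpause // blocks until the channel is closed
			fmt.Println("resumed")
		case string:
			fmt.Println("processing:", m)
		}
	}
}

// Pause asks the worker to stop and returns the channel the caller must close
// to let it continue, matching the signature style of blockManager.Pause.
func (w *worker) Pause() chan<- struct{} {
	c := make(chan struct{})
	w.msgChan <- pauseMsg{unpause: c}
	return c
}

func main() {
	w := &worker{msgChan: make(chan interface{}, 4)}
	go w.run()

	w.msgChan <- "block 1"
	resume := w.Pause()
	w.msgChan <- "block 2" // queued, not handled until resume
	time.Sleep(50 * time.Millisecond)
	close(resume)
	time.Sleep(50 * time.Millisecond)
}
```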
func newBlockManager(s *server) (*blockManager, error) { newestHash, height, err := s.db.NewestSha() @@ -1429,16 +2668,20 @@ func newBlockManager(s *server) (*blockManager, error) { } bm := blockManager{ - server: s, - requestedTxns: make(map[wire.ShaHash]struct{}), - requestedBlocks: make(map[wire.ShaHash]struct{}), - progressLogger: newBlockProgressLogger("Processed", bmgrLog), - msgChan: make(chan interface{}, cfg.MaxPeers*3), - headerList: list.New(), - quit: make(chan struct{}), + server: s, + requestedTxns: make(map[chainhash.Hash]struct{}), + requestedEverTxns: make(map[chainhash.Hash]uint8), + requestedBlocks: make(map[chainhash.Hash]struct{}), + requestedEverBlocks: make(map[chainhash.Hash]uint8), + progressLogger: newBlockProgressLogger("Processed", bmgrLog), + lastBlockLogTime: time.Now(), + msgChan: make(chan interface{}, cfg.MaxPeers*3), + headerList: list.New(), + AggressiveMining: !cfg.NonAggressive, + quit: make(chan struct{}), } bm.progressLogger = newBlockProgressLogger("Processed", bmgrLog) - bm.blockChain = blockchain.New(s.db, s.chainParams, bm.handleNotifyMsg) + bm.blockChain = blockchain.New(s.db, s.tmdb, s.chainParams, bm.handleNotifyMsg) bm.blockChain.DisableCheckpoints(cfg.DisableCheckpoints) if !cfg.DisableCheckpoints { // Initialize the next checkpoint based on the current height. @@ -1460,39 +2703,36 @@ func newBlockManager(s *server) (*blockManager, error) { // Initialize the chain state now that the intial block node index has // been generated. - bm.updateChainState(newestHash, height) + + // Query the DB for the current winning ticket data. + wt, ps, fs, err := bm.blockChain.GetWinningTickets(*newestHash) + if err != nil { + return nil, err + } + + // Query the DB for the currently missed tickets. + missedTickets := bm.blockChain.GetMissedTickets() + if err != nil && height >= bm.server.chainParams.StakeValidationHeight { + return nil, err + } + + // Retrieve the current block header. + curBlockHeader := bm.blockChain.GetCurrentBlockHeader() + + bm.updateChainState(newestHash, + height, + fs, + uint32(ps), + wt, + missedTickets, + curBlockHeader) + + bm.blockLotteryDataCacheMutex = new(sync.Mutex) + bm.blockLotteryDataCache = make(map[chainhash.Hash]*BlockLotteryData) return &bm, nil } -// removeRegressionDB removes the existing regression test database if running -// in regression test mode and it already exists. -func removeRegressionDB(dbPath string) error { - // Dont do anything if not in regression test mode. - if !cfg.RegressionTest { - return nil - } - - // Remove the old regression test database if it already exists. - fi, err := os.Stat(dbPath) - if err == nil { - btcdLog.Infof("Removing regression test database from '%s'", dbPath) - if fi.IsDir() { - err := os.RemoveAll(dbPath) - if err != nil { - return err - } - } else { - err := os.Remove(dbPath) - if err != nil { - return err - } - } - } - - return nil -} - // dbPath returns the path to the block database given a database type. func blockDbPath(dbType string) string { // The database name is based on the database type. @@ -1504,10 +2744,10 @@ func blockDbPath(dbType string) string { return dbPath } -// warnMultipeDBs shows a warning if multiple block database types are detected. +// warnMultipleDBs shows a warning if multiple block database types are detected. // This is not a situation most users want. It is handy for development however // to support multiple side-by-side databases. 
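warnMultipleDBs, described just above and shown in the next hunk, checks whether databases for other backend types already exist on disk and warns about the wasted space. The sketch below shows one way such a check can look; the "blocks_&lt;type&gt;" path naming is an assumption for illustration only and may not match what blockDbPath actually produces.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// dbPathForType builds a candidate database path for a backend type. The
// naming scheme here is assumed purely for this sketch.
func dbPathForType(dataDir, dbType string) string {
	return filepath.Join(dataDir, "blocks_"+dbType)
}

// findDuplicateDbPaths reports which database directories other than the
// selected type already exist on disk, in the spirit of warnMultipleDBs.
func findDuplicateDbPaths(dataDir, selected string, known []string) []string {
	var dups []string
	for _, dbType := range known {
		if dbType == selected {
			continue
		}
		path := dbPathForType(dataDir, dbType)
		if _, err := os.Stat(path); err == nil {
			dups = append(dups, path)
		}
	}
	return dups
}

func main() {
	dups := findDuplicateDbPaths(os.TempDir(), "leveldb",
		[]string{"leveldb", "sqlite", "memdb"})
	if len(dups) > 0 {
		fmt.Printf("WARNING: extra block databases found: %v\n", dups)
	} else {
		fmt.Println("only one block database type present")
	}
}
```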
-func warnMultipeDBs() { +func warnMultipleDBs() { // This is intentionally not using the known db types which depend // on the database types compiled into the binary since we want to // detect legacy db types as well. @@ -1528,7 +2768,7 @@ func warnMultipeDBs() { // Warn if there are extra databases. if len(duplicateDbPaths) > 0 { selectedDbPath := blockDbPath(cfg.DbType) - btcdLog.Warnf("WARNING: There are multiple block chain databases "+ + dcrdLog.Warnf("WARNING: There are multiple block chain databases "+ "using different database types.\nYou probably don't "+ "want to waste disk space by having more than one.\n"+ "Your current database is located at [%v].\nThe "+ @@ -1542,34 +2782,30 @@ func warnMultipeDBs() { // such warning the user if there are multiple databases which consume space on // the file system and ensuring the regression test database is clean when in // regression test mode. -func setupBlockDB() (database.Db, error) { +func setupBlockDB() (dcrdb.Db, error) { // The memdb backend does not have a file path associated with it, so // handle it uniquely. We also don't want to worry about the multiple // database type warnings when running with the memory database. if cfg.DbType == "memdb" { - btcdLog.Infof("Creating block database in memory.") - db, err := database.CreateDB(cfg.DbType) + dcrdLog.Infof("Creating block database in memory.") + database, err := dcrdb.CreateDB(cfg.DbType) if err != nil { return nil, err } - return db, nil + return database, nil } - warnMultipeDBs() + warnMultipleDBs() // The database name is based on the database type. dbPath := blockDbPath(cfg.DbType) - // The regression test is special in that it needs a clean database for - // each run, so remove it now if it already exists. - removeRegressionDB(dbPath) - - btcdLog.Infof("Loading block database from '%s'", dbPath) - db, err := database.OpenDB(cfg.DbType, dbPath) + dcrdLog.Infof("Loading block database from '%s'", dbPath) + database, err := dcrdb.OpenDB(cfg.DbType, dbPath) if err != nil { // Return the error if it's not because the database // doesn't exist. - if err != database.ErrDbDoesNotExist { + if err != dcrdb.ErrDbDoesNotExist { return nil, err } @@ -1578,43 +2814,139 @@ func setupBlockDB() (database.Db, error) { if err != nil { return nil, err } - db, err = database.CreateDB(cfg.DbType, dbPath) + database, err = dcrdb.CreateDB(cfg.DbType, dbPath) if err != nil { return nil, err } } - return db, nil + return database, nil +} + +// dumpBlockChain dumps a map of the blockchain blocks as serialized bytes. +func dumpBlockChain(height int64, db dcrdb.Db) error { + blockchain := make(map[int64][]byte) + for i := int64(0); i <= height; i++ { + // Fetch blocks and put them in the map + sha, err := db.FetchBlockShaByHeight(i) + if err != nil { + return err + } + + block, err := db.FetchBlockBySha(sha) + if err != nil { + return err + } + + blockBytes, err := block.Bytes() + if err != nil { + return err + } + blockchain[i] = blockBytes + } + + // Serialize the map into a buffer + w := new(bytes.Buffer) + encoder := gob.NewEncoder(w) + if err := encoder.Encode(blockchain); err != nil { + return err + } + + // Write the buffer to disk + err := ioutil.WriteFile(cfg.DumpBlockchain, w.Bytes(), 0664) + if err != nil { + return err + } + + return nil } // loadBlockDB opens the block database and returns a handle to it. 
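The new dumpBlockChain helper in the next hunk collects every block's serialized bytes into a `map[int64][]byte`, gob-encodes the map into a buffer, and writes it to the path given by the DumpBlockchain option. Here is a minimal, standalone version of that encode-then-write flow, plus a matching decoder a consumer of the dump might use; the function names are invented for the sketch.

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// dumpHeightMap gob-encodes a height -> serialized-block map and writes it to
// path, following the same encode-then-WriteFile flow as dumpBlockChain.
func dumpHeightMap(blocks map[int64][]byte, path string) error {
	w := new(bytes.Buffer)
	if err := gob.NewEncoder(w).Encode(blocks); err != nil {
		return err
	}
	return ioutil.WriteFile(path, w.Bytes(), 0664)
}

// loadHeightMap reads the dump back into memory.
func loadHeightMap(path string) (map[int64][]byte, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var blocks map[int64][]byte
	if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&blocks); err != nil {
		return nil, err
	}
	return blocks, nil
}

func main() {
	path := filepath.Join(os.TempDir(), "blockchain.gob")
	in := map[int64][]byte{0: {0xde, 0xad}, 1: {0xbe, 0xef}}
	if err := dumpHeightMap(in, path); err != nil {
		fmt.Println("dump failed:", err)
		return
	}
	out, err := loadHeightMap(path)
	fmt.Printf("reloaded %d blocks, err=%v\n", len(out), err)
}
```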
-func loadBlockDB() (database.Db, error) { +func loadBlockDB() (dcrdb.Db, error) { db, err := setupBlockDB() if err != nil { return nil, err } - // Get the latest block height from the database. + // Get the latest block height from the db. _, height, err := db.NewestSha() if err != nil { db.Close() return nil, err } - // Insert the appropriate genesis block for the bitcoin network being + // Insert the appropriate genesis block for the decred network being // connected to if needed. if height == -1 { - genesis := btcutil.NewBlock(activeNetParams.GenesisBlock) + genesis := dcrutil.NewBlock(activeNetParams.GenesisBlock) + genesis.SetHeight(int64(0)) _, err := db.InsertBlock(genesis) if err != nil { db.Close() return nil, err } - btcdLog.Infof("Inserted genesis block %v", + dcrdLog.Infof("Inserted genesis block %v", activeNetParams.GenesisHash) height = 0 } - btcdLog.Infof("Block database loaded with block height %d", height) + dcrdLog.Infof("Block database loaded with block height %d", height) + + if cfg.DumpBlockchain != "" { + dumpBlockChain(height, db) + return nil, errors.New("Block database dump to map completed, closing.") + } + return db, nil } + +// loadTicketDB opens the ticket database and returns a handle to it. +func loadTicketDB(db dcrdb.Db, + chainParams *chaincfg.Params) (*stake.TicketDB, error) { + path := cfg.DataDir + filename := filepath.Join(path, "ticketdb.gob") + + // Check to see if the tmdb exists on disk. + tmdbExists := true + if _, err := os.Stat(filename); os.IsNotExist(err) { + tmdbExists = false + } + + var tmdb stake.TicketDB + + if !tmdbExists { + // Load a blank copy of the ticket database and sync it. + tmdb.Initialize(chainParams, db) + + // Get the latest block height from the db. + _, curHeight, err := db.NewestSha() + if err != nil { + return nil, err + } + dcrdLog.Infof("Block ticket database initialized empty") + + if curHeight > 0 { + dcrdLog.Infof("Db non-empty, resyncing ticket DB") + err := tmdb.RescanTicketDB() + + if err != nil { + return nil, err + } + } + return &tmdb, nil + } else { + dcrdLog.Infof("Loading ticket database from disk") + err := tmdb.LoadTicketDBs(path, + "ticketdb.gob", + chainParams, + db) + + if err != nil { + return nil, err + } + } + dcrdLog.Infof("Ticket DB loaded with top block height %v", + tmdb.GetTopBlock()) + + return &tmdb, nil +} diff --git a/btcec/README.md b/btcec/README.md deleted file mode 100644 index 55ab1904..00000000 --- a/btcec/README.md +++ /dev/null @@ -1,84 +0,0 @@ -btcec -===== - -[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)] -(https://travis-ci.org/btcsuite/btcec) - -Package btcec implements elliptic curve cryptography needed for working with -Bitcoin (secp256k1 only for now). It is designed so that it may be used with the -standard crypto/ecdsa packages provided with go. A comprehensive suite of test -is provided to ensure proper functionality. Package btcec was originally based -on work from ThePiachu which is licensed under the same terms as Go, but it has -signficantly diverged since then. The btcsuite developers original is licensed -under the liberal ISC license. - -Although this package was primarily written for btcd, it has intentionally been -designed so it can be used as a standalone package for any projects needing to -use secp256k1 elliptic curve cryptography. 
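loadTicketDB above decides between two startup paths by statting `ticketdb.gob`: if the file is missing it initializes an empty ticket database and rescans from the block database, otherwise it loads the serialized copy from disk. The snippet below isolates that init-or-load decision with a toy store type; `ticketStore` and its methods are stand-ins, not the stake package API.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// ticketStore stands in for stake.TicketDB in this sketch.
type ticketStore struct {
	loadedFromDisk bool
}

func (t *ticketStore) initializeEmpty() { t.loadedFromDisk = false }

func (t *ticketStore) loadFrom(path string) error {
	t.loadedFromDisk = true
	return nil
}

// openTicketStore mirrors the decision in loadTicketDB: if the serialized
// database is missing, start empty (a resync would follow); otherwise load
// the saved copy.
func openTicketStore(dataDir string) (*ticketStore, error) {
	filename := filepath.Join(dataDir, "ticketdb.gob")

	var ts ticketStore
	if _, err := os.Stat(filename); os.IsNotExist(err) {
		ts.initializeEmpty()
		fmt.Println("ticket database initialized empty; resync required")
		return &ts, nil
	}
	if err := ts.loadFrom(filename); err != nil {
		return nil, err
	}
	fmt.Println("ticket database loaded from disk")
	return &ts, nil
}

func main() {
	if _, err := openTicketStore(os.TempDir()); err != nil {
		fmt.Println("error:", err)
	}
}
```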
- -## Documentation - -[![GoDoc](https://godoc.org/github.com/btcsuite/btcd/btcec?status.png)] -(http://godoc.org/github.com/btcsuite/btcd/btcec) - -Full `go doc` style documentation for the project can be viewed online without -installing this package by using the GoDoc site -[here](http://godoc.org/github.com/btcsuite/btcd/btcec). - -You can also view the documentation locally once the package is installed with -the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to -http://localhost:6060/pkg/github.com/btcsuite/btcd/btcec - -## Installation - -```bash -$ go get github.com/btcsuite/btcd/btcec -``` - -## Examples - -* [Sign Message] - (http://godoc.org/github.com/btcsuite/btcd/btcec#example-package--SignMessage) - Demonstrates signing a message with a secp256k1 private key that is first - parsed form raw bytes and serializing the generated signature. - -* [Verify Signature] - (http://godoc.org/github.com/btcsuite/btcd/btcec#example-package--VerifySignature) - Demonstrates verifying a secp256k1 signature against a public key that is - first parsed from raw bytes. The signature is also parsed from raw bytes. - -* [Encryption] - (http://godoc.org/github.com/btcsuite/btcd/btcec#example-package--EncryptMessage) - Demonstrates encrypting a message for a public key that is first parsed from - raw bytes, then decrypting it using the corresponding private key. - -* [Decryption] - (http://godoc.org/github.com/btcsuite/btcd/btcec#example-package--DecryptMessage) - Demonstrates decrypting a message using a private key that is first parsed - from raw bytes. - -## GPG Verification Key - -All official release tags are signed by Conformal so users can ensure the code -has not been tampered with and is coming from the btcsuite developers. To -verify the signature perform the following: - -- Download the public key from the Conformal website at - https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt - -- Import the public key into your GPG keyring: - ```bash - gpg --import GIT-GPG-KEY-conformal.txt - ``` - -- Verify the release tag with the following command where `TAG_NAME` is a - placeholder for the specific tag: - ```bash - git tag -v TAG_NAME - ``` - -## License - -Package btcec is licensed under the [copyfree](http://copyfree.org) ISC License -except for btcec.go and btcec_test.go which is under the same license as Go. - diff --git a/chaincfg/README.md b/chaincfg/README.md index 467eac80..83e01348 100644 --- a/chaincfg/README.md +++ b/chaincfg/README.md @@ -1,17 +1,15 @@ chaincfg ======== -[![Build Status](http://img.shields.io/travis/btcsuite/btcd.svg)] -(https://travis-ci.org/btcsuite/btcd) [![ISC License] (http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) Package chaincfg defines chain configuration parameters for the three standard -Bitcoin networks and provides the ability for callers to define their own custom -Bitcoin networks. +Decred networks and provides the ability for callers to define their own custom +Decred networks. -Although this package was primarily written for btcd, it has intentionally been +Although this package was primarily written for dcrd, it has intentionally been designed so it can be used as a standalone package for any projects needing to -use parameters for the standard Bitcoin networks or for projects needing to +use parameters for the standard Decred networks or for projects needing to define their own network. 
## Sample Use @@ -24,11 +22,11 @@ import ( "fmt" "log" - "github.com/btcsuite/btcutil" - "github.com/btcsuite/btcd/chaincfg" + "github.com/decred/dcrutil" + "github.com/decred/dcrd/chaincfg" ) -var testnet = flag.Bool("testnet", false, "operate on the testnet Bitcoin network") +var testnet = flag.Bool("testnet", false, "operate on the testnet Decred network") // By default (without -testnet), use mainnet. var chainParams = &chaincfg.MainNetParams @@ -38,7 +36,7 @@ func main() { // Modify active network parameters if operating on testnet. if *testnet { - chainParams = &chaincfg.TestNet3Params + chainParams = &chaincfg.TestNetParams } // later... @@ -56,42 +54,22 @@ func main() { ## Documentation [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)] -(http://godoc.org/github.com/btcsuite/btcd/chaincfg) +(http://godoc.org/github.com/decred/dcrd/chaincfg) Full `go doc` style documentation for the project can be viewed online without installing this package by using the GoDoc site -[here](http://godoc.org/github.com/btcsuite/btcd/chaincfg). +[here](http://godoc.org/github.com/decred/dcrd/chaincfg). You can also view the documentation locally once the package is installed with the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to -http://localhost:6060/pkg/github.com/btcsuite/btcd/chaincfg +http://localhost:6060/pkg/github.com/decred/dcrd/chaincfg ## Installation ```bash -$ go get github.com/btcsuite/btcd/chaincfg +$ go get github.com/decred/dcrd/chaincfg ``` -## GPG Verification Key - -All official release tags are signed by Conformal so users can ensure the code -has not been tampered with and is coming from the btcsuite developers. To -verify the signature perform the following: - -- Download the public key from the Conformal website at - https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt - -- Import the public key into your GPG keyring: - ```bash - gpg --import GIT-GPG-KEY-conformal.txt - ``` - -- Verify the release tag with the following command where `TAG_NAME` is a - placeholder for the specific tag: - ```bash - git tag -v TAG_NAME - ``` - ## License Package chaincfg is licensed under the [copyfree](http://copyfree.org) ISC diff --git a/chaincfg/chainec/chainec.go b/chaincfg/chainec/chainec.go new file mode 100644 index 00000000..0b2bb946 --- /dev/null +++ b/chaincfg/chainec/chainec.go @@ -0,0 +1,232 @@ +// Copyright (c) 2015 The Decred Developers +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package chainec + +import ( + "crypto/ecdsa" + "io" + "math/big" +) + +// PublicKey is an interface representing a public key and its associated +// functions. +type PublicKey interface { + // Serialize is the default serialization method. + Serialize() []byte + + // SerializeUncompressed serializes to the uncompressed format (if + // available). + SerializeUncompressed() []byte + + // SerializeCompressed serializes to the compressed format (if + // available). + SerializeCompressed() []byte + + // SerializeHybrid serializes to the hybrid format (if + // available). + SerializeHybrid() []byte + + // ToECDSA converts the public key to an ECDSA public key. + ToECDSA() *ecdsa.PublicKey + + // GetCurve returns the current curve as an interface. + GetCurve() interface{} + + // GetX returns the point's X value. + GetX() *big.Int + + // GetY returns the point's Y value. + GetY() *big.Int + + // GetType returns the ECDSA type of this key. 
+ GetType() int +} + +// PrivateKey is an interface representing a private key and its associated +// functions. +type PrivateKey interface { + // Serialize serializes the 32-byte private key scalar to a + // byte slice. + Serialize() []byte + + // SerializeSecret serializes the secret to the default serialization + // format. Used for Ed25519. + SerializeSecret() []byte + + // Public returns the (X,Y) coordinates of the point produced + // by scalar multiplication of the scalar by the base point, + // AKA the public key. + Public() (*big.Int, *big.Int) + + // GetD returns the value of the private scalar. + GetD() *big.Int + + // GetType returns the ECDSA type of this key. + GetType() int +} + +// Signature is an interface representing a signature and its associated +// functions. +type Signature interface { + // Serialize serializes the signature to the default serialization + // format. + Serialize() []byte + + // GetR gets the R value of the signature. + GetR() *big.Int + + // GetS gets the S value of the signature. + GetS() *big.Int + + // GetType returns the ECDSA type of this key. + GetType() int +} + +// DSA is an encapsulating interface for all the functions of a digital +// signature algorithm. +type DSA interface { + // ---------------------------------------------------------------------------- + // Constants + // + // GetP gets the prime modulus of the curve. + GetP() *big.Int + + // GetN gets the prime order of the curve. + GetN() *big.Int + + // ---------------------------------------------------------------------------- + // EC Math + // + // Add adds two points on the curve. + Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) + + // IsOnCurve checks if a given point is on the curve. + IsOnCurve(x *big.Int, y *big.Int) bool + + // ScalarMult gives the product of scalar multiplication of scalar k + // by point (x,y) on the curve. + ScalarMult(x, y *big.Int, k []byte) (*big.Int, *big.Int) + + // ScalarBaseMult gives the product of scalar multiplication of + // scalar k by the base point (generator) of the curve. + ScalarBaseMult(k []byte) (*big.Int, *big.Int) + + // ---------------------------------------------------------------------------- + // Private keys + // + // NewPrivateKey instantiates a new private key for the given + // curve. + NewPrivateKey(*big.Int) PrivateKey + + // PrivKeyFromBytes calculates the public key from serialized bytes, + // and returns both it and the private key. + PrivKeyFromBytes(pk []byte) (PrivateKey, PublicKey) + + // PrivKeyFromScalar calculates the public key from serialized scalar + // bytes, and returns both it and the private key. Useful for curves + // like Ed25519, where serialized private keys are different from + // serialized private scalars. + PrivKeyFromScalar(pk []byte) (PrivateKey, PublicKey) + + // PrivKeyBytesLen returns the length of a serialized private key. + PrivKeyBytesLen() int + + // ---------------------------------------------------------------------------- + // Public keys + // + // NewPublicKey instantiates a new public key (point) for the + // given curve. + NewPublicKey(x *big.Int, y *big.Int) PublicKey + + // ParsePubKey parses a serialized public key for the given + // curve and returns a public key. + ParsePubKey(pubKeyStr []byte) (PublicKey, error) + + // PubKeyBytesLen returns the length of the default serialization + // method for a public key. + PubKeyBytesLen() int + + // PubKeyBytesLenUncompressed returns the length of the uncompressed + // serialization method for a public key. 
+ PubKeyBytesLenUncompressed() int + + // PubKeyBytesLenCompressed returns the length of the compressed + // serialization method for a public key. + PubKeyBytesLenCompressed() int + + // PubKeyBytesLenHybrid returns the length of the hybrid + // serialization method for a public key. + PubKeyBytesLenHybrid() int + + // ---------------------------------------------------------------------------- + // Signatures + // + // NewSignature instantiates a new signature for the given ECDSA + // method. + NewSignature(r *big.Int, s *big.Int) Signature + + // ParseDERSignature parses a DER encoded signature for the given + // ECDSA method. If the method doesn't support DER signatures, it + // just parses with the default method. + ParseDERSignature(sigStr []byte) (Signature, error) + + // ParseSignature a default encoded signature for the given ECDSA + // method. + ParseSignature(sigStr []byte) (Signature, error) + + // RecoverCompact recovers a public key from an encoded signature + // and message, then verifies the signature against the public + // key. + RecoverCompact(signature, hash []byte) (PublicKey, bool, error) + + // ---------------------------------------------------------------------------- + // ECDSA + // + // GenerateKey generates a new private and public keypair from the + // given reader. + GenerateKey(rand io.Reader) ([]byte, *big.Int, *big.Int, error) + + // Sign produces an ECDSA signature in the form of (R,S) using a + // private key and a message. + Sign(priv PrivateKey, hash []byte) (r, s *big.Int, err error) + + // Verify verifies an ECDSA signature against a given message and + // public key. + Verify(pub PublicKey, hash []byte, r, s *big.Int) bool + + // ---------------------------------------------------------------------------- + // Symmetric cipher encryption + // + // GenerateSharedSecret generates a shared secret using a private scalar + // and a public key using ECDH. + GenerateSharedSecret(privkey []byte, x, y *big.Int) []byte + + // Encrypt encrypts data to a recipient public key. + Encrypt(x, y *big.Int, in []byte) ([]byte, error) + + // Decrypt decrypts data encoded to the public key that originates + // from the passed private scalar. + Decrypt(privkey []byte, in []byte) ([]byte, error) +} + +// -------------------------------------------------------------------------------- +// Accessible DSA suites for export. +// +const ( + ECTypeSecp256k1 int = iota // 0 + ECTypeEdwards // 1 + ECTypeSecSchnorr // 2 +) + +// Secp256k1 is the secp256k1 curve and ECDSA system used in Bitcoin. +var Secp256k1 = newSecp256k1DSA() + +// Edwards is the Ed25519 ECDSA signature system. +var Edwards = newEdwardsDSA() + +// SecSchnorr is a Schnorr signature scheme about the secp256k1 curve +// implemented in libsecp256k1. +var SecSchnorr = newSecSchnorrDSA() diff --git a/chaincfg/chainec/doc.go b/chaincfg/chainec/doc.go new file mode 100644 index 00000000..7c8cd518 --- /dev/null +++ b/chaincfg/chainec/doc.go @@ -0,0 +1,16 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +/* +Package chainec provides wrapper functions to abstract the ec functions. + +Overview + +This package provides thin wrappers around the ec or crypto function used +to make it easier to go from btcec (btcd) to ed25519 (decred) for example +without changing the main body of the code. 
+ +*/ + +package chainec diff --git a/chaincfg/chainec/edwards.go b/chaincfg/chainec/edwards.go new file mode 100644 index 00000000..e7ae008a --- /dev/null +++ b/chaincfg/chainec/edwards.go @@ -0,0 +1,334 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package chainec + +import ( + "errors" + "io" + "math/big" + + "github.com/decred/dcrd/dcrec/edwards" +) + +type edwardsDSA struct { + // Constants + getN func() *big.Int + getP func() *big.Int + + // EC Math + add func(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) + isOnCurve func(x *big.Int, y *big.Int) bool + scalarMult func(x, y *big.Int, k []byte) (*big.Int, *big.Int) + scalarBaseMult func(k []byte) (*big.Int, *big.Int) + + // Private keys + newPrivateKey func(d *big.Int) PrivateKey + privKeyFromBytes func(pk []byte) (PrivateKey, PublicKey) + privKeyFromScalar func(pk []byte) (PrivateKey, PublicKey) + privKeyBytesLen func() int + + // Public keys + newPublicKey func(x *big.Int, y *big.Int) PublicKey + parsePubKey func(pubKeyStr []byte) (PublicKey, error) + pubKeyBytesLen func() int + pubKeyBytesLenUncompressed func() int + pubKeyBytesLenCompressed func() int + pubKeyBytesLenHybrid func() int + + // Signatures + newSignature func(r *big.Int, s *big.Int) Signature + parseDERSignature func(sigStr []byte) (Signature, error) + parseSignature func(sigStr []byte) (Signature, error) + recoverCompact func(signature, hash []byte) (PublicKey, bool, error) + + // ECDSA + generateKey func(rand io.Reader) ([]byte, *big.Int, *big.Int, error) + sign func(priv PrivateKey, hash []byte) (r, s *big.Int, err error) + verify func(pub PublicKey, hash []byte, r, s *big.Int) bool + + // Symmetric cipher encryption + generateSharedSecret func(privkey []byte, x, y *big.Int) []byte + encrypt func(x, y *big.Int, in []byte) ([]byte, error) + decrypt func(privkey []byte, in []byte) ([]byte, error) +} + +var ( + edwardsCurve = edwards.Edwards() +) + +// Boilerplate exported functions to make the struct interact with the interface. 
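The edwardsDSA type above (and secp256k1DSA later in this patch) is a struct whose fields are functions; the exported methods that follow simply delegate to those fields, and the constructor fills them with closures over the concrete curve package. The miniature below shows that "struct of function fields implementing an interface" shape with an invented two-method interface and a toy scheme, so none of the names correspond to the real chainec API.

```go
package main

import "fmt"

// Signer is a tiny stand-in for the DSA interface: one accessor and one
// operation.
type Signer interface {
	Name() string
	Sign(msg []byte) []byte
}

// funcSigner implements Signer by delegating every method to a function
// field, the same shape as edwardsDSA and secp256k1DSA.
type funcSigner struct {
	name func() string
	sign func(msg []byte) []byte
}

func (f funcSigner) Name() string           { return f.name() }
func (f funcSigner) Sign(msg []byte) []byte { return f.sign(msg) }

// newXorSigner wires the function fields to a concrete (toy) scheme, the way
// newEdwardsDSA wires its fields to the edwards package.
func newXorSigner(key byte) Signer {
	return funcSigner{
		name: func() string { return "xor-demo" },
		sign: func(msg []byte) []byte {
			out := make([]byte, len(msg))
			for i, b := range msg {
				out[i] = b ^ key
			}
			return out
		},
	}
}

func main() {
	var s Signer = newXorSigner(0x5a)
	fmt.Println(s.Name(), s.Sign([]byte("hi")))
}
```

Calling code only sees the interface, so the concrete curve package can be swapped without touching callers, which is the stated goal of the chainec wrapper.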
+// Constants +func (e edwardsDSA) GetP() *big.Int { + return e.getP() +} +func (e edwardsDSA) GetN() *big.Int { + return e.getN() +} + +// EC Math +func (e edwardsDSA) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { + return e.add(x1, y1, x2, y2) +} +func (e edwardsDSA) IsOnCurve(x, y *big.Int) bool { + return e.isOnCurve(x, y) +} +func (e edwardsDSA) ScalarMult(x, y *big.Int, k []byte) (*big.Int, *big.Int) { + return e.scalarMult(x, y, k) +} +func (e edwardsDSA) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { + return e.scalarBaseMult(k) +} + +// Private keys +func (e edwardsDSA) NewPrivateKey(d *big.Int) PrivateKey { + return e.newPrivateKey(d) +} +func (e edwardsDSA) PrivKeyFromBytes(pk []byte) (PrivateKey, PublicKey) { + return e.privKeyFromBytes(pk) +} +func (e edwardsDSA) PrivKeyFromScalar(pk []byte) (PrivateKey, PublicKey) { + return e.privKeyFromScalar(pk) +} +func (e edwardsDSA) PrivKeyBytesLen() int { + return e.privKeyBytesLen() +} + +// Public keys +func (e edwardsDSA) NewPublicKey(x *big.Int, y *big.Int) PublicKey { + return e.newPublicKey(x, y) +} +func (e edwardsDSA) ParsePubKey(pubKeyStr []byte) (PublicKey, error) { + return e.parsePubKey(pubKeyStr) +} +func (e edwardsDSA) PubKeyBytesLen() int { + return e.pubKeyBytesLen() +} +func (e edwardsDSA) PubKeyBytesLenUncompressed() int { + return e.pubKeyBytesLenUncompressed() +} +func (e edwardsDSA) PubKeyBytesLenCompressed() int { + return e.pubKeyBytesLenCompressed() +} +func (e edwardsDSA) PubKeyBytesLenHybrid() int { + return e.pubKeyBytesLenCompressed() +} + +// Signatures +func (e edwardsDSA) NewSignature(r, s *big.Int) Signature { + return e.newSignature(r, s) +} +func (e edwardsDSA) ParseDERSignature(sigStr []byte) (Signature, error) { + return e.parseDERSignature(sigStr) +} +func (e edwardsDSA) ParseSignature(sigStr []byte) (Signature, error) { + return e.parseSignature(sigStr) +} +func (e edwardsDSA) RecoverCompact(signature, hash []byte) (PublicKey, bool, + error) { + return e.recoverCompact(signature, hash) +} + +// ECDSA +func (e edwardsDSA) GenerateKey(rand io.Reader) ([]byte, *big.Int, *big.Int, + error) { + return e.generateKey(rand) +} +func (e edwardsDSA) Sign(priv PrivateKey, hash []byte) (r, s *big.Int, + err error) { + r, s, err = e.sign(priv, hash) + return +} +func (e edwardsDSA) Verify(pub PublicKey, hash []byte, r, s *big.Int) bool { + return e.verify(pub, hash, r, s) +} + +// Symmetric cipher encryption +func (e edwardsDSA) GenerateSharedSecret(privkey []byte, x, y *big.Int) []byte { + return e.generateSharedSecret(privkey, x, y) +} +func (e edwardsDSA) Encrypt(x, y *big.Int, in []byte) ([]byte, + error) { + return e.encrypt(x, y, in) +} +func (e edwardsDSA) Decrypt(privkey []byte, in []byte) ([]byte, + error) { + return e.decrypt(privkey, in) +} + +// newEdwardsDSA instatiates a function DSA subsystem over the edwards 25519 +// curve. A caveat for the functions below is that they're all routed through +// interfaces, and nil returns from the library itself for interfaces must +// ALWAYS be checked by checking the return value by attempted dereference +// (== nil). 
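The caveat in the constructor comment above (nil returns routed through interfaces "must ALWAYS be checked") guards against the classic Go pitfall where a nil concrete pointer stored in an interface makes the interface itself non-nil. The example below demonstrates the pitfall and the explicit-untyped-nil return that avoids it, which is the style the chainec closures use after checking the library's return values; the types here are hypothetical.

```go
package main

import "fmt"

type PrivateKey interface {
	Serialize() []byte
}

type concreteKey struct{ d []byte }

func (k *concreteKey) Serialize() []byte { return k.d }

// badLookup converts a possibly-nil *concreteKey straight into the interface,
// which yields a non-nil interface wrapping a nil pointer.
func badLookup(found bool) PrivateKey {
	var k *concreteKey
	if found {
		k = &concreteKey{d: []byte{0x01}}
	}
	return k // typed nil when !found
}

// goodLookup returns an untyped nil explicitly, so callers can compare the
// result against nil safely.
func goodLookup(found bool) PrivateKey {
	if !found {
		return nil
	}
	return &concreteKey{d: []byte{0x01}}
}

func main() {
	fmt.Println(badLookup(false) == nil)  // false: the gotcha the comment warns about
	fmt.Println(goodLookup(false) == nil) // true: safe to check
}
```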
+func newEdwardsDSA() DSA { + var ed DSA = &edwardsDSA{ + // Constants + getP: func() *big.Int { + return edwardsCurve.P + }, + getN: func() *big.Int { + return edwardsCurve.N + }, + + // EC Math + add: func(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { + return edwardsCurve.Add(x1, y1, x2, y2) + }, + isOnCurve: func(x, y *big.Int) bool { + return edwardsCurve.IsOnCurve(x, y) + }, + scalarMult: func(x, y *big.Int, k []byte) (*big.Int, *big.Int) { + return edwardsCurve.ScalarMult(x, y, k) + }, + scalarBaseMult: func(k []byte) (*big.Int, *big.Int) { + return edwardsCurve.ScalarBaseMult(k) + }, + + // Private keys + newPrivateKey: func(d *big.Int) PrivateKey { + pk := edwards.NewPrivateKey(edwardsCurve, d) + if pk != nil { + return PrivateKey(*pk) + } + return nil + }, + privKeyFromBytes: func(pk []byte) (PrivateKey, PublicKey) { + priv, pub := edwards.PrivKeyFromBytes(edwardsCurve, pk) + if priv == nil { + return nil, nil + } + if pub == nil { + return nil, nil + } + tpriv := PrivateKey(*priv) + tpub := PublicKey(*pub) + return tpriv, tpub + }, + privKeyFromScalar: func(pk []byte) (PrivateKey, PublicKey) { + priv, pub, err := edwards.PrivKeyFromScalar(edwardsCurve, pk) + if err != nil { + return nil, nil + } + if priv == nil { + return nil, nil + } + if pub == nil { + return nil, nil + } + tpriv := PrivateKey(*priv) + tpub := PublicKey(*pub) + return tpriv, tpub + }, + privKeyBytesLen: func() int { + return edwards.PrivKeyBytesLen + }, + + // Public keys + newPublicKey: func(x *big.Int, y *big.Int) PublicKey { + pk := edwards.NewPublicKey(edwardsCurve, x, y) + tpk := PublicKey(*pk) + return tpk + }, + parsePubKey: func(pubKeyStr []byte) (PublicKey, error) { + pk, err := edwards.ParsePubKey(edwardsCurve, pubKeyStr) + if err != nil { + return nil, err + } + tpk := PublicKey(*pk) + return tpk, err + }, + pubKeyBytesLen: func() int { + return edwards.PubKeyBytesLen + }, + pubKeyBytesLenUncompressed: func() int { + return edwards.PubKeyBytesLen + }, + pubKeyBytesLenCompressed: func() int { + return edwards.PubKeyBytesLen + }, + pubKeyBytesLenHybrid: func() int { + return edwards.PubKeyBytesLen + }, + + // Signatures + newSignature: func(r *big.Int, s *big.Int) Signature { + sig := edwards.NewSignature(r, s) + ts := Signature(*sig) + return ts + }, + parseDERSignature: func(sigStr []byte) (Signature, error) { + sig, err := edwards.ParseDERSignature(edwardsCurve, sigStr) + if err != nil { + return nil, err + } + ts := Signature(*sig) + return ts, err + }, + parseSignature: func(sigStr []byte) (Signature, error) { + sig, err := edwards.ParseSignature(edwardsCurve, sigStr) + if err != nil { + return nil, err + } + ts := Signature(*sig) + return ts, err + }, + recoverCompact: func(signature, hash []byte) (PublicKey, bool, error) { + pk, bl, err := edwards.RecoverCompact(signature, hash) + tpk := PublicKey(*pk) + return tpk, bl, err + }, + + // ECDSA + generateKey: func(rand io.Reader) ([]byte, *big.Int, *big.Int, error) { + return edwards.GenerateKey(edwardsCurve, rand) + }, + sign: func(priv PrivateKey, hash []byte) (r, s *big.Int, err error) { + if priv.GetType() != ECTypeEdwards { + return nil, nil, errors.New("wrong type") + } + epriv, ok := priv.(edwards.PrivateKey) + if !ok { + return nil, nil, errors.New("wrong type") + } + r, s, err = edwards.Sign(edwardsCurve, &epriv, hash) + return + }, + verify: func(pub PublicKey, hash []byte, r, s *big.Int) bool { + if pub.GetType() != ECTypeEdwards { + return false + } + epub, ok := pub.(edwards.PublicKey) + if !ok { + return false + } + return 
edwards.Verify(&epub, hash, r, s) + }, + + // Symmetric cipher encryption + generateSharedSecret: func(privkey []byte, x, y *big.Int) []byte { + privKeyLocal, _, err := edwards.PrivKeyFromScalar(edwardsCurve, + privkey) + if err != nil { + return nil + } + pubkey := edwards.NewPublicKey(edwardsCurve, x, y) + return edwards.GenerateSharedSecret(privKeyLocal, pubkey) + }, + encrypt: func(x, y *big.Int, in []byte) ([]byte, error) { + pubkey := edwards.NewPublicKey(edwardsCurve, x, y) + return edwards.Encrypt(edwardsCurve, pubkey, in) + }, + decrypt: func(privkey []byte, in []byte) ([]byte, error) { + privKeyLocal, _, err := edwards.PrivKeyFromScalar(edwardsCurve, + privkey) + if err != nil { + return nil, err + } + return edwards.Decrypt(edwardsCurve, privKeyLocal, in) + }, + } + + return ed.(DSA) +} diff --git a/chaincfg/chainec/edwards_test.go b/chaincfg/chainec/edwards_test.go new file mode 100644 index 00000000..e66e3478 --- /dev/null +++ b/chaincfg/chainec/edwards_test.go @@ -0,0 +1,91 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package chainec + +import ( + "bytes" + "encoding/hex" + "testing" +) + +func TestGeneralEd25519(t *testing.T) { + // Sample pubkey + samplePubkey, _ := hex.DecodeString("b0d88c8d1d327d1bc6f00f6d7682c98" + + "562869a798b96367bf8d67712c9cb1d17") + _, err := Edwards.ParsePubKey(samplePubkey) + if err != nil { + t.Errorf("failure parsing pubkey: %v", err) + } + + // Sample privkey secret + samplePrivKey, _ := hex.DecodeString("a980f892db13c99a3e8971e965b2ff3d4" + + "1eafd54093bc9f34d1fd22d84115bb644b57ee30cdb55829d0a5d4f046baef078f1e97" + + "a7f21b62d75f8e96ea139c35f") + privTest, _ := Edwards.PrivKeyFromBytes(samplePrivKey) + if privTest == nil { + t.Errorf("failure parsing privkey from secret") + } + + // Sample privkey scalar + samplePrivKeyScalar, _ := hex.DecodeString("04c723f67789d320bfcccc0ff2bc84" + + "95a09c2356fa63ac6457107c295e6fde68") + privTest, _ = Edwards.PrivKeyFromScalar(samplePrivKeyScalar) + if privTest == nil { + t.Errorf("failure parsing privkey from secret") + } + + // Sample signature + sampleSig, _ := hex.DecodeString( + "71301d3212915df23211bbd0bae5e678a51c7212ecc9341a91c48fbe96772e08" + + "cdd3d3b1f8ec828b3546b61a27b53a5472597ffd1771c39219741070ca62a40c") + _, err = Edwards.ParseDERSignature(sampleSig) + if err != nil { + t.Errorf("failure parsing DER signature: %v", err) + } +} + +func TestPrivKeysEdwards(t *testing.T) { + tests := []struct { + name string + key []byte + }{ + { + name: "check curve", + key: []byte{ + 0x0e, 0x10, 0xcb, 0xb0, 0x70, 0x27, 0xb9, 0x76, + 0x36, 0xf8, 0x36, 0x48, 0xb2, 0xb5, 0x1a, 0x98, + 0x7d, 0xad, 0x78, 0x2e, 0xbd, 0xaf, 0xcf, 0xbc, + 0x4f, 0xe8, 0xd7, 0x49, 0x84, 0x2b, 0x24, 0xd8, + }, + }, + } + + for _, test := range tests { + priv, pub := Edwards.PrivKeyFromScalar(test.key) + if priv == nil || pub == nil { + t.Errorf("failure deserializing from bytes") + continue + } + + hash := []byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9} + r, s, err := Edwards.Sign(priv, hash) + if err != nil { + t.Errorf("%s could not sign: %v", test.name, err) + continue + } + sig := Edwards.NewSignature(r, s) + + if !Edwards.Verify(pub, hash, sig.GetR(), sig.GetS()) { + t.Errorf("%s could not verify: %v", test.name, err) + continue + } + + serializedKey := priv.Serialize() + if !bytes.Equal(serializedKey, test.key) { + t.Errorf("%s unexpected serialized bytes - got: %x, "+ + "want: %x", test.name, serializedKey, 
test.key) + } + } +} diff --git a/chaincfg/chainec/secp256k1.go b/chaincfg/chainec/secp256k1.go new file mode 100644 index 00000000..fb898878 --- /dev/null +++ b/chaincfg/chainec/secp256k1.go @@ -0,0 +1,336 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package chainec + +import ( + "errors" + "fmt" + "io" + "math/big" + + "github.com/decred/dcrd/dcrec/secp256k1" +) + +type secp256k1DSA struct { + // Constants + getN func() *big.Int + getP func() *big.Int + + // EC Math + add func(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) + isOnCurve func(x *big.Int, y *big.Int) bool + scalarMult func(x, y *big.Int, k []byte) (*big.Int, *big.Int) + scalarBaseMult func(k []byte) (*big.Int, *big.Int) + + // Private keys + newPrivateKey func(d *big.Int) PrivateKey + privKeyFromBytes func(pk []byte) (PrivateKey, PublicKey) + privKeyFromScalar func(pk []byte) (PrivateKey, PublicKey) + privKeyBytesLen func() int + + // Public keys + newPublicKey func(x *big.Int, y *big.Int) PublicKey + parsePubKey func(pubKeyStr []byte) (PublicKey, error) + pubKeyBytesLen func() int + pubKeyBytesLenUncompressed func() int + pubKeyBytesLenCompressed func() int + pubKeyBytesLenHybrid func() int + + // Signatures + newSignature func(r *big.Int, s *big.Int) Signature + parseDERSignature func(sigStr []byte) (Signature, error) + parseSignature func(sigStr []byte) (Signature, error) + recoverCompact func(signature, hash []byte) (PublicKey, bool, error) + + // ECDSA + generateKey func(rand io.Reader) ([]byte, *big.Int, *big.Int, error) + sign func(priv PrivateKey, hash []byte) (r, s *big.Int, err error) + verify func(pub PublicKey, hash []byte, r, s *big.Int) bool + + // Symmetric cipher encryption + generateSharedSecret func(privkey []byte, x, y *big.Int) []byte + encrypt func(x, y *big.Int, in []byte) ([]byte, error) + decrypt func(privkey []byte, in []byte) ([]byte, error) +} + +var ( + secp256k1Curve = secp256k1.S256() +) + +// Boilerplate exported functions to make the struct interact with the interface. 
+// Constants +func (s secp256k1DSA) GetP() *big.Int { + return s.getP() +} +func (s secp256k1DSA) GetN() *big.Int { + return s.getN() +} + +// EC Math +func (s secp256k1DSA) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { + return s.add(x1, y1, x2, y2) +} +func (s secp256k1DSA) IsOnCurve(x, y *big.Int) bool { + return s.isOnCurve(x, y) +} +func (s secp256k1DSA) ScalarMult(x, y *big.Int, k []byte) (*big.Int, *big.Int) { + return s.scalarMult(x, y, k) +} +func (s secp256k1DSA) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { + return s.scalarBaseMult(k) +} + +// Private keys +func (s secp256k1DSA) NewPrivateKey(d *big.Int) PrivateKey { + return s.newPrivateKey(d) +} +func (s secp256k1DSA) PrivKeyFromBytes(pk []byte) (PrivateKey, PublicKey) { + return s.privKeyFromBytes(pk) +} +func (s secp256k1DSA) PrivKeyFromScalar(pk []byte) (PrivateKey, PublicKey) { + return s.privKeyFromScalar(pk) +} +func (s secp256k1DSA) PrivKeyBytesLen() int { + return s.privKeyBytesLen() +} + +// Public keys +func (s secp256k1DSA) NewPublicKey(x *big.Int, y *big.Int) PublicKey { + return s.newPublicKey(x, y) +} +func (s secp256k1DSA) ParsePubKey(pubKeyStr []byte) (PublicKey, error) { + return s.parsePubKey(pubKeyStr) +} +func (s secp256k1DSA) PubKeyBytesLen() int { + return s.pubKeyBytesLen() +} +func (s secp256k1DSA) PubKeyBytesLenUncompressed() int { + return s.pubKeyBytesLenUncompressed() +} +func (s secp256k1DSA) PubKeyBytesLenCompressed() int { + return s.pubKeyBytesLenCompressed() +} +func (s secp256k1DSA) PubKeyBytesLenHybrid() int { + return s.pubKeyBytesLenCompressed() +} + +// Signatures +func (sp secp256k1DSA) NewSignature(r, s *big.Int) Signature { + return sp.newSignature(r, s) +} +func (s secp256k1DSA) ParseDERSignature(sigStr []byte) (Signature, error) { + return s.parseDERSignature(sigStr) +} +func (s secp256k1DSA) ParseSignature(sigStr []byte) (Signature, error) { + return s.parseSignature(sigStr) +} +func (s secp256k1DSA) RecoverCompact(signature, hash []byte) (PublicKey, bool, + error) { + return s.recoverCompact(signature, hash) +} + +// ECDSA +func (s secp256k1DSA) GenerateKey(rand io.Reader) ([]byte, *big.Int, *big.Int, + error) { + return s.generateKey(rand) +} +func (sp secp256k1DSA) Sign(priv PrivateKey, hash []byte) (r, s *big.Int, + err error) { + r, s, err = sp.sign(priv, hash) + return +} +func (sp secp256k1DSA) Verify(pub PublicKey, hash []byte, r, s *big.Int) bool { + return sp.verify(pub, hash, r, s) +} + +// Symmetric cipher encryption +func (s secp256k1DSA) GenerateSharedSecret(privkey []byte, x, y *big.Int) []byte { + return s.generateSharedSecret(privkey, x, y) +} +func (s secp256k1DSA) Encrypt(x, y *big.Int, in []byte) ([]byte, + error) { + return s.encrypt(x, y, in) +} +func (s secp256k1DSA) Decrypt(privkey []byte, in []byte) ([]byte, + error) { + return s.decrypt(privkey, in) +} + +// newSecp256k1DSA instatiates a function DSA subsystem over the secp256k1 +// curve. A caveat for the functions below is that they're all routed through +// interfaces, and nil returns from the library itself for interfaces must +// ALWAYS be checked by checking the return value by attempted dereference +// (== nil). 
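The DSA interface's Sign and Verify methods produce and check an (R, S) signature over a message hash. As a hedged, standalone illustration of that flow, the snippet below uses the standard library's crypto/ecdsa with the P-256 curve, since the standard library does not provide secp256k1; it only demonstrates the generate/sign/verify sequence the interface abstracts and is not the dcrd secp256k1 package.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	// Generate a keypair; the chainec DSA interface hides exactly this kind
	// of curve-specific call behind GenerateKey.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		fmt.Println("keygen failed:", err)
		return
	}

	// Hash the message first; Sign and Verify operate on a fixed-size hash.
	hash := sha256.Sum256([]byte("decred chainec sketch"))

	// Sign produces the (r, s) pair, mirroring DSA.Sign.
	r, s, err := ecdsa.Sign(rand.Reader, priv, hash[:])
	if err != nil {
		fmt.Println("sign failed:", err)
		return
	}

	// Verify checks (r, s) against the public key, mirroring DSA.Verify.
	ok := ecdsa.Verify(&priv.PublicKey, hash[:], r, s)
	fmt.Println("signature valid:", ok)
}
```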
+func newSecp256k1DSA() DSA { + var secp DSA = &secp256k1DSA{ + // Constants + getP: func() *big.Int { + return secp256k1Curve.P + }, + getN: func() *big.Int { + return secp256k1Curve.N + }, + + // EC Math + add: func(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { + return secp256k1Curve.Add(x1, y1, x2, y2) + }, + isOnCurve: func(x, y *big.Int) bool { + return secp256k1Curve.IsOnCurve(x, y) + }, + scalarMult: func(x, y *big.Int, k []byte) (*big.Int, *big.Int) { + return secp256k1Curve.ScalarMult(x, y, k) + }, + scalarBaseMult: func(k []byte) (*big.Int, *big.Int) { + return secp256k1Curve.ScalarBaseMult(k) + }, + + // Private keys + newPrivateKey: func(d *big.Int) PrivateKey { + if d == nil { + return nil + } + pk := secp256k1.NewPrivateKey(secp256k1Curve, d) + if pk != nil { + return PrivateKey(pk) + } + return nil + }, + privKeyFromBytes: func(pk []byte) (PrivateKey, PublicKey) { + priv, pub := secp256k1.PrivKeyFromBytes(secp256k1Curve, pk) + if priv == nil { + return nil, nil + } + if pub == nil { + return nil, nil + } + tpriv := PrivateKey(priv) + tpub := PublicKey(pub) + return tpriv, tpub + }, + privKeyFromScalar: func(pk []byte) (PrivateKey, PublicKey) { + priv, pub := secp256k1.PrivKeyFromScalar(secp256k1Curve, pk) + if priv == nil { + return nil, nil + } + if pub == nil { + return nil, nil + } + tpriv := PrivateKey(priv) + tpub := PublicKey(pub) + return tpriv, tpub + }, + privKeyBytesLen: func() int { + return secp256k1.PrivKeyBytesLen + }, + + // Public keys + newPublicKey: func(x *big.Int, y *big.Int) PublicKey { + pk := secp256k1.NewPublicKey(secp256k1Curve, x, y) + tpk := PublicKey(pk) + return tpk + }, + parsePubKey: func(pubKeyStr []byte) (PublicKey, error) { + pk, err := secp256k1.ParsePubKey(pubKeyStr, secp256k1Curve) + if err != nil { + return nil, err + } + tpk := PublicKey(pk) + return tpk, err + }, + pubKeyBytesLen: func() int { + return secp256k1.PubKeyBytesLenCompressed + }, + pubKeyBytesLenUncompressed: func() int { + return secp256k1.PubKeyBytesLenUncompressed + }, + pubKeyBytesLenCompressed: func() int { + return secp256k1.PubKeyBytesLenCompressed + }, + pubKeyBytesLenHybrid: func() int { + return secp256k1.PubKeyBytesLenHybrid + }, + + // Signatures + newSignature: func(r *big.Int, s *big.Int) Signature { + sig := secp256k1.NewSignature(r, s) + ts := Signature(sig) + return ts + }, + parseDERSignature: func(sigStr []byte) (Signature, error) { + sig, err := secp256k1.ParseDERSignature(sigStr, secp256k1Curve) + if err != nil { + return nil, err + } + ts := Signature(sig) + return ts, err + }, + parseSignature: func(sigStr []byte) (Signature, error) { + sig, err := secp256k1.ParseSignature(sigStr, secp256k1Curve) + if err != nil { + return nil, err + } + ts := Signature(sig) + return ts, err + }, + recoverCompact: func(signature, hash []byte) (PublicKey, bool, error) { + pk, bl, err := secp256k1.RecoverCompact(secp256k1Curve, signature, + hash) + tpk := PublicKey(pk) + return tpk, bl, err + }, + + // ECDSA + generateKey: func(rand io.Reader) ([]byte, *big.Int, *big.Int, error) { + return secp256k1.GenerateKey(secp256k1Curve, rand) + }, + sign: func(priv PrivateKey, hash []byte) (r, s *big.Int, err error) { + if priv.GetType() != ECTypeSecp256k1 { + return nil, nil, errors.New("wrong type") + } + spriv, ok := priv.(*secp256k1.PrivateKey) + if !ok { + return nil, nil, errors.New("wrong type") + } + sig, err := spriv.Sign(hash) + if sig != nil { + r = sig.GetR() + s = sig.GetS() + } + return + }, + verify: func(pub PublicKey, hash []byte, r, s *big.Int) bool { + spub 
:= secp256k1.NewPublicKey(secp256k1Curve, pub.GetX(), pub.GetY()) + ssig := secp256k1.NewSignature(r, s) + return ssig.Verify(hash, spub) + }, + + // Symmetric cipher encryption + generateSharedSecret: func(privkey []byte, x, y *big.Int) []byte { + sprivkey, _ := secp256k1.PrivKeyFromBytes(secp256k1Curve, privkey) + if sprivkey == nil { + return nil + } + spubkey := secp256k1.NewPublicKey(secp256k1Curve, x, y) + + return secp256k1.GenerateSharedSecret(sprivkey, spubkey) + }, + encrypt: func(x, y *big.Int, in []byte) ([]byte, error) { + spubkey := secp256k1.NewPublicKey(secp256k1Curve, x, y) + + return secp256k1.Encrypt(spubkey, in) + }, + decrypt: func(privkey []byte, in []byte) ([]byte, error) { + sprivkey, _ := secp256k1.PrivKeyFromBytes(secp256k1Curve, privkey) + if sprivkey == nil { + return nil, fmt.Errorf("failure deserializing privkey") + } + + return secp256k1.Decrypt(sprivkey, in) + }, + } + + return secp.(DSA) +} diff --git a/chaincfg/chainec/secp256k1_test.go b/chaincfg/chainec/secp256k1_test.go new file mode 100644 index 00000000..f31b0d2e --- /dev/null +++ b/chaincfg/chainec/secp256k1_test.go @@ -0,0 +1,286 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package chainec + +import ( + "bytes" + "encoding/hex" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +func TestGeneralSecp256k1(t *testing.T) { + // Sample expanded pubkey (Satoshi from Genesis block) + samplePubkey, _ := hex.DecodeString("04" + + "678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb6" + + "49f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f") + _, err := Secp256k1.ParsePubKey(samplePubkey) + if err != nil { + t.Errorf("failure parsing pubkey: %v", err) + } + + // Sample compressed pubkey + samplePubkey, _ = hex.DecodeString("02" + + "4627032575180c2773b3eedd3a163dc2f3c6c84f9d0a1fc561a9578a15e6d0e3") + _, err = Secp256k1.ParsePubKey(samplePubkey) + if err != nil { + t.Errorf("failure parsing pubkey: %v", err) + } + + // Sample signature from https://en.bitcoin.it/wiki/Transaction + sampleSig, _ := hex.DecodeString("30" + + "45" + + "02" + + "20" + + "6e21798a42fae0e854281abd38bacd1aeed3ee3738d9e1446618c4571d1090db" + + "02" + + "21" + + "00e2ac980643b0b82c0e88ffdfec6b64e3e6ba35e7ba5fdd7d5d6cc8d25c6b2415") + _, err = Secp256k1.ParseDERSignature(sampleSig) + if err != nil { + t.Errorf("failure parsing DER signature: %v", err) + } +} + +type signatureTest struct { + name string + sig []byte + der bool + isValid bool +} + +// decodeHex decodes the passed hex string and returns the resulting bytes. It +// panics if an error occurs. This is only used in the tests as a helper since +// the only way it can fail is if there is an error in the test source code. 
+func decodeHex(hexStr string) []byte { + b, err := hex.DecodeString(hexStr) + if err != nil { + panic("invalid hex string in test source: err " + err.Error() + + ", hex: " + hexStr) + } + + return b +} + +type pubKeyTest struct { + name string + key []byte + format byte + isValid bool +} + +const ( + TstPubkeyUncompressed byte = 0x4 // x coord + y coord + TstPubkeyCompressed byte = 0x2 // y_bit + x coord + TstPubkeyHybrid byte = 0x6 // y_bit + x coord + y coord +) + +var pubKeyTests = []pubKeyTest{ + // pubkey from bitcoin blockchain tx + // 0437cd7f8525ceed2324359c2d0ba26006d92d85 + { + name: "uncompressed ok", + key: []byte{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, + 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e, + 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca, + 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0, + 0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64, + 0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9, + 0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56, + 0xb4, 0x12, 0xa3, + }, + isValid: true, + format: TstPubkeyUncompressed, + }, + { + name: "uncompressed as hybrid ok", + key: []byte{0x07, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, + 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e, + 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca, + 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0, + 0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64, + 0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9, + 0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56, + 0xb4, 0x12, 0xa3, + }, + isValid: true, + format: TstPubkeyHybrid, + }, + // from tx 0b09c51c51ff762f00fb26217269d2a18e77a4fa87d69b3c363ab4df16543f20 + { + name: "compressed ok (ybit = 0)", + key: []byte{0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b, + 0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1, + 0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21, + 0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d, + }, + isValid: true, + format: TstPubkeyCompressed, + }, + // from tx fdeb8e72524e8dab0da507ddbaf5f88fe4a933eb10a66bc4745bb0aa11ea393c + { + name: "compressed ok (ybit = 1)", + key: []byte{0x03, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33, + 0x09, 0xfb, 0x14, 0x3e, 0x0e, 0x8f, 0xe3, 0x96, 0x34, + 0x25, 0x21, 0x88, 0x7e, 0x97, 0x66, 0x90, 0xb6, 0xb4, + 0x7f, 0x5b, 0x2a, 0x4b, 0x7d, 0x44, 0x8e, + }, + isValid: true, + format: TstPubkeyCompressed, + }, + { + name: "hybrid", + key: []byte{0x06, 0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb, + 0xac, 0x55, 0xa0, 0x62, 0x95, 0xce, 0x87, 0x0b, 0x07, + 0x02, 0x9b, 0xfc, 0xdb, 0x2d, 0xce, 0x28, 0xd9, 0x59, + 0xf2, 0x81, 0x5b, 0x16, 0xf8, 0x17, 0x98, 0x48, 0x3a, + 0xda, 0x77, 0x26, 0xa3, 0xc4, 0x65, 0x5d, 0xa4, 0xfb, + 0xfc, 0x0e, 0x11, 0x08, 0xa8, 0xfd, 0x17, 0xb4, 0x48, + 0xa6, 0x85, 0x54, 0x19, 0x9c, 0x47, 0xd0, 0x8f, 0xfb, + 0x10, 0xd4, 0xb8, + }, + format: TstPubkeyHybrid, + isValid: true, + }, +} + +func TestPubKeys(t *testing.T) { + for _, test := range pubKeyTests { + pk, err := Secp256k1.ParsePubKey(test.key) + if err != nil { + if test.isValid { + t.Errorf("%s pubkey failed when shouldn't %v", + test.name, err) + } + continue + } + if !test.isValid { + t.Errorf("%s counted as valid when it should fail", + test.name) + continue + } + var pkStr []byte + switch test.format { + case TstPubkeyUncompressed: + pkStr = (PublicKey)(pk).SerializeUncompressed() + case TstPubkeyCompressed: + pkStr = (PublicKey)(pk).SerializeCompressed() + case TstPubkeyHybrid: + pkStr = (PublicKey)(pk).SerializeHybrid() + } + if !bytes.Equal(test.key, pkStr) { + 
t.Errorf("%s pubkey: serialized keys do not match.", + test.name) + spew.Dump(test.key) + spew.Dump(pkStr) + } + } +} + +func TestPrivKeys(t *testing.T) { + tests := []struct { + name string + key []byte + }{ + { + name: "check curve", + key: []byte{ + 0xea, 0xf0, 0x2c, 0xa3, 0x48, 0xc5, 0x24, 0xe6, + 0x39, 0x26, 0x55, 0xba, 0x4d, 0x29, 0x60, 0x3c, + 0xd1, 0xa7, 0x34, 0x7d, 0x9d, 0x65, 0xcf, 0xe9, + 0x3c, 0xe1, 0xeb, 0xff, 0xdc, 0xa2, 0x26, 0x94, + }, + }, + } + + for _, test := range tests { + priv, pub := Secp256k1.PrivKeyFromBytes(test.key) + _, err := Secp256k1.ParsePubKey(pub.SerializeUncompressed()) + if err != nil { + t.Errorf("%s privkey: %v", test.name, err) + continue + } + + hash := []byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9} + r, s, err := Secp256k1.Sign(priv, hash) + if err != nil { + t.Errorf("%s could not sign: %v", test.name, err) + continue + } + sig := Secp256k1.NewSignature(r, s) + + if !Secp256k1.Verify(pub, hash, sig.GetR(), sig.GetS()) { + t.Errorf("%s could not verify: %v", test.name, err) + continue + } + + serializedKey := priv.Serialize() + if !bytes.Equal(serializedKey, test.key) { + t.Errorf("%s unexpected serialized bytes - got: %x, "+ + "want: %x", test.name, serializedKey, test.key) + } + } +} + +var signatureTests = []signatureTest{ + // signatures from bitcoin blockchain tx + // 0437cd7f8525ceed2324359c2d0ba26006d92d85 + { + name: "valid signature.", + sig: []byte{0x30, 0x44, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69, + 0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1, + 0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6, + 0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd, + 0x41, 0x02, 0x20, 0x18, 0x15, 0x22, 0xec, 0x8e, 0xca, + 0x07, 0xde, 0x48, 0x60, 0xa4, 0xac, 0xdd, 0x12, 0x90, + 0x9d, 0x83, 0x1c, 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22, + 0x08, 0x22, 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09, + }, + der: true, + isValid: true, + }, + { + name: "empty.", + sig: []byte{}, + isValid: false, + }, + { + name: "bad magic.", + sig: []byte{0x31, 0x44, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69, + 0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1, + 0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6, + 0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd, + 0x41, 0x02, 0x20, 0x18, 0x15, 0x22, 0xec, 0x8e, 0xca, + 0x07, 0xde, 0x48, 0x60, 0xa4, 0xac, 0xdd, 0x12, 0x90, + 0x9d, 0x83, 0x1c, 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22, + 0x08, 0x22, 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09, + }, + der: true, + isValid: false, + }, +} + +func TestSignatures(t *testing.T) { + for _, test := range signatureTests { + var err error + if test.der { + _, err = Secp256k1.ParseDERSignature(test.sig) + } else { + _, err = Secp256k1.ParseSignature(test.sig) + } + if err != nil { + if test.isValid { + t.Errorf("%s signature failed when shouldn't %v", + test.name, err) + } + continue + } + if !test.isValid { + t.Errorf("%s counted as valid when it should fail", + test.name) + } + } +} diff --git a/chaincfg/chainec/secschnorr.go b/chaincfg/chainec/secschnorr.go new file mode 100644 index 00000000..0fb9a442 --- /dev/null +++ b/chaincfg/chainec/secschnorr.go @@ -0,0 +1,314 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package chainec + +import ( + "fmt" + "io" + "math/big" + + "github.com/decred/dcrd/dcrec/secp256k1" + "github.com/decred/dcrd/dcrec/secp256k1/schnorr" +) + +type secSchnorrDSA struct { + // Constants + getN func() *big.Int + getP func() *big.Int + + // EC Math + add func(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) + isOnCurve func(x *big.Int, y *big.Int) bool + scalarMult func(x, y *big.Int, k []byte) (*big.Int, *big.Int) + scalarBaseMult func(k []byte) (*big.Int, *big.Int) + + // Private keys + newPrivateKey func(d *big.Int) PrivateKey + privKeyFromBytes func(pk []byte) (PrivateKey, PublicKey) + privKeyFromScalar func(pk []byte) (PrivateKey, PublicKey) + privKeyBytesLen func() int + + // Public keys + newPublicKey func(x *big.Int, y *big.Int) PublicKey + parsePubKey func(pubKeyStr []byte) (PublicKey, error) + pubKeyBytesLen func() int + pubKeyBytesLenUncompressed func() int + pubKeyBytesLenCompressed func() int + pubKeyBytesLenHybrid func() int + + // Signatures + newSignature func(r *big.Int, s *big.Int) Signature + parseDERSignature func(sigStr []byte) (Signature, error) + parseSignature func(sigStr []byte) (Signature, error) + recoverCompact func(signature, hash []byte) (PublicKey, bool, error) + + // ECDSA + generateKey func(rand io.Reader) ([]byte, *big.Int, *big.Int, error) + sign func(priv PrivateKey, hash []byte) (r, s *big.Int, err error) + verify func(pub PublicKey, hash []byte, r, s *big.Int) bool + + // Symmetric cipher encryption + generateSharedSecret func(privkey []byte, x, y *big.Int) []byte + encrypt func(x, y *big.Int, in []byte) ([]byte, error) + decrypt func(privkey []byte, in []byte) ([]byte, error) +} + +// Boilerplate exported functions to make the struct interact with the interface. +// Constants +func (s secSchnorrDSA) GetP() *big.Int { + return s.getP() +} +func (s secSchnorrDSA) GetN() *big.Int { + return s.getN() +} + +// EC Math +func (s secSchnorrDSA) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { + return s.add(x1, y1, x2, y2) +} +func (s secSchnorrDSA) IsOnCurve(x, y *big.Int) bool { + return s.isOnCurve(x, y) +} +func (s secSchnorrDSA) ScalarMult(x, y *big.Int, k []byte) (*big.Int, *big.Int) { + return s.scalarMult(x, y, k) +} +func (s secSchnorrDSA) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { + return s.scalarBaseMult(k) +} + +// Private keys +func (s secSchnorrDSA) NewPrivateKey(d *big.Int) PrivateKey { + return s.newPrivateKey(d) +} +func (s secSchnorrDSA) PrivKeyFromBytes(pk []byte) (PrivateKey, PublicKey) { + return s.privKeyFromBytes(pk) +} +func (s secSchnorrDSA) PrivKeyFromScalar(pk []byte) (PrivateKey, PublicKey) { + return s.privKeyFromScalar(pk) +} +func (s secSchnorrDSA) PrivKeyBytesLen() int { + return s.privKeyBytesLen() +} + +// Public keys +func (s secSchnorrDSA) NewPublicKey(x *big.Int, y *big.Int) PublicKey { + return s.newPublicKey(x, y) +} +func (s secSchnorrDSA) ParsePubKey(pubKeyStr []byte) (PublicKey, error) { + return s.parsePubKey(pubKeyStr) +} +func (s secSchnorrDSA) PubKeyBytesLen() int { + return s.pubKeyBytesLen() +} +func (s secSchnorrDSA) PubKeyBytesLenUncompressed() int { + return s.pubKeyBytesLenUncompressed() +} +func (s secSchnorrDSA) PubKeyBytesLenCompressed() int { + return s.pubKeyBytesLenCompressed() +} +func (s secSchnorrDSA) PubKeyBytesLenHybrid() int { + return s.pubKeyBytesLenCompressed() +} + +// Signatures +func (sp secSchnorrDSA) NewSignature(r, s *big.Int) Signature { + return sp.newSignature(r, s) +} +func (s secSchnorrDSA) ParseDERSignature(sigStr []byte) (Signature, error) { + return 
s.parseDERSignature(sigStr)
+}
+func (s secSchnorrDSA) ParseSignature(sigStr []byte) (Signature, error) {
+	return s.parseSignature(sigStr)
+}
+func (s secSchnorrDSA) RecoverCompact(signature, hash []byte) (PublicKey, bool,
+	error) {
+	return s.recoverCompact(signature, hash)
+}
+
+// ECDSA
+func (s secSchnorrDSA) GenerateKey(rand io.Reader) ([]byte, *big.Int, *big.Int,
+	error) {
+	return s.generateKey(rand)
+}
+func (sp secSchnorrDSA) Sign(priv PrivateKey, hash []byte) (r, s *big.Int,
+	err error) {
+	r, s, err = sp.sign(priv, hash)
+	return
+}
+func (sp secSchnorrDSA) Verify(pub PublicKey, hash []byte, r, s *big.Int) bool {
+	return sp.verify(pub, hash, r, s)
+}
+
+// Symmetric cipher encryption
+func (s secSchnorrDSA) GenerateSharedSecret(privkey []byte, x, y *big.Int) []byte {
+	return s.generateSharedSecret(privkey, x, y)
+}
+func (s secSchnorrDSA) Encrypt(x, y *big.Int, in []byte) ([]byte,
+	error) {
+	return s.encrypt(x, y, in)
+}
+func (s secSchnorrDSA) Decrypt(privkey []byte, in []byte) ([]byte,
+	error) {
+	return s.decrypt(privkey, in)
+}
+
+// newSecSchnorrDSA instantiates a functional DSA subsystem over the secp256k1
+// curve. A caveat for the functions below is that they are all routed through
+// interfaces, so nil returns from the underlying library must ALWAYS be
+// checked before use by comparing the returned interface value against
+// nil.
+func newSecSchnorrDSA() DSA {
+	var secp DSA = &secSchnorrDSA{
+		// Constants
+		getP: func() *big.Int {
+			return secp256k1Curve.P
+		},
+		getN: func() *big.Int {
+			return secp256k1Curve.N
+		},
+
+		// EC Math
+		add: func(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
+			return secp256k1Curve.Add(x1, y1, x2, y2)
+		},
+		isOnCurve: func(x, y *big.Int) bool {
+			return secp256k1Curve.IsOnCurve(x, y)
+		},
+		scalarMult: func(x, y *big.Int, k []byte) (*big.Int, *big.Int) {
+			return secp256k1Curve.ScalarMult(x, y, k)
+		},
+		scalarBaseMult: func(k []byte) (*big.Int, *big.Int) {
+			return secp256k1Curve.ScalarBaseMult(k)
+		},
+
+		// Private keys
+		newPrivateKey: func(d *big.Int) PrivateKey {
+			pk := secp256k1.NewPrivateKey(secp256k1Curve, d)
+			if pk != nil {
+				return PrivateKey(pk)
+			}
+			return nil
+		},
+		privKeyFromBytes: func(pk []byte) (PrivateKey, PublicKey) {
+			priv, pub := secp256k1.PrivKeyFromBytes(secp256k1Curve, pk)
+			if priv == nil {
+				return nil, nil
+			}
+			if pub == nil {
+				return nil, nil
+			}
+			tpriv := PrivateKey(priv)
+			tpub := PublicKey(pub)
+			return tpriv, tpub
+		},
+		privKeyFromScalar: func(pk []byte) (PrivateKey, PublicKey) {
+			priv, pub := secp256k1.PrivKeyFromScalar(secp256k1Curve, pk)
+			if priv == nil {
+				return nil, nil
+			}
+			if pub == nil {
+				return nil, nil
+			}
+			tpriv := PrivateKey(priv)
+			tpub := PublicKey(pub)
+			return tpriv, tpub
+		},
+		privKeyBytesLen: func() int {
+			return secp256k1.PrivKeyBytesLen
+		},
+
+		// Public keys
+		// Note that Schnorr only allows 33 byte public keys, however
+		// as they are secp256k1 you still have access to the other
+		// serialization types.
+ newPublicKey: func(x *big.Int, y *big.Int) PublicKey { + pk := secp256k1.NewPublicKey(secp256k1Curve, x, y) + tpk := PublicKey(pk) + return tpk + }, + parsePubKey: func(pubKeyStr []byte) (PublicKey, error) { + pk, err := schnorr.ParsePubKey(secp256k1Curve, pubKeyStr) + if err != nil { + return nil, err + } + tpk := PublicKey(pk) + return tpk, err + }, + pubKeyBytesLen: func() int { + return schnorr.PubKeyBytesLen + }, + pubKeyBytesLenUncompressed: func() int { + return schnorr.PubKeyBytesLen + }, + pubKeyBytesLenCompressed: func() int { + return schnorr.PubKeyBytesLen + }, + pubKeyBytesLenHybrid: func() int { + return schnorr.PubKeyBytesLen + }, + + // Signatures + newSignature: func(r *big.Int, s *big.Int) Signature { + sig := schnorr.NewSignature(r, s) + ts := Signature(sig) + return ts + }, + parseDERSignature: func(sigStr []byte) (Signature, error) { + sig, err := schnorr.ParseSignature(sigStr) + ts := Signature(sig) + return ts, err + }, + parseSignature: func(sigStr []byte) (Signature, error) { + sig, err := schnorr.ParseSignature(sigStr) + ts := Signature(sig) + return ts, err + }, + recoverCompact: func(signature, hash []byte) (PublicKey, bool, error) { + pk, bl, err := schnorr.RecoverPubkey(secp256k1Curve, signature, + hash) + tpk := PublicKey(pk) + return tpk, bl, err + }, + + // ECDSA + generateKey: func(rand io.Reader) ([]byte, *big.Int, *big.Int, error) { + return secp256k1.GenerateKey(secp256k1Curve, rand) + }, + sign: func(priv PrivateKey, hash []byte) (r, s *big.Int, err error) { + spriv := secp256k1.NewPrivateKey(secp256k1Curve, priv.GetD()) + return schnorr.Sign(secp256k1Curve, spriv, hash) + }, + verify: func(pub PublicKey, hash []byte, r, s *big.Int) bool { + spub := secp256k1.NewPublicKey(secp256k1Curve, pub.GetX(), pub.GetY()) + return schnorr.Verify(secp256k1Curve, spub, hash, r, s) + }, + + // Symmetric cipher encryption + generateSharedSecret: func(privkey []byte, x, y *big.Int) []byte { + sprivkey, _ := secp256k1.PrivKeyFromBytes(secp256k1Curve, privkey) + if sprivkey == nil { + return nil + } + spubkey := secp256k1.NewPublicKey(secp256k1Curve, x, y) + + return secp256k1.GenerateSharedSecret(sprivkey, spubkey) + }, + encrypt: func(x, y *big.Int, in []byte) ([]byte, error) { + spubkey := secp256k1.NewPublicKey(secp256k1Curve, x, y) + + return secp256k1.Encrypt(spubkey, in) + }, + decrypt: func(privkey []byte, in []byte) ([]byte, error) { + sprivkey, _ := secp256k1.PrivKeyFromBytes(secp256k1Curve, privkey) + if sprivkey == nil { + return nil, fmt.Errorf("failure deserializing privkey") + } + + return secp256k1.Decrypt(sprivkey, in) + }, + } + + return secp.(DSA) +} diff --git a/chaincfg/chainhash/README.md b/chaincfg/chainhash/README.md new file mode 100644 index 00000000..38727c21 --- /dev/null +++ b/chaincfg/chainhash/README.md @@ -0,0 +1,11 @@ +chainhash +========= + +chainhash is a wrapper around the hash function used for decred. It +is designed to isolate the code that needs to differ between btcd and +dcrd. + +## Installation and updating +```bash +$ go get -u github.com/decred/dcrd/chaincfg/chainhash +``` diff --git a/chaincfg/chainhash/doc.go b/chaincfg/chainhash/doc.go new file mode 100644 index 00000000..32100654 --- /dev/null +++ b/chaincfg/chainhash/doc.go @@ -0,0 +1,6 @@ +// Package chainhash defines the hash functions used. +// +// This package provides a wrapper around the hash function used. This is +// designed to isolate the code that needs to be changed to support coins +// with different hash functions (i.e, bitcoin vs decred). 
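
The secschnorr.go file above wires the Schnorr signature scheme into the very same DSA interface, so callers can switch schemes without changing their call sites. The sketch below is an illustrative, hypothetical helper (not part of the patch) written inside package chainec so the unexported constructor is reachable; it exercises only methods shown in the hunks above.

```go
package chainec

// schnorrSignVerify is a hypothetical helper showing that the DSA value built
// by newSecSchnorrDSA is used exactly like the secp256k1 ECDSA one.
func schnorrSignVerify(keyBytes, msgHash []byte) bool {
	dsa := newSecSchnorrDSA()

	// Nil interface returns must always be checked (see the constructor's
	// doc comment).
	priv, pub := dsa.PrivKeyFromBytes(keyBytes)
	if priv == nil || pub == nil {
		return false
	}

	r, s, err := dsa.Sign(priv, msgHash)
	if err != nil {
		return false
	}

	return dsa.Verify(pub, msgHash, r, s)
}
```
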
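
The new chainhash package described here (and fleshed out in the hash.go and hashfuncs.go hunks that follow) replaces wire.ShaHash with a coin-agnostic Hash type plus BLAKE-256 helpers. A minimal usage sketch, not part of the patch, using the import path from the README above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/decred/dcrd/chaincfg/chainhash"
)

func main() {
	// BLAKE-256 hash of arbitrary data, returned as a chainhash.Hash.
	h := chainhash.HashFuncH([]byte("some data"))

	// String() is the byte-reversed hex form; NewHashFromStr reverses it
	// back, so the round trip yields an equal hash.
	h2, err := chainhash.NewHashFromStr(h.String())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h.IsEqual(h2)) // true

	// IsEqual now also treats two nil hashes as equal instead of
	// dereferencing them.
	fmt.Println((*chainhash.Hash)(nil).IsEqual(nil)) // true
}
```
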
+package chainhash diff --git a/wire/shahash.go b/chaincfg/chainhash/hash.go similarity index 67% rename from wire/shahash.go rename to chaincfg/chainhash/hash.go index 2444dccd..95408229 100644 --- a/wire/shahash.go +++ b/chaincfg/chainhash/hash.go @@ -1,31 +1,32 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package wire +package chainhash import ( "encoding/hex" "fmt" ) -// Size of array used to store sha hashes. See ShaHash. +// Size of array used to store sha hashes. See Hash. const HashSize = 32 -// MaxHashStringSize is the maximum length of a ShaHash hash string. +// MaxHashStringSize is the maximum length of a Hash hash string. const MaxHashStringSize = HashSize * 2 // ErrHashStrSize describes an error that indicates the caller specified a hash // string that has too many characters. var ErrHashStrSize = fmt.Errorf("max hash string length is %v bytes", MaxHashStringSize) -// ShaHash is used in several of the bitcoin messages and common structures. It +// Hash is used in several of the bitcoin messages and common structures. It // typically represents the double sha256 of data. -type ShaHash [HashSize]byte +type Hash [HashSize]byte -// String returns the ShaHash as the hexadecimal string of the byte-reversed +// String returns the Hash as the hexadecimal string of the byte-reversed // hash. -func (hash ShaHash) String() string { +func (hash Hash) String() string { for i := 0; i < HashSize/2; i++ { hash[i], hash[HashSize-1-i] = hash[HashSize-1-i], hash[i] } @@ -37,7 +38,7 @@ func (hash ShaHash) String() string { // NOTE: This makes a copy of the bytes and should have probably been named // CloneBytes. It is generally cheaper to just slice the hash directly thereby // reusing the same bytes rather than calling this method. -func (hash *ShaHash) Bytes() []byte { +func (hash *Hash) Bytes() []byte { newHash := make([]byte, HashSize) copy(newHash, hash[:]) @@ -46,7 +47,7 @@ func (hash *ShaHash) Bytes() []byte { // SetBytes sets the bytes which represent the hash. An error is returned if // the number of bytes passed in is not HashSize. -func (hash *ShaHash) SetBytes(newHash []byte) error { +func (hash *Hash) SetBytes(newHash []byte) error { nhlen := len(newHash) if nhlen != HashSize { return fmt.Errorf("invalid sha length of %v, want %v", nhlen, @@ -58,14 +59,20 @@ func (hash *ShaHash) SetBytes(newHash []byte) error { } // IsEqual returns true if target is the same as hash. -func (hash *ShaHash) IsEqual(target *ShaHash) bool { +func (hash *Hash) IsEqual(target *Hash) bool { + if hash == nil && target == nil { + return true + } + if hash == nil || target == nil { + return false + } return *hash == *target } -// NewShaHash returns a new ShaHash from a byte slice. An error is returned if +// NewHash returns a new Hash from a byte slice. An error is returned if // the number of bytes passed in is not HashSize. -func NewShaHash(newHash []byte) (*ShaHash, error) { - var sh ShaHash +func NewHash(newHash []byte) (*Hash, error) { + var sh Hash err := sh.SetBytes(newHash) if err != nil { return nil, err @@ -73,10 +80,10 @@ func NewShaHash(newHash []byte) (*ShaHash, error) { return &sh, err } -// NewShaHashFromStr creates a ShaHash from a hash string. The string should be +// NewHashFromStr creates a Hash from a hash string. 
The string should be
 // the hexadecimal string of a byte-reversed hash, but any missing characters
-// result in zero padding at the end of the ShaHash.
-func NewShaHashFromStr(hash string) (*ShaHash, error) {
+// result in zero padding at the end of the Hash.
+func NewHashFromStr(hash string) (*Hash, error) {
 	// Return error if hash string is too long.
 	if len(hash) > MaxHashStringSize {
 		return nil, ErrHashStrSize
@@ -94,10 +101,10 @@ func NewShaHashFromStr(hash string) (*ShaHash, error) {
 	}
 
 	// Un-reverse the decoded bytes, copying into in leading bytes of a
-	// ShaHash.  There is no need to explicitly pad the result as any
+	// Hash.  There is no need to explicitly pad the result as any
 	// missing (when len(buf) < HashSize) bytes from the decoded hex string
-	// will remain zeros at the end of the ShaHash.
-	var ret ShaHash
+	// will remain zeros at the end of the Hash.
+	var ret Hash
 	blen := len(buf)
 	mid := blen / 2
 	if blen%2 != 0 {
diff --git a/chaincfg/chainhash/hashfuncs.go b/chaincfg/chainhash/hashfuncs.go
new file mode 100644
index 00000000..a7e068eb
--- /dev/null
+++ b/chaincfg/chainhash/hashfuncs.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2015 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package chainhash
+
+import (
+	"github.com/decred/blake256"
+)
+
+// HashFunc calculates the hash of the supplied bytes.
+// TODO(jcv) Should modify blake256 so it has the same interface as blake2
+// and fastsha256 so these functions can look more like btcsuite.  Then should
+// try to get it to the upstream blake256 repo
+func HashFunc(data []byte) [blake256.Size]byte {
+	var outB [blake256.Size]byte
+	a := blake256.New()
+	a.Write(data)
+	out := a.Sum(nil)
+	for i, el := range out {
+		outB[i] = el
+	}
+
+	return outB
+}
+
+// HashFuncB calculates hash(b) and returns the resulting bytes.
+func HashFuncB(b []byte) []byte {
+	a := blake256.New()
+	a.Write(b)
+	out := a.Sum(nil)
+	return out
+}
+
+// HashFuncH calculates hash(b) and returns the resulting bytes as a Hash.
+func HashFuncH(b []byte) Hash {
+	var outB [blake256.Size]byte
+	a := blake256.New()
+	a.Write(b)
+	out := a.Sum(nil)
+	for i, el := range out {
+		outB[i] = el
+	}
+
+	return Hash(outB)
+}
+
+// HashBlockSize is the block size of the hash algorithm.
+const HashBlockSize = blake256.BlockSize
diff --git a/chaincfg/doc.go b/chaincfg/doc.go
index 3659adbf..3af27516 100644
--- a/chaincfg/doc.go
+++ b/chaincfg/doc.go
@@ -1,8 +1,8 @@
 // Package chaincfg defines chain configuration parameters.
 //
-// In addition to the main Bitcoin network, which is intended for the transfer
+// In addition to the main Decred network, which is intended for the transfer
 // of monetary value, there also exists two currently active standard networks:
-// regression test and testnet (version 3). These networks are incompatible
+// regression test and testnet (version 0). These networks are incompatible
 // with each other (each sharing a different genesis block) and software should
 // handle errors where input intended for one network is used on an application
 // instance running on a different network.
 
@@ -10,7 +10,7 @@
 // For library packages, chaincfg provides the ability to lookup chain
 // parameters and encoding magics when passed a *Params. 
Older APIs not updated // to the new convention of passing a *Params may lookup the parameters for a -// wire.BitcoinNet using ParamsForNet, but be aware that this usage is +// wire.DecredNet using ParamsForNet, but be aware that this usage is // deprecated and will be removed from chaincfg in the future. // // For main packages, a (typically global) var may be assigned the address of @@ -25,11 +25,11 @@ // "fmt" // "log" // -// "github.com/btcsuite/btcutil" -// "github.com/btcsuite/btcd/chaincfg" +// "github.com/decred/dcrutil" +// "github.com/decred/dcrd/chaincfg" // ) // -// var testnet = flag.Bool("testnet", false, "operate on the testnet Bitcoin network") +// var testnet = flag.Bool("testnet", false, "operate on the testnet Decred network") // // // By default (without -testnet), use mainnet. // var chainParams = &chaincfg.MainNetParams @@ -39,23 +39,23 @@ // // // Modify active network parameters if operating on testnet. // if *testnet { -// chainParams = &chaincfg.TestNet3Params +// chainParams = &chaincfg.TestNetParams // } // // // later... // // // Create and print new payment address, specific to the active network. // pubKeyHash := make([]byte, 20) -// addr, err := btcutil.NewAddressPubKeyHash(pubKeyHash, chainParams) +// addr, err := dcrutil.NewAddressPubKeyHash(pubKeyHash, chainParams) // if err != nil { // log.Fatal(err) // } // fmt.Println(addr) // } // -// If an application does not use one of the three standard Bitcoin networks, +// If an application does not use one of the three standard Decred networks, // a new Params struct may be created which defines the parameters for the // non-standard network. As a general rule of thumb, all network parameters // should be unique to the network, but parameter collisions can still occur -// (unfortunately, this is the case with regtest and testnet3 sharing magics). +// (unfortunately, this is the case with regtest and testnet sharing magics). package chaincfg diff --git a/chaincfg/genesis.go b/chaincfg/genesis.go index 396c8392..2162a198 100644 --- a/chaincfg/genesis.go +++ b/chaincfg/genesis.go @@ -1,4 +1,5 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -7,165 +8,6 @@ package chaincfg import ( "time" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" ) - -// genesisCoinbaseTx is the coinbase transaction for the genesis blocks for -// the main network, regression test network, and test network (version 3). 
-var genesisCoinbaseTx = wire.MsgTx{ - Version: 1, - TxIn: []*wire.TxIn{ - { - PreviousOutPoint: wire.OutPoint{ - Hash: wire.ShaHash{}, - Index: 0xffffffff, - }, - SignatureScript: []byte{ - 0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, 0x45, /* |.......E| */ - 0x54, 0x68, 0x65, 0x20, 0x54, 0x69, 0x6d, 0x65, /* |The Time| */ - 0x73, 0x20, 0x30, 0x33, 0x2f, 0x4a, 0x61, 0x6e, /* |s 03/Jan| */ - 0x2f, 0x32, 0x30, 0x30, 0x39, 0x20, 0x43, 0x68, /* |/2009 Ch| */ - 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x6f, 0x72, /* |ancellor| */ - 0x20, 0x6f, 0x6e, 0x20, 0x62, 0x72, 0x69, 0x6e, /* | on brin| */ - 0x6b, 0x20, 0x6f, 0x66, 0x20, 0x73, 0x65, 0x63, /* |k of sec|*/ - 0x6f, 0x6e, 0x64, 0x20, 0x62, 0x61, 0x69, 0x6c, /* |ond bail| */ - 0x6f, 0x75, 0x74, 0x20, 0x66, 0x6f, 0x72, 0x20, /* |out for |*/ - 0x62, 0x61, 0x6e, 0x6b, 0x73, /* |banks| */ - }, - Sequence: 0xffffffff, - }, - }, - TxOut: []*wire.TxOut{ - { - Value: 0x12a05f200, - PkScript: []byte{ - 0x41, 0x04, 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, /* |A.g....U| */ - 0x48, 0x27, 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, /* |H'.g..q0| */ - 0xb7, 0x10, 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, /* |..\..(.9| */ - 0x09, 0xa6, 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, /* |..yb...a| */ - 0xde, 0xb6, 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, /* |..I..?L.| */ - 0x38, 0xc4, 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, /* |8..U....| */ - 0x12, 0xde, 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, /* |..\8M...| */ - 0x8d, 0x57, 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, /* |.W.Lp+k.| */ - 0x1d, 0x5f, 0xac, /* |._.| */ - }, - }, - }, - LockTime: 0, -} - -// genesisHash is the hash of the first block in the block chain for the main -// network (genesis block). -var genesisHash = wire.ShaHash([wire.HashSize]byte{ // Make go vet happy. - 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, - 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, - 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, - 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, -}) - -// genesisMerkleRoot is the hash of the first transaction in the genesis block -// for the main network. -var genesisMerkleRoot = wire.ShaHash([wire.HashSize]byte{ // Make go vet happy. - 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, - 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, - 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, - 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, -}) - -// genesisBlock defines the genesis block of the block chain which serves as the -// public transaction ledger for the main network. -var genesisBlock = wire.MsgBlock{ - Header: wire.BlockHeader{ - Version: 1, - PrevBlock: wire.ShaHash{}, // 0000000000000000000000000000000000000000000000000000000000000000 - MerkleRoot: genesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b - Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 18:15:05 +0000 UTC - Bits: 0x1d00ffff, // 486604799 [00000000ffff0000000000000000000000000000000000000000000000000000] - Nonce: 0x7c2bac1d, // 2083236893 - }, - Transactions: []*wire.MsgTx{&genesisCoinbaseTx}, -} - -// regTestGenesisHash is the hash of the first block in the block chain for the -// regression test network (genesis block). -var regTestGenesisHash = wire.ShaHash([wire.HashSize]byte{ // Make go vet happy. - 0x06, 0x22, 0x6e, 0x46, 0x11, 0x1a, 0x0b, 0x59, - 0xca, 0xaf, 0x12, 0x60, 0x43, 0xeb, 0x5b, 0xbf, - 0x28, 0xc3, 0x4f, 0x3a, 0x5e, 0x33, 0x2a, 0x1f, - 0xc7, 0xb2, 0xb7, 0x3c, 0xf1, 0x88, 0x91, 0x0f, -}) - -// regTestGenesisMerkleRoot is the hash of the first transaction in the genesis -// block for the regression test network. 
It is the same as the merkle root for -// the main network. -var regTestGenesisMerkleRoot = genesisMerkleRoot - -// regTestGenesisBlock defines the genesis block of the block chain which serves -// as the public transaction ledger for the regression test network. -var regTestGenesisBlock = wire.MsgBlock{ - Header: wire.BlockHeader{ - Version: 1, - PrevBlock: wire.ShaHash{}, // 0000000000000000000000000000000000000000000000000000000000000000 - MerkleRoot: regTestGenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b - Timestamp: time.Unix(1296688602, 0), // 2011-02-02 23:16:42 +0000 UTC - Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000] - Nonce: 2, - }, - Transactions: []*wire.MsgTx{&genesisCoinbaseTx}, -} - -// testNet3GenesisHash is the hash of the first block in the block chain for the -// test network (version 3). -var testNet3GenesisHash = wire.ShaHash([wire.HashSize]byte{ // Make go vet happy. - 0x43, 0x49, 0x7f, 0xd7, 0xf8, 0x26, 0x95, 0x71, - 0x08, 0xf4, 0xa3, 0x0f, 0xd9, 0xce, 0xc3, 0xae, - 0xba, 0x79, 0x97, 0x20, 0x84, 0xe9, 0x0e, 0xad, - 0x01, 0xea, 0x33, 0x09, 0x00, 0x00, 0x00, 0x00, -}) - -// testNet3GenesisMerkleRoot is the hash of the first transaction in the genesis -// block for the test network (version 3). It is the same as the merkle root -// for the main network. -var testNet3GenesisMerkleRoot = genesisMerkleRoot - -// testNet3GenesisBlock defines the genesis block of the block chain which -// serves as the public transaction ledger for the test network (version 3). -var testNet3GenesisBlock = wire.MsgBlock{ - Header: wire.BlockHeader{ - Version: 1, - PrevBlock: wire.ShaHash{}, // 0000000000000000000000000000000000000000000000000000000000000000 - MerkleRoot: testNet3GenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b - Timestamp: time.Unix(1296688602, 0), // 2011-02-02 23:16:42 +0000 UTC - Bits: 0x1d00ffff, // 486604799 [00000000ffff0000000000000000000000000000000000000000000000000000] - Nonce: 0x18aea41a, // 414098458 - }, - Transactions: []*wire.MsgTx{&genesisCoinbaseTx}, -} - -// simNetGenesisHash is the hash of the first block in the block chain for the -// simulation test network. -var simNetGenesisHash = wire.ShaHash([wire.HashSize]byte{ // Make go vet happy. - 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, - 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, - 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, - 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, -}) - -// simNetGenesisMerkleRoot is the hash of the first transaction in the genesis -// block for the simulation test network. It is the same as the merkle root for -// the main network. -var simNetGenesisMerkleRoot = genesisMerkleRoot - -// simNetGenesisBlock defines the genesis block of the block chain which serves -// as the public transaction ledger for the simulation test network. 
-var simNetGenesisBlock = wire.MsgBlock{ - Header: wire.BlockHeader{ - Version: 1, - PrevBlock: wire.ShaHash{}, // 0000000000000000000000000000000000000000000000000000000000000000 - MerkleRoot: simNetGenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b - Timestamp: time.Unix(1401292357, 0), // 2014-05-28 15:52:37 +0000 UTC - Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000] - Nonce: 2, - }, - Transactions: []*wire.MsgTx{&genesisCoinbaseTx}, -} diff --git a/chaincfg/genesis_test.go b/chaincfg/genesis_test.go index af51e1fe..032b6106 100644 --- a/chaincfg/genesis_test.go +++ b/chaincfg/genesis_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -6,279 +7,9 @@ package chaincfg_test import ( "bytes" + "encoding/hex" "testing" - "github.com/btcsuite/btcd/chaincfg" "github.com/davecgh/go-spew/spew" + "github.com/decred/dcrd/chaincfg" ) - -// TestGenesisBlock tests the genesis block of the main network for validity by -// checking the encoded bytes and hashes. -func TestGenesisBlock(t *testing.T) { - // Encode the genesis block to raw bytes. - var buf bytes.Buffer - err := chaincfg.MainNetParams.GenesisBlock.Serialize(&buf) - if err != nil { - t.Fatalf("TestGenesisBlock: %v", err) - } - - // Ensure the encoded block matches the expected bytes. - if !bytes.Equal(buf.Bytes(), genesisBlockBytes) { - t.Fatalf("TestGenesisBlock: Genesis block does not appear valid - "+ - "got %v, want %v", spew.Sdump(buf.Bytes()), - spew.Sdump(genesisBlockBytes)) - } - - // Check hash of the block against expected hash. - hash := chaincfg.MainNetParams.GenesisBlock.BlockSha() - if !chaincfg.MainNetParams.GenesisHash.IsEqual(&hash) { - t.Fatalf("TestGenesisBlock: Genesis block hash does not "+ - "appear valid - got %v, want %v", spew.Sdump(hash), - spew.Sdump(chaincfg.MainNetParams.GenesisHash)) - } -} - -// TestRegTestGenesisBlock tests the genesis block of the regression test -// network for validity by checking the encoded bytes and hashes. -func TestRegTestGenesisBlock(t *testing.T) { - // Encode the genesis block to raw bytes. - var buf bytes.Buffer - err := chaincfg.RegressionNetParams.GenesisBlock.Serialize(&buf) - if err != nil { - t.Fatalf("TestRegTestGenesisBlock: %v", err) - } - - // Ensure the encoded block matches the expected bytes. - if !bytes.Equal(buf.Bytes(), regTestGenesisBlockBytes) { - t.Fatalf("TestRegTestGenesisBlock: Genesis block does not "+ - "appear valid - got %v, want %v", - spew.Sdump(buf.Bytes()), - spew.Sdump(regTestGenesisBlockBytes)) - } - - // Check hash of the block against expected hash. - hash := chaincfg.RegressionNetParams.GenesisBlock.BlockSha() - if !chaincfg.RegressionNetParams.GenesisHash.IsEqual(&hash) { - t.Fatalf("TestRegTestGenesisBlock: Genesis block hash does "+ - "not appear valid - got %v, want %v", spew.Sdump(hash), - spew.Sdump(chaincfg.RegressionNetParams.GenesisHash)) - } -} - -// TestTestNet3GenesisBlock tests the genesis block of the test network (version -// 3) for validity by checking the encoded bytes and hashes. -func TestTestNet3GenesisBlock(t *testing.T) { - // Encode the genesis block to raw bytes. - var buf bytes.Buffer - err := chaincfg.TestNet3Params.GenesisBlock.Serialize(&buf) - if err != nil { - t.Fatalf("TestTestNet3GenesisBlock: %v", err) - } - - // Ensure the encoded block matches the expected bytes. 
- if !bytes.Equal(buf.Bytes(), testNet3GenesisBlockBytes) { - t.Fatalf("TestTestNet3GenesisBlock: Genesis block does not "+ - "appear valid - got %v, want %v", - spew.Sdump(buf.Bytes()), - spew.Sdump(testNet3GenesisBlockBytes)) - } - - // Check hash of the block against expected hash. - hash := chaincfg.TestNet3Params.GenesisBlock.BlockSha() - if !chaincfg.TestNet3Params.GenesisHash.IsEqual(&hash) { - t.Fatalf("TestTestNet3GenesisBlock: Genesis block hash does "+ - "not appear valid - got %v, want %v", spew.Sdump(hash), - spew.Sdump(chaincfg.TestNet3Params.GenesisHash)) - } -} - -// TestSimNetGenesisBlock tests the genesis block of the simulation test network -// for validity by checking the encoded bytes and hashes. -func TestSimNetGenesisBlock(t *testing.T) { - // Encode the genesis block to raw bytes. - var buf bytes.Buffer - err := chaincfg.SimNetParams.GenesisBlock.Serialize(&buf) - if err != nil { - t.Fatalf("TestSimNetGenesisBlock: %v", err) - } - - // Ensure the encoded block matches the expected bytes. - if !bytes.Equal(buf.Bytes(), simNetGenesisBlockBytes) { - t.Fatalf("TestSimNetGenesisBlock: Genesis block does not "+ - "appear valid - got %v, want %v", - spew.Sdump(buf.Bytes()), - spew.Sdump(simNetGenesisBlockBytes)) - } - - // Check hash of the block against expected hash. - hash := chaincfg.SimNetParams.GenesisBlock.BlockSha() - if !chaincfg.SimNetParams.GenesisHash.IsEqual(&hash) { - t.Fatalf("TestSimNetGenesisBlock: Genesis block hash does "+ - "not appear valid - got %v, want %v", spew.Sdump(hash), - spew.Sdump(chaincfg.SimNetParams.GenesisHash)) - } -} - -// genesisBlockBytes are the wire encoded bytes for the genesis block of the -// main network as of protocol version 60002. -var genesisBlockBytes = []byte{ - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x3b, 0xa3, 0xed, 0xfd, /* |....;...| */ - 0x7a, 0x7b, 0x12, 0xb2, 0x7a, 0xc7, 0x2c, 0x3e, /* |z{..z.,>| */ - 0x67, 0x76, 0x8f, 0x61, 0x7f, 0xc8, 0x1b, 0xc3, /* |gv.a....| */ - 0x88, 0x8a, 0x51, 0x32, 0x3a, 0x9f, 0xb8, 0xaa, /* |..Q2:...| */ - 0x4b, 0x1e, 0x5e, 0x4a, 0x29, 0xab, 0x5f, 0x49, /* |K.^J)._I| */ - 0xff, 0xff, 0x00, 0x1d, 0x1d, 0xac, 0x2b, 0x7c, /* |......+|| */ - 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, /* |........| */ - 0xff, 0xff, 0x4d, 0x04, 0xff, 0xff, 0x00, 0x1d, /* |..M.....| */ - 0x01, 0x04, 0x45, 0x54, 0x68, 0x65, 0x20, 0x54, /* |..EThe T| */ - 0x69, 0x6d, 0x65, 0x73, 0x20, 0x30, 0x33, 0x2f, /* |imes 03/| */ - 0x4a, 0x61, 0x6e, 0x2f, 0x32, 0x30, 0x30, 0x39, /* |Jan/2009| */ - 0x20, 0x43, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x6c, /* | Chancel| */ - 0x6c, 0x6f, 0x72, 0x20, 0x6f, 0x6e, 0x20, 0x62, /* |lor on b| */ - 0x72, 0x69, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, /* |rink of | */ - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, 0x62, /* |second b| */ - 0x61, 0x69, 0x6c, 0x6f, 0x75, 0x74, 0x20, 0x66, /* |ailout f| */ - 0x6f, 0x72, 0x20, 0x62, 0x61, 0x6e, 0x6b, 0x73, /* |or banks| */ - 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, 0x05, /* |........| */ - 0x2a, 0x01, 0x00, 0x00, 0x00, 0x43, 0x41, 0x04, /* 
|*....CA.| */ - 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, 0x48, 0x27, /* |g....UH'| */ - 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, 0xb7, 0x10, /* |.g..q0..| */ - 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, 0x09, 0xa6, /* |\..(.9..| */ - 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, 0xde, 0xb6, /* |yb...a..| */ - 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, 0x38, 0xc4, /* |I..?L.8.| */ - 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, 0x12, 0xde, /* |.U......| */ - 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, 0x8d, 0x57, /* |\8M....W| */ - 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, 0x1d, 0x5f, /* |.Lp+k.._|*/ - 0xac, 0x00, 0x00, 0x00, 0x00, /* |.....| */ -} - -// regTestGenesisBlockBytes are the wire encoded bytes for the genesis block of -// the regression test network as of protocol version 60002. -var regTestGenesisBlockBytes = []byte{ - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x3b, 0xa3, 0xed, 0xfd, /* |....;...| */ - 0x7a, 0x7b, 0x12, 0xb2, 0x7a, 0xc7, 0x2c, 0x3e, /* |z{..z.,>| */ - 0x67, 0x76, 0x8f, 0x61, 0x7f, 0xc8, 0x1b, 0xc3, /* |gv.a....| */ - 0x88, 0x8a, 0x51, 0x32, 0x3a, 0x9f, 0xb8, 0xaa, /* |..Q2:...| */ - 0x4b, 0x1e, 0x5e, 0x4a, 0xda, 0xe5, 0x49, 0x4d, /* |K.^J)._I| */ - 0xff, 0xff, 0x7f, 0x20, 0x02, 0x00, 0x00, 0x00, /* |......+|| */ - 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, /* |........| */ - 0xff, 0xff, 0x4d, 0x04, 0xff, 0xff, 0x00, 0x1d, /* |..M.....| */ - 0x01, 0x04, 0x45, 0x54, 0x68, 0x65, 0x20, 0x54, /* |..EThe T| */ - 0x69, 0x6d, 0x65, 0x73, 0x20, 0x30, 0x33, 0x2f, /* |imes 03/| */ - 0x4a, 0x61, 0x6e, 0x2f, 0x32, 0x30, 0x30, 0x39, /* |Jan/2009| */ - 0x20, 0x43, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x6c, /* | Chancel| */ - 0x6c, 0x6f, 0x72, 0x20, 0x6f, 0x6e, 0x20, 0x62, /* |lor on b| */ - 0x72, 0x69, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, /* |rink of | */ - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, 0x62, /* |second b| */ - 0x61, 0x69, 0x6c, 0x6f, 0x75, 0x74, 0x20, 0x66, /* |ailout f| */ - 0x6f, 0x72, 0x20, 0x62, 0x61, 0x6e, 0x6b, 0x73, /* |or banks| */ - 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, 0x05, /* |........| */ - 0x2a, 0x01, 0x00, 0x00, 0x00, 0x43, 0x41, 0x04, /* |*....CA.| */ - 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, 0x48, 0x27, /* |g....UH'| */ - 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, 0xb7, 0x10, /* |.g..q0..| */ - 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, 0x09, 0xa6, /* |\..(.9..| */ - 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, 0xde, 0xb6, /* |yb...a..| */ - 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, 0x38, 0xc4, /* |I..?L.8.| */ - 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, 0x12, 0xde, /* |.U......| */ - 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, 0x8d, 0x57, /* |\8M....W| */ - 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, 0x1d, 0x5f, /* |.Lp+k.._|*/ - 0xac, 0x00, 0x00, 0x00, 0x00, /* |.....| */ -} - -// testNet3GenesisBlockBytes are the wire encoded bytes for the genesis block of -// the test network (version 3) as of protocol version 60002. 
-var testNet3GenesisBlockBytes = []byte{ - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x3b, 0xa3, 0xed, 0xfd, /* |....;...| */ - 0x7a, 0x7b, 0x12, 0xb2, 0x7a, 0xc7, 0x2c, 0x3e, /* |z{..z.,>| */ - 0x67, 0x76, 0x8f, 0x61, 0x7f, 0xc8, 0x1b, 0xc3, /* |gv.a....| */ - 0x88, 0x8a, 0x51, 0x32, 0x3a, 0x9f, 0xb8, 0xaa, /* |..Q2:...| */ - 0x4b, 0x1e, 0x5e, 0x4a, 0xda, 0xe5, 0x49, 0x4d, /* |K.^J)._I| */ - 0xff, 0xff, 0x00, 0x1d, 0x1a, 0xa4, 0xae, 0x18, /* |......+|| */ - 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, /* |........| */ - 0xff, 0xff, 0x4d, 0x04, 0xff, 0xff, 0x00, 0x1d, /* |..M.....| */ - 0x01, 0x04, 0x45, 0x54, 0x68, 0x65, 0x20, 0x54, /* |..EThe T| */ - 0x69, 0x6d, 0x65, 0x73, 0x20, 0x30, 0x33, 0x2f, /* |imes 03/| */ - 0x4a, 0x61, 0x6e, 0x2f, 0x32, 0x30, 0x30, 0x39, /* |Jan/2009| */ - 0x20, 0x43, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x6c, /* | Chancel| */ - 0x6c, 0x6f, 0x72, 0x20, 0x6f, 0x6e, 0x20, 0x62, /* |lor on b| */ - 0x72, 0x69, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, /* |rink of | */ - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, 0x62, /* |second b| */ - 0x61, 0x69, 0x6c, 0x6f, 0x75, 0x74, 0x20, 0x66, /* |ailout f| */ - 0x6f, 0x72, 0x20, 0x62, 0x61, 0x6e, 0x6b, 0x73, /* |or banks| */ - 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, 0x05, /* |........| */ - 0x2a, 0x01, 0x00, 0x00, 0x00, 0x43, 0x41, 0x04, /* |*....CA.| */ - 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, 0x48, 0x27, /* |g....UH'| */ - 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, 0xb7, 0x10, /* |.g..q0..| */ - 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, 0x09, 0xa6, /* |\..(.9..| */ - 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, 0xde, 0xb6, /* |yb...a..| */ - 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, 0x38, 0xc4, /* |I..?L.8.| */ - 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, 0x12, 0xde, /* |.U......| */ - 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, 0x8d, 0x57, /* |\8M....W| */ - 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, 0x1d, 0x5f, /* |.Lp+k.._|*/ - 0xac, 0x00, 0x00, 0x00, 0x00, /* |.....| */ -} - -// simNetGenesisBlockBytes are the wire encoded bytes for the genesis block of -// the simulation test network as of protocol version 70002. 
-var simNetGenesisBlockBytes = []byte{ - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x3b, 0xa3, 0xed, 0xfd, /* |....;...| */ - 0x7a, 0x7b, 0x12, 0xb2, 0x7a, 0xc7, 0x2c, 0x3e, /* |z{..z.,>| */ - 0x67, 0x76, 0x8f, 0x61, 0x7f, 0xc8, 0x1b, 0xc3, /* |gv.a....| */ - 0x88, 0x8a, 0x51, 0x32, 0x3a, 0x9f, 0xb8, 0xaa, /* |..Q2:...| */ - 0x4b, 0x1e, 0x5e, 0x4a, 0x45, 0x06, 0x86, 0x53, /* |K.^J)._I| */ - 0xff, 0xff, 0x7f, 0x20, 0x02, 0x00, 0x00, 0x00, /* |......+|| */ - 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, /* |........| */ - 0xff, 0xff, 0x4d, 0x04, 0xff, 0xff, 0x00, 0x1d, /* |..M.....| */ - 0x01, 0x04, 0x45, 0x54, 0x68, 0x65, 0x20, 0x54, /* |..EThe T| */ - 0x69, 0x6d, 0x65, 0x73, 0x20, 0x30, 0x33, 0x2f, /* |imes 03/| */ - 0x4a, 0x61, 0x6e, 0x2f, 0x32, 0x30, 0x30, 0x39, /* |Jan/2009| */ - 0x20, 0x43, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x6c, /* | Chancel| */ - 0x6c, 0x6f, 0x72, 0x20, 0x6f, 0x6e, 0x20, 0x62, /* |lor on b| */ - 0x72, 0x69, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, /* |rink of | */ - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, 0x62, /* |second b| */ - 0x61, 0x69, 0x6c, 0x6f, 0x75, 0x74, 0x20, 0x66, /* |ailout f| */ - 0x6f, 0x72, 0x20, 0x62, 0x61, 0x6e, 0x6b, 0x73, /* |or banks| */ - 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, 0x05, /* |........| */ - 0x2a, 0x01, 0x00, 0x00, 0x00, 0x43, 0x41, 0x04, /* |*....CA.| */ - 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, 0x48, 0x27, /* |g....UH'| */ - 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, 0xb7, 0x10, /* |.g..q0..| */ - 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, 0x09, 0xa6, /* |\..(.9..| */ - 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, 0xde, 0xb6, /* |yb...a..| */ - 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, 0x38, 0xc4, /* |I..?L.8.| */ - 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, 0x12, 0xde, /* |.U......| */ - 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, 0x8d, 0x57, /* |\8M....W| */ - 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, 0x1d, 0x5f, /* |.Lp+k.._|*/ - 0xac, 0x00, 0x00, 0x00, 0x00, /* |.....| */ -} diff --git a/chaincfg/internal_test.go b/chaincfg/internal_test.go index 6db2da14..706b0544 100644 --- a/chaincfg/internal_test.go +++ b/chaincfg/internal_test.go @@ -1,14 +1,17 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + package chaincfg import ( + "github.com/decred/dcrd/chaincfg/chainhash" "testing" ) func TestInvalidShaStr(t *testing.T) { - defer func() { - if r := recover(); r == nil { - t.Errorf("Expected panic for invalid sha string, got nil") - } - }() - newShaHashFromStr("banana") + _, err := chainhash.NewHashFromStr("banana") + if err == nil { + t.Error("Invalid string should fail.") + } } diff --git a/chaincfg/params.go b/chaincfg/params.go index 053c1b2a..4ad50739 100644 --- a/chaincfg/params.go +++ b/chaincfg/params.go @@ -1,4 +1,5 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
@@ -7,420 +8,8 @@ package chaincfg import ( "errors" "math/big" + "time" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" ) - -// These variables are the chain proof-of-work limit parameters for each default -// network. -var ( - // bigOne is 1 represented as a big.Int. It is defined here to avoid - // the overhead of creating it multiple times. - bigOne = big.NewInt(1) - - // mainPowLimit is the highest proof of work value a Bitcoin block can - // have for the main network. It is the value 2^224 - 1. - mainPowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 224), bigOne) - - // regressionPowLimit is the highest proof of work value a Bitcoin block - // can have for the regression test network. It is the value 2^255 - 1. - regressionPowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne) - - // testNet3PowLimit is the highest proof of work value a Bitcoin block - // can have for the test network (version 3). It is the value - // 2^224 - 1. - testNet3PowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 224), bigOne) - - // simNetPowLimit is the highest proof of work value a Bitcoin block - // can have for the simulation test network. It is the value 2^255 - 1. - simNetPowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne) -) - -// Checkpoint identifies a known good point in the block chain. Using -// checkpoints allows a few optimizations for old blocks during initial download -// and also prevents forks from old blocks. -// -// Each checkpoint is selected based upon several factors. See the -// documentation for blockchain.IsCheckpointCandidate for details on the -// selection criteria. -type Checkpoint struct { - Height int64 - Hash *wire.ShaHash -} - -// Params defines a Bitcoin network by its parameters. These parameters may be -// used by Bitcoin applications to differentiate networks as well as addresses -// and keys for one network from those intended for use on another network. -type Params struct { - Name string - Net wire.BitcoinNet - DefaultPort string - - // Chain parameters - GenesisBlock *wire.MsgBlock - GenesisHash *wire.ShaHash - PowLimit *big.Int - PowLimitBits uint32 - SubsidyHalvingInterval int32 - ResetMinDifficulty bool - GenerateSupported bool - - // Checkpoints ordered from oldest to newest. - Checkpoints []Checkpoint - - // Enforce current block version once network has - // upgraded. This is part of BIP0034. - BlockEnforceNumRequired uint64 - - // Reject previous block versions once network has - // upgraded. This is part of BIP0034. - BlockRejectNumRequired uint64 - - // The number of nodes to check. This is part of BIP0034. - BlockUpgradeNumToCheck uint64 - - // Mempool parameters - RelayNonStdTxs bool - - // Address encoding magics - PubKeyHashAddrID byte // First byte of a P2PKH address - ScriptHashAddrID byte // First byte of a P2SH address - PrivateKeyID byte // First byte of a WIF private key - - // BIP32 hierarchical deterministic extended key magics - HDPrivateKeyID [4]byte - HDPublicKeyID [4]byte - - // BIP44 coin type used in the hierarchical deterministic path for - // address generation. - HDCoinType uint32 -} - -// MainNetParams defines the network parameters for the main Bitcoin network. 
-var MainNetParams = Params{ - Name: "mainnet", - Net: wire.MainNet, - DefaultPort: "8333", - - // Chain parameters - GenesisBlock: &genesisBlock, - GenesisHash: &genesisHash, - PowLimit: mainPowLimit, - PowLimitBits: 0x1d00ffff, - SubsidyHalvingInterval: 210000, - ResetMinDifficulty: false, - GenerateSupported: false, - - // Checkpoints ordered from oldest to newest. - Checkpoints: []Checkpoint{ - {11111, newShaHashFromStr("0000000069e244f73d78e8fd29ba2fd2ed618bd6fa2ee92559f542fdb26e7c1d")}, - {33333, newShaHashFromStr("000000002dd5588a74784eaa7ab0507a18ad16a236e7b1ce69f00d7ddfb5d0a6")}, - {74000, newShaHashFromStr("0000000000573993a3c9e41ce34471c079dcf5f52a0e824a81e7f953b8661a20")}, - {105000, newShaHashFromStr("00000000000291ce28027faea320c8d2b054b2e0fe44a773f3eefb151d6bdc97")}, - {134444, newShaHashFromStr("00000000000005b12ffd4cd315cd34ffd4a594f430ac814c91184a0d42d2b0fe")}, - {168000, newShaHashFromStr("000000000000099e61ea72015e79632f216fe6cb33d7899acb35b75c8303b763")}, - {193000, newShaHashFromStr("000000000000059f452a5f7340de6682a977387c17010ff6e6c3bd83ca8b1317")}, - {210000, newShaHashFromStr("000000000000048b95347e83192f69cf0366076336c639f9b7228e9ba171342e")}, - {216116, newShaHashFromStr("00000000000001b4f4b433e81ee46494af945cf96014816a4e2370f11b23df4e")}, - {225430, newShaHashFromStr("00000000000001c108384350f74090433e7fcf79a606b8e797f065b130575932")}, - {250000, newShaHashFromStr("000000000000003887df1f29024b06fc2200b55f8af8f35453d7be294df2d214")}, - {267300, newShaHashFromStr("000000000000000a83fbd660e918f218bf37edd92b748ad940483c7c116179ac")}, - {279000, newShaHashFromStr("0000000000000001ae8c72a0b0c301f67e3afca10e819efa9041e458e9bd7e40")}, - {300255, newShaHashFromStr("0000000000000000162804527c6e9b9f0563a280525f9d08c12041def0a0f3b2")}, - {319400, newShaHashFromStr("000000000000000021c6052e9becade189495d1c539aa37c58917305fd15f13b")}, - {343185, newShaHashFromStr("0000000000000000072b8bf361d01a6ba7d445dd024203fafc78768ed4368554")}, - {352940, newShaHashFromStr("000000000000000010755df42dba556bb72be6a32f3ce0b6941ce4430152c9ff")}, - }, - - // Enforce current block version once majority of the network has - // upgraded. - // 75% (750 / 1000) - // Reject previous block versions once a majority of the network has - // upgraded. - // 95% (950 / 1000) - BlockEnforceNumRequired: 750, - BlockRejectNumRequired: 950, - BlockUpgradeNumToCheck: 1000, - - // Mempool parameters - RelayNonStdTxs: false, - - // Address encoding magics - PubKeyHashAddrID: 0x00, // starts with 1 - ScriptHashAddrID: 0x05, // starts with 3 - PrivateKeyID: 0x80, // starts with 5 (uncompressed) or K (compressed) - - // BIP32 hierarchical deterministic extended key magics - HDPrivateKeyID: [4]byte{0x04, 0x88, 0xad, 0xe4}, // starts with xprv - HDPublicKeyID: [4]byte{0x04, 0x88, 0xb2, 0x1e}, // starts with xpub - - // BIP44 coin type used in the hierarchical deterministic path for - // address generation. - HDCoinType: 0, -} - -// RegressionNetParams defines the network parameters for the regression test -// Bitcoin network. Not to be confused with the test Bitcoin network (version -// 3), this network is sometimes simply called "testnet". 
-var RegressionNetParams = Params{
- Name: "regtest",
- Net: wire.TestNet,
- DefaultPort: "18444",
-
- // Chain parameters
- GenesisBlock: &regTestGenesisBlock,
- GenesisHash: &regTestGenesisHash,
- PowLimit: regressionPowLimit,
- PowLimitBits: 0x207fffff,
- SubsidyHalvingInterval: 150,
- ResetMinDifficulty: true,
- GenerateSupported: true,
-
- // Checkpoints ordered from oldest to newest.
- Checkpoints: nil,
-
- // Enforce current block version once majority of the network has
- // upgraded.
- // 75% (750 / 1000)
- // Reject previous block versions once a majority of the network has
- // upgraded.
- // 95% (950 / 1000)
- BlockEnforceNumRequired: 750,
- BlockRejectNumRequired: 950,
- BlockUpgradeNumToCheck: 1000,
-
- // Mempool parameters
- RelayNonStdTxs: true,
-
- // Address encoding magics
- PubKeyHashAddrID: 0x6f, // starts with m or n
- ScriptHashAddrID: 0xc4, // starts with 2
- PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
-
- // BIP32 hierarchical deterministic extended key magics
- HDPrivateKeyID: [4]byte{0x04, 0x35, 0x83, 0x94}, // starts with tprv
- HDPublicKeyID: [4]byte{0x04, 0x35, 0x87, 0xcf}, // starts with tpub
-
- // BIP44 coin type used in the hierarchical deterministic path for
- // address generation.
- HDCoinType: 1,
-}
-
-// TestNet3Params defines the network parameters for the test Bitcoin network
-// (version 3). Not to be confused with the regression test network, this
-// network is sometimes simply called "testnet".
-var TestNet3Params = Params{
- Name: "testnet3",
- Net: wire.TestNet3,
- DefaultPort: "18333",
-
- // Chain parameters
- GenesisBlock: &testNet3GenesisBlock,
- GenesisHash: &testNet3GenesisHash,
- PowLimit: testNet3PowLimit,
- PowLimitBits: 0x1d00ffff,
- SubsidyHalvingInterval: 210000,
- ResetMinDifficulty: true,
- GenerateSupported: false,
-
- // Checkpoints ordered from oldest to newest.
- Checkpoints: []Checkpoint{
- {546, newShaHashFromStr("000000002a936ca763904c3c35fce2f3556c559c0214345d31b1bcebf76acb70")},
- },
-
- // Enforce current block version once majority of the network has
- // upgraded.
- // 51% (51 / 100)
- // Reject previous block versions once a majority of the network has
- // upgraded.
- // 75% (75 / 100)
- BlockEnforceNumRequired: 51,
- BlockRejectNumRequired: 75,
- BlockUpgradeNumToCheck: 100,
-
- // Mempool parameters
- RelayNonStdTxs: true,
-
- // Address encoding magics
- PubKeyHashAddrID: 0x6f, // starts with m or n
- ScriptHashAddrID: 0xc4, // starts with 2
- PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
-
- // BIP32 hierarchical deterministic extended key magics
- HDPrivateKeyID: [4]byte{0x04, 0x35, 0x83, 0x94}, // starts with tprv
- HDPublicKeyID: [4]byte{0x04, 0x35, 0x87, 0xcf}, // starts with tpub
-
- // BIP44 coin type used in the hierarchical deterministic path for
- // address generation.
- HDCoinType: 1,
-}
-
-// SimNetParams defines the network parameters for the simulation test Bitcoin
-// network. This network is similar to the normal test network except it is
-// intended for private use within a group of individuals doing simulation
-// testing. The functionality is intended to differ in that the only nodes
-// which are specifically specified are used to create the network rather than
-// following normal discovery rules. This is important as otherwise it would
-// just turn into another public testnet.
-var SimNetParams = Params{ - Name: "simnet", - Net: wire.SimNet, - DefaultPort: "18555", - - // Chain parameters - GenesisBlock: &simNetGenesisBlock, - GenesisHash: &simNetGenesisHash, - PowLimit: simNetPowLimit, - PowLimitBits: 0x207fffff, - SubsidyHalvingInterval: 210000, - ResetMinDifficulty: true, - GenerateSupported: true, - - // Checkpoints ordered from oldest to newest. - Checkpoints: nil, - - // Enforce current block version once majority of the network has - // upgraded. - // 51% (51 / 100) - // Reject previous block versions once a majority of the network has - // upgraded. - // 75% (75 / 100) - BlockEnforceNumRequired: 51, - BlockRejectNumRequired: 75, - BlockUpgradeNumToCheck: 100, - - // Mempool parameters - RelayNonStdTxs: true, - - // Address encoding magics - PubKeyHashAddrID: 0x3f, // starts with S - ScriptHashAddrID: 0x7b, // starts with s - PrivateKeyID: 0x64, // starts with 4 (uncompressed) or F (compressed) - - // BIP32 hierarchical deterministic extended key magics - HDPrivateKeyID: [4]byte{0x04, 0x20, 0xb9, 0x00}, // starts with sprv - HDPublicKeyID: [4]byte{0x04, 0x20, 0xbd, 0x3a}, // starts with spub - - // BIP44 coin type used in the hierarchical deterministic path for - // address generation. - HDCoinType: 115, // ASCII for s -} - -var ( - // ErrDuplicateNet describes an error where the parameters for a Bitcoin - // network could not be set due to the network already being a standard - // network or previously-registered into this package. - ErrDuplicateNet = errors.New("duplicate Bitcoin network") - - // ErrUnknownHDKeyID describes an error where the provided id which - // is intended to identify the network for a hierarchical deterministic - // private extended key is not registered. - ErrUnknownHDKeyID = errors.New("unknown hd private extended key bytes") -) - -var ( - registeredNets = map[wire.BitcoinNet]struct{}{ - MainNetParams.Net: struct{}{}, - TestNet3Params.Net: struct{}{}, - RegressionNetParams.Net: struct{}{}, - SimNetParams.Net: struct{}{}, - } - - pubKeyHashAddrIDs = map[byte]struct{}{ - MainNetParams.PubKeyHashAddrID: struct{}{}, - TestNet3Params.PubKeyHashAddrID: struct{}{}, // shared with regtest - SimNetParams.PubKeyHashAddrID: struct{}{}, - } - - scriptHashAddrIDs = map[byte]struct{}{ - MainNetParams.ScriptHashAddrID: struct{}{}, - TestNet3Params.ScriptHashAddrID: struct{}{}, // shared with regtest - SimNetParams.ScriptHashAddrID: struct{}{}, - } - - // Testnet is shared with regtest. - hdPrivToPubKeyIDs = map[[4]byte][]byte{ - MainNetParams.HDPrivateKeyID: MainNetParams.HDPublicKeyID[:], - TestNet3Params.HDPrivateKeyID: TestNet3Params.HDPublicKeyID[:], - SimNetParams.HDPrivateKeyID: SimNetParams.HDPublicKeyID[:], - } -) - -// Register registers the network parameters for a Bitcoin network. This may -// error with ErrDuplicateNet if the network is already registered (either -// due to a previous Register call, or the network being one of the default -// networks). -// -// Network parameters should be registered into this package by a main package -// as early as possible. Then, library packages may lookup networks or network -// parameters based on inputs and work regardless of the network being standard -// or not. 
-func Register(params *Params) error { - if _, ok := registeredNets[params.Net]; ok { - return ErrDuplicateNet - } - registeredNets[params.Net] = struct{}{} - pubKeyHashAddrIDs[params.PubKeyHashAddrID] = struct{}{} - scriptHashAddrIDs[params.ScriptHashAddrID] = struct{}{} - hdPrivToPubKeyIDs[params.HDPrivateKeyID] = params.HDPublicKeyID[:] - return nil -} - -// IsPubKeyHashAddrID returns whether the id is an identifier known to prefix a -// pay-to-pubkey-hash address on any default or registered network. This is -// used when decoding an address string into a specific address type. It is up -// to the caller to check both this and IsScriptHashAddrID and decide whether an -// address is a pubkey hash address, script hash address, neither, or -// undeterminable (if both return true). -func IsPubKeyHashAddrID(id byte) bool { - _, ok := pubKeyHashAddrIDs[id] - return ok -} - -// IsScriptHashAddrID returns whether the id is an identifier known to prefix a -// pay-to-script-hash address on any default or registered network. This is -// used when decoding an address string into a specific address type. It is up -// to the caller to check both this and IsPubKeyHashAddrID and decide whether an -// address is a pubkey hash address, script hash address, neither, or -// undeterminable (if both return true). -func IsScriptHashAddrID(id byte) bool { - _, ok := scriptHashAddrIDs[id] - return ok -} - -// HDPrivateKeyToPublicKeyID accepts a private hierarchical deterministic -// extended key id and returns the associated public key id. When the provided -// id is not registered, the ErrUnknownHDKeyID error will be returned. -func HDPrivateKeyToPublicKeyID(id []byte) ([]byte, error) { - if len(id) != 4 { - return nil, ErrUnknownHDKeyID - } - - var key [4]byte - copy(key[:], id) - pubBytes, ok := hdPrivToPubKeyIDs[key] - if !ok { - return nil, ErrUnknownHDKeyID - } - - return pubBytes, nil -} - -// newShaHashFromStr converts the passed big-endian hex string into a -// wire.ShaHash. It only differs from the one available in wire in that -// it panics on an error since it will only (and must only) be called with -// hard-coded, and therefore known good, hashes. -func newShaHashFromStr(hexStr string) *wire.ShaHash { - sha, err := wire.NewShaHashFromStr(hexStr) - if err != nil { - // Ordinarily I don't like panics in library code since it - // can take applications down without them having a chance to - // recover which is extremely annoying, however an exception is - // being made in this case because the only way this can panic - // is if there is an error in the hard-coded hashes. Thus it - // will only ever potentially panic on init and therefore is - // 100% predictable. - panic(err) - } - return sha -} diff --git a/chaincfg/premine.go b/chaincfg/premine.go new file mode 100644 index 00000000..1814c402 --- /dev/null +++ b/chaincfg/premine.go @@ -0,0 +1,3172 @@ +// Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package chaincfg + +// BlockOneLedgerMainNet is the block one output ledger for the main +// network. 
+var BlockOneLedgerMainNet = []*TokenPayout{ + &TokenPayout{"DsaAKsMvZ6HrqhmbhLjV9qVbPkkzF5daowT", 5000 * 1e8}, + &TokenPayout{"DsaAtdVq78c4zM7fhHbbRHPd8UoZn91ZoR3", 5000 * 1e8}, + &TokenPayout{"DsabRUzLFikdEx62WeSyFjvqVg7i8DkuTjo", 5000 * 1e8}, + &TokenPayout{"DsaiQhBpjr2Xq9T6Zso5vAFEPgUEWVFKVC9", 5000 * 1e8}, + &TokenPayout{"DsaPv23JyLgjrYFmXnHdiou1DYpyAykCkZb", 5000 * 1e8}, + &TokenPayout{"DsaSkJbREyuYvFYgJdatxRJNSREXFWtJc5H", 5000 * 1e8}, + &TokenPayout{"DsaxUwRcbWg559c1gWECY9Ei5VPTrd9vXrb", 5000 * 1e8}, + &TokenPayout{"DsbaqJC3h9DJmMGdy8hNddNQhiFJi6p5VoM", 5000 * 1e8}, + &TokenPayout{"DsbC3ywTnt97xMdhwq9Vma75wZotpFWWS1y", 5000 * 1e8}, + &TokenPayout{"DsbcjZNGqTeLhKnBapYTFwqmPxCwEeXCoZ1", 5000 * 1e8}, + &TokenPayout{"DsbDKNAwbFiBfaCeCJEmfmr8USbks2N8Mms", 5000 * 1e8}, + &TokenPayout{"DsbnFzaBDZXrecHSoafZDBNgMnN35WS7gaM", 5000 * 1e8}, + &TokenPayout{"DsbsfexxS6rg2qXCu4DcS5DVUFqYK1rtpJJ", 5000 * 1e8}, + &TokenPayout{"DsbxoArK7mr33zvHZ26pKbDTy6rxNG2xKaC", 5000 * 1e8}, + &TokenPayout{"DsbXWTjT9ft22GABuQjRzgXdBUHbyJFxnvT", 5000 * 1e8}, + &TokenPayout{"Dsc1o7eCSDr4z31gLkE81FvT3KNGKfDxcn1", 5000 * 1e8}, + &TokenPayout{"Dsc6V2ByWPETTSCTqL1EK8ef9LLAXYHh8WG", 5000 * 1e8}, + &TokenPayout{"DscE1ypiJywzn7YEjEJ46BpFUw82DqJNx5S", 5000 * 1e8}, + &TokenPayout{"Dscfxx4ABb2Lue4jiDthguPQ8V12RS1pXgd", 5000 * 1e8}, + &TokenPayout{"DscGenaJA3BdpU86RYxq1fpX2f83xpoZ3eo", 5000 * 1e8}, + &TokenPayout{"DscMxuze6h7J3wxAfKfY6csWBQnYjMwZ2dA", 5000 * 1e8}, + &TokenPayout{"DscqwvaBFwcLGvGzVfVv4j8Gq8NxxiJyVhN", 5000 * 1e8}, + &TokenPayout{"DscVtzQ6gGifNFvbPSg2LUohgkoqPPKMTvQ", 5000 * 1e8}, + &TokenPayout{"Dsd9WpiUjaK8cJgWTngBwdk4HFqqQe7NMKm", 5000 * 1e8}, + &TokenPayout{"DsdCdzo4oya45m3AV8aGtrZuSokzN4togLg", 5000 * 1e8}, + &TokenPayout{"DsdEgYzjSokBhZ12Nd3UKqmkVdYptNn41co", 5000 * 1e8}, + &TokenPayout{"DsdeJWst5M5d6jZFnHHaasKciZtKVzGLVWT", 5000 * 1e8}, + &TokenPayout{"Dsdex51H8afbt9aVgGHAjvACgXTAM6QmSEH", 5000 * 1e8}, + &TokenPayout{"DsdjDtSwhQcLbmrg5Muwb3cFNT44Uq927kP", 5000 * 1e8}, + &TokenPayout{"DsdmHdBWWUFHfhsJxXmvL3DqLLrbo65B3KP", 5000 * 1e8}, + &TokenPayout{"DsdqBFwWrkQdgytzre5Msh7myssoCVJCjRB", 5000 * 1e8}, + &TokenPayout{"DsdQzmpXKqLWtzKQzhjW8yD5GrQ13J3A8fq", 5000 * 1e8}, + &TokenPayout{"Dse4dbCL8rRtAFQV46TAkqNXeGX3bh8RNrv", 5000 * 1e8}, + &TokenPayout{"Dse78aC4tE4eUhuyhYXN4nKDxb3v3rVo5Ys", 5000 * 1e8}, + &TokenPayout{"DseAaGdqwhTRKJMwrLXXVdy12uMyjU9hL8U", 5000 * 1e8}, + &TokenPayout{"DseE1xRuHQZcXEPEeymXhJfZFhEmSJqEkxu", 5000 * 1e8}, + &TokenPayout{"DseEGmiWfmf1LbFxCkAGHvFU9232YhpcjQK", 5000 * 1e8}, + &TokenPayout{"DsempyEYc56TtxkyXCSK6ARFWg6nohNRrLw", 5000 * 1e8}, + &TokenPayout{"Dsep8wzzJhMrbPBCe2cf51cnbD1dpjVaQqV", 5000 * 1e8}, + &TokenPayout{"DseSYHN81JQ2DnymVBxEGJ8q6AQmVfcncm3", 5000 * 1e8}, + &TokenPayout{"DsewzwQH9NraNVy3LE6yzvPQfxNdbZUgUah", 5000 * 1e8}, + &TokenPayout{"DseXxQhfssHzwS6GtD7EuLCGbaon8SvFxDo", 5000 * 1e8}, + &TokenPayout{"DsfBHUhvUnCQ4wqvp21wAoy9hrrz4c7N78T", 5000 * 1e8}, + &TokenPayout{"DsfM8YQWXLMH7jvfjMJdz85hQwKnNjMeeFB", 5000 * 1e8}, + &TokenPayout{"DsfuNLpJyTTmM6hN1HEStpfNBScFNdJ2r3g", 5000 * 1e8}, + &TokenPayout{"Dsg7CocZtU5NH7gWLzwBcFoBfW2Dud5uL3q", 5000 * 1e8}, + &TokenPayout{"Dsg7sHLPDhSLv7TyZPoz5Q8CD6L5mxNsKDq", 5000 * 1e8}, + &TokenPayout{"Dsg8amRtv4pNFmsWkiZncT7zmXtuKDEUjjW", 5000 * 1e8}, + &TokenPayout{"Dsg8Gd815HgUXJWmqqHe5RBLWv8S5tfEyjV", 5000 * 1e8}, + &TokenPayout{"Dsg8Gd815HgUXJWmqqHe5RBLWv8S5tfEyjV", 5000 * 1e8}, + &TokenPayout{"Dsg92M45V3RVdcjSkiGpeV9XEdCRkkJY3wD", 5000 * 1e8}, + &TokenPayout{"DsgBvoFtQownZbNmDppRJcyGZpNTDhv2zDd", 5000 * 1e8}, + 
&TokenPayout{"DsggU1gi7gcbRn6xitjhUTjttzYQXx7jSBP", 5000 * 1e8}, + &TokenPayout{"DsgMRSDo8rKSqA2AuuerJ9bp84dtuZZQRDL", 5000 * 1e8}, + &TokenPayout{"DsgqZpzEPd6eaLL36zBAXEftfQbsfTupMUx", 5000 * 1e8}, + &TokenPayout{"Dsguf7n4fqCe3RxE62WkM7rtthbKRoJjYPw", 5000 * 1e8}, + &TokenPayout{"DsgugjeYe8uoRcxGhR85jPE1S54LVGud54K", 5000 * 1e8}, + &TokenPayout{"DsgW8Lq8VEkQG41awKizNKWPRHayRUqAMtm", 5000 * 1e8}, + &TokenPayout{"DsgY1zkXvgKAxbQctvepChxTdJjNLGznoLa", 5000 * 1e8}, + &TokenPayout{"DsgybvJcfxXR4dLHQ61feCsgyRSE857i9TH", 5000 * 1e8}, + &TokenPayout{"DshBnNEFjG3eMnQR5WrQe9CtMxUMNLFziLM", 5000 * 1e8}, + &TokenPayout{"DshdKtxhqX6Mi4iyvbYitaUYKE8M1FDyg9L", 5000 * 1e8}, + &TokenPayout{"DshdpiCoDKsGqXdvrvnuDfJh5sGxWz4hVuG", 5000 * 1e8}, + &TokenPayout{"Dshe2CYP8Mp66bgyAKkPhLAD1wsZrgQQVJK", 5000 * 1e8}, + &TokenPayout{"DshELqFQGFrTT1fx1wmXL8QvDXFByWpkdXN", 5000 * 1e8}, + &TokenPayout{"DshSr8BXiKRy946k5TptEuxkSRx5uDjZgm4", 5000 * 1e8}, + &TokenPayout{"Dsi6vLUd21x8Ej16AXjkYDSUiLTDaY8smar", 5000 * 1e8}, + &TokenPayout{"DsiDdL7rHXk6GzQCssdexJf7nf48RVQzdbH", 5000 * 1e8}, + &TokenPayout{"DsiHiXPpr4g54JmckGfySzJ6JSPRXevrnca", 5000 * 1e8}, + &TokenPayout{"DsiKKbtdKwvFKHdhyG1VMxrRBjn3oTjZbEH", 5000 * 1e8}, + &TokenPayout{"DsiLLsATP3DCwjtVLEpVrFD2q1kfKhKTJVW", 5000 * 1e8}, + &TokenPayout{"DsiQkbaiT8sFH6xTB2xDze6YZqK2U5pzbpZ", 5000 * 1e8}, + &TokenPayout{"DsiVjx92QN6yga8xEwpt3a7ftWn1e1cyN1T", 5000 * 1e8}, + &TokenPayout{"Dsj8nzTA2Mvret4iNFXhLaPKij9asQEUCX4", 5000 * 1e8}, + &TokenPayout{"DsjbfDvk9Mk1PqVZHnmZbzLrsPbvJmdReke", 5000 * 1e8}, + &TokenPayout{"DsjCyFQ2kzbH3MCHid4eXAYEdQdC7rzVyam", 5000 * 1e8}, + &TokenPayout{"DsjE4YVCYs21wMECn23yhmbEQzsKh5BmUW9", 5000 * 1e8}, + &TokenPayout{"DsjFGnjpTHwZoNDstQXCbcKy9fhMdbRHkvL", 5000 * 1e8}, + &TokenPayout{"DsjGvrmYHSc3GFpd57Mc5ufL8HoSivBrHS7", 5000 * 1e8}, + &TokenPayout{"DsjoMVm6554n4ss8jU3L4ZntwjtGF9rswU6", 5000 * 1e8}, + &TokenPayout{"DsjSsrjar9cJdsBcH8DAKPhJbvXgryNXPTk", 5000 * 1e8}, + &TokenPayout{"DsjxewM9BK7bdWrYLUeieTNKFpekVQ7MVcE", 5000 * 1e8}, + &TokenPayout{"DsjXK87UEw5kjMi8CnebZq5ypgeDQYRTWgp", 5000 * 1e8}, + &TokenPayout{"Dsk6gwgrgLa8jpQBCje4od1Y4GzfWzQquNq", 5000 * 1e8}, + &TokenPayout{"DskpXkokrEMPpuYGM96c2SFfMAfQZyREiis", 5000 * 1e8}, + &TokenPayout{"DsktkivEFA7HPVhxef9Ejj243vSKcydYHeW", 5000 * 1e8}, + &TokenPayout{"Dsm38cfKMv2QUxdU88bC6gwnqrdTH5ys2jj", 5000 * 1e8}, + &TokenPayout{"Dsm4UgYuXXprdwQxGs2xB6ChcGxQDssr3X2", 5000 * 1e8}, + &TokenPayout{"Dsm6b6tf4RVfiKZVMADxKTUWVi21xXPrMer", 5000 * 1e8}, + &TokenPayout{"Dsm9V7mMv9uFTtEfWE48nhLCwhthqTHNrYx", 5000 * 1e8}, + &TokenPayout{"DsmmZsKzMb4TVQvM4bNTTo5fjTtp6cgDJCC", 5000 * 1e8}, + &TokenPayout{"DsmNT861wo8cXQzUmNr4pd4ePwpQLoT5HpN", 5000 * 1e8}, + &TokenPayout{"DsnaURntgJCEBP7iXnVGfeBCz45vNHuaxJA", 5000 * 1e8}, + &TokenPayout{"DsnCWj3UVaGNgX1u2Fxx2LDCH38Kkcmew4a", 5000 * 1e8}, + &TokenPayout{"DsnheHZYmcsr81iMgc1C3WVsxC3bgpjeKnA", 5000 * 1e8}, + &TokenPayout{"DsnjNWuPdhox2LS1r13zDXVnVVjBEqvmZVt", 5000 * 1e8}, + &TokenPayout{"Dsnm6K3qLKJpqzDEUJ1QvFAoQhRevWvzU4L", 5000 * 1e8}, + &TokenPayout{"Dsnqe1wH9avHcTP6GDqUdGvq5yBkTSxCQHN", 5000 * 1e8}, + &TokenPayout{"DsoYb4rTpkQT6vTuVyuTY1rDJVDeETw29yf", 5000 * 1e8}, + &TokenPayout{"Dsp6Y7cyxyaQN5EDKQ9y8ZQ5KpNVK2bFMoh", 5000 * 1e8}, + &TokenPayout{"Dsp8uqxTPrKT2dCJYXk2E9sjHBT7mBir2Jw", 5000 * 1e8}, + &TokenPayout{"DspBkApHzcUQDskVLHXo2pXg1DiquyccAJf", 5000 * 1e8}, + &TokenPayout{"DspCN8Aq8wmPZUCLzTfBzpyAbRewHVRXiHc", 5000 * 1e8}, + &TokenPayout{"DspGdvE6i5syx3T9HUn1CWwGNQBa2pV4CEw", 5000 * 1e8}, + &TokenPayout{"DsQyh5BZ6Y2UwriJGHxJ3AgEXCLv4XETj1P", 5000 * 1e8}, + 
&TokenPayout{"DsR1K4aidCRxR85LRyLv4B2mKENnW3mCpjY", 5000 * 1e8}, + &TokenPayout{"DsR2QowpsGeYecZMjLEHKabhtawwytE9anB", 5000 * 1e8}, + &TokenPayout{"DsRbz1aR9nsf2KB55785KkN4hDVeThZbtYN", 5000 * 1e8}, + &TokenPayout{"DsReBjMbNKdLtS6akXgSqiLeX14LZ8qDKVQ", 5000 * 1e8}, + &TokenPayout{"DsRhoMvysVkzDsEEkBXqCU981D1gRZAjszw", 5000 * 1e8}, + &TokenPayout{"DsRqajUX1tk16nCGojEq22mS49V3kCWfjqZ", 5000 * 1e8}, + &TokenPayout{"DsRViHLgGk9n2shaPJGsKnqq2Vvfqz3rrG3", 5000 * 1e8}, + &TokenPayout{"DsRwjEFQVd4SrSByWTiHim2iXakLkbxxDWk", 5000 * 1e8}, + &TokenPayout{"DsRww1SXZZQSmmMrdy2VBihS6pQ5j7K3aXh", 5000 * 1e8}, + &TokenPayout{"DsSHiCHj4J6WYcacTJkSDrrEu84yDUBo1ww", 5000 * 1e8}, + &TokenPayout{"DsSJDikWJr79neXZ2tYUaaWAqdDXnpsinw8", 5000 * 1e8}, + &TokenPayout{"DsSjK6r15okb9o5QML84anVi1eZLe1vbyb4", 5000 * 1e8}, + &TokenPayout{"DsSLWu9J7psQnnW8fxJ7NSi2LSZgHBV462Q", 5000 * 1e8}, + &TokenPayout{"DsSWntcLCfjEaphcYCkmBjE2D2Rqr46SbfP", 5000 * 1e8}, + &TokenPayout{"DsSYsbfwbqWzhZHwJGbNvFEjy5UvJJWngz7", 5000 * 1e8}, + &TokenPayout{"DsSYTqoRr4EvePwfW5YK7wufdqLuNxJht4J", 5000 * 1e8}, + &TokenPayout{"DsSZ5qoqiTmkTnpdBiQUW3RfLWYJ4S5cqo9", 5000 * 1e8}, + &TokenPayout{"DsT3an2q4BxLpkr9Bp9A3dqCfAKXdsseaeg", 5000 * 1e8}, + &TokenPayout{"DsTAJUvvU7Z8g5zfJqfwzUSVvLZQimDPSQn", 5000 * 1e8}, + &TokenPayout{"DsTSbUY32Uimnx27e1mmREhqcM9scTdtvE8", 5000 * 1e8}, + &TokenPayout{"DsTTGVRgLTmooehgBPuEkpeGiTp86rvysgR", 5000 * 1e8}, + &TokenPayout{"DsTu1ynromXaYEShPZLoHKHHskoLwaTxGb1", 5000 * 1e8}, + &TokenPayout{"DsTWHwApANhkCgwHfXfDoauC937NvJLxfnw", 5000 * 1e8}, + &TokenPayout{"DsTwjZvND1HWQ452Kc5pH6u8B4nJ2hkM5tn", 5000 * 1e8}, + &TokenPayout{"DsTxANwuJY6BetdHtFR1K7cuWXGgM5xrjnf", 5000 * 1e8}, + &TokenPayout{"DsTy2ff7WL6WGMVeAL4HHwDb8QzECWw6cu8", 5000 * 1e8}, + &TokenPayout{"DsTywCoFXNT9LBEteoLXQfMqdDPxHViHoKr", 5000 * 1e8}, + &TokenPayout{"DsTzH9QPMdDsmNpqz2zErs95TDLMURF7cqU", 5000 * 1e8}, + &TokenPayout{"DsUDgkTSYoXbi2qo9ZRiwBPgzq13eGfwfso", 5000 * 1e8}, + &TokenPayout{"DsUoYkQJwLgXaiHfyHBzacnFj1twmWcxVUy", 5000 * 1e8}, + &TokenPayout{"DsUTgHzHk8bv9k9rVQ9UL7imRmN56AE7gwm", 5000 * 1e8}, + &TokenPayout{"DsUYdCgAixSdnMx6szeeNMDnN44nsDY8sMr", 5000 * 1e8}, + &TokenPayout{"DsUzfyU75pNbbhHSjhpf6PUEvSywKTVzXBP", 5000 * 1e8}, + &TokenPayout{"DsV5bsUNChGYTiuZXEWEqMbKs3qH4YkxHD1", 5000 * 1e8}, + &TokenPayout{"DsV7FGcqiUJrsU8d4etwtXzbKinVFGUD8X8", 5000 * 1e8}, + &TokenPayout{"DsVEexbQ9VYNdfx3KG2JKT75gQPgsJmES1F", 5000 * 1e8}, + &TokenPayout{"DsVf9CWzwfTrub5khnHG19zFQMfL9oXTDDv", 5000 * 1e8}, + &TokenPayout{"DsViNGfMmSx3gEQW3cikxPW98m5HoZgMXu3", 5000 * 1e8}, + &TokenPayout{"DsViweCEE4vdT1DTto8xM2WX7NRdBW7PJT1", 5000 * 1e8}, + &TokenPayout{"DsVLHT5J6DTHhmNXCQ4N5t69hApzy3qX3Pm", 5000 * 1e8}, + &TokenPayout{"DsVXxdrQ3n1jstXyUmV1M28u6ASRHUoLkJN", 5000 * 1e8}, + &TokenPayout{"DsWevshRbe66Qy3den4TVHpNsyj8U41cxCE", 5000 * 1e8}, + &TokenPayout{"DsWqqZBhYF2rPZkmt4Qbb5csvvwDS8SJyus", 5000 * 1e8}, + &TokenPayout{"DsX6CwFG5gvfx7XQdnsDuHq5PZJtusQ11Sm", 5000 * 1e8}, + &TokenPayout{"DsXbMCMSGpJuPmVNrcxL1whSJXRmnVNewe8", 5000 * 1e8}, + &TokenPayout{"DsXCThTEH7NQbn4zWSjFtwzvsn9zgK3JCih", 5000 * 1e8}, + &TokenPayout{"DsXj72ZVnQq8UBhgJADyUyKkFA7zZCEBhET", 5000 * 1e8}, + &TokenPayout{"DsXmtX2h5wenb4bizszjeFobmoDMi2uY2gx", 5000 * 1e8}, + &TokenPayout{"DsXTRowtuqJkvFGXEzHVjXNLuae22Z8hZFY", 5000 * 1e8}, + &TokenPayout{"DsXVCdCYTig2Fc5SETqQqLrqZuj4ExKzp9s", 5000 * 1e8}, + &TokenPayout{"DsXwC3oWXf5esBUtqQunPSkxDD32L7NuTns", 5000 * 1e8}, + &TokenPayout{"DsYkGSvBVySiFK2VawJ1XkNUkK6Hopb7HhG", 5000 * 1e8}, + &TokenPayout{"DsYKhNg7N1Vw1bDPkk9nAYXmqKsfE6er4uy", 5000 * 1e8}, + 
&TokenPayout{"DsYSYEwryud617iy6Q2rHR674pNuZEJWYwV", 5000 * 1e8}, + &TokenPayout{"DsZayBDuLvDQNhj1AEAw6rVkUTU2SkEtbhv", 5000 * 1e8}, + &TokenPayout{"DsZFPAH88wFTV5JQMMBFM2zhxZT6mMbMecE", 5000 * 1e8}, + &TokenPayout{"DsZjE7dm4gWLMLJ59qtfj5C24bGrbKqB66c", 5000 * 1e8}, + &TokenPayout{"DsZnXFbDmHYcgS2WcUVBx1XCARbaD1uLU8N", 5000 * 1e8}, + &TokenPayout{"DsZr3VZSBEgcTJjjhyssvTmsBetdfKuBpsz", 5000 * 1e8}, + &TokenPayout{"DsZrsmHHNWXBJChg2zJ8NqSBEVdQ491Arbo", 5000 * 1e8}, + &TokenPayout{"DsZwjYRkwao6jxjJpS3wbAnxfvTUcdZVxkn", 5000 * 1e8}, + &TokenPayout{"DsZYbm59pHzMv5MEFDoTf4amnU6bTT8VUS7", 5000 * 1e8}, + &TokenPayout{"DsZMAYBSyVQKAprVYh5ChCs9ekG2z5obbVo", 2018.01801802 * 1e8}, + &TokenPayout{"DsnBB8TuTpqzaEpLZQiQMWxxTQBagApt2ZU", 809.00900901 * 1e8}, + &TokenPayout{"DsmwJYhjLLbcZ7wSMuqKzBnBQqBe2UESvjQ", 720.72072072 * 1e8}, + &TokenPayout{"DsZogwg3cGE4TnHUzdQAkq3N4SX8jCNpTGz", 630.63063063 * 1e8}, + &TokenPayout{"DshaRPc4VFfUpNC1F2nbpBkD6jpAUb1zk8h", 540.54054054 * 1e8}, + &TokenPayout{"DsZfr8VAfedf6Ryh22goXSNhZmAL4UMUTCP", 180.18018018 * 1e8}, + &TokenPayout{"DsnWt7abiAaymxHH9kSYYQt8iz3SMRuXWcf", 100.90089962 * 1e8}, + &TokenPayout{"Dsa1DJKsrpAGmSGiQMHWhN2gW5HBdEj4MBV", 282.63795424 * 1e8}, + &TokenPayout{"Dsa1GPGPi4PdNUiYJj6VVWmdFa8RsLdvFQG", 282.63795424 * 1e8}, + &TokenPayout{"Dsa1MFYCTnnfQvNG9TJ9h9eMuQKxA8EBusM", 282.63795424 * 1e8}, + &TokenPayout{"Dsa1s5RiChS7jbDVB9HohcLYt6qwgPD6L93", 282.63795424 * 1e8}, + &TokenPayout{"Dsa2aicrRyAQoKXvk8zuSviPKKCtXHQt8FE", 282.63795424 * 1e8}, + &TokenPayout{"Dsa2Ppu4ihXGUdcT6zeA9UysFKrw25AYVay", 282.63795424 * 1e8}, + &TokenPayout{"Dsa365edkBTKBDdtmpAH1xKn9qTK9ETXU4a", 282.63795424 * 1e8}, + &TokenPayout{"Dsa3H8jVqBbF4z8t9Xo5oUXCWNC57Anz2jD", 282.63795424 * 1e8}, + &TokenPayout{"Dsa4pAvTxQTVLr2U8jiXbQUH3hPbFNUDi28", 282.63795424 * 1e8}, + &TokenPayout{"Dsa4pQqLqFRfJqtUBvNeeNWy4TBdpa4J7ma", 282.63795424 * 1e8}, + &TokenPayout{"Dsa4PZJFFmSoYgPNZQYoHzsZi2mrvGAXZo7", 282.63795424 * 1e8}, + &TokenPayout{"Dsa4UeecaBMcJLyqratuh7zr8c9R3SaMWK4", 282.63795424 * 1e8}, + &TokenPayout{"Dsa4xHf4TpxuCUB2tQSqpci6U7qBdSvVPG8", 282.63795424 * 1e8}, + &TokenPayout{"Dsa58rVmuvzauSS4vCViTmfhkrGxwGnTW3Q", 282.63795424 * 1e8}, + &TokenPayout{"Dsa5bfH4bnfV3xtnTHbhfuooK4oMHA2S8i3", 282.63795424 * 1e8}, + &TokenPayout{"Dsa63rJ8DTGFSCj8kr5siyqC7XbSzofiaik", 282.63795424 * 1e8}, + &TokenPayout{"Dsa6e1BB6AUhxXpEzdFsQP44EUrAqWYwpib", 282.63795424 * 1e8}, + &TokenPayout{"Dsa6gXrY7niffshLyAXoyBqiWfQicV9xCYe", 282.63795424 * 1e8}, + &TokenPayout{"Dsa6tQ7Du5NeJgLQ1TYy2ad6HeMDPgsqQns", 282.63795424 * 1e8}, + &TokenPayout{"Dsa7ihepVZUM9uTQaVvujgykipKt38H1gHs", 282.63795424 * 1e8}, + &TokenPayout{"Dsa7kcKbAGRDSYv5EgK9EGN4HfG5xcWoR2G", 282.63795424 * 1e8}, + &TokenPayout{"Dsa7ULfg5VgwUSTegQnwBvvurbvA2EL3gK5", 282.63795424 * 1e8}, + &TokenPayout{"Dsa7vQr6BB1pFv2GtLMEbfErMbahvTyC94i", 282.63795424 * 1e8}, + &TokenPayout{"Dsa8M5HjeGrBM1MESKYazevcpZyDSy8FMfu", 282.63795424 * 1e8}, + &TokenPayout{"DsaA4R5kiPLvCTmTNNqN1Lsi9jYhhvK5JTn", 282.63795424 * 1e8}, + &TokenPayout{"DsaACX6yedYf2fqBFRKmSuN2UvdGu2sLT1z", 282.63795424 * 1e8}, + &TokenPayout{"DsaakpWqsqYdZpB7yigM9HDFiTL6iReUWQb", 282.63795424 * 1e8}, + &TokenPayout{"DsaAu5YbNjd6NyUC6dv1jvfQRoP5oaZdRiB", 282.63795424 * 1e8}, + &TokenPayout{"DsaaxFPr8GUMvi3WgtwygEXyrmi3SHaUYPw", 282.63795424 * 1e8}, + &TokenPayout{"DsaB6nQufW49a1s42yJPKNJHMfeSoLWyFUK", 282.63795424 * 1e8}, + &TokenPayout{"DsaBHyMs28SUq2Bgo4oiCuPfnUwn7voTrq3", 282.63795424 * 1e8}, + &TokenPayout{"DsabLYwSEd3CCSKnESmtn5kzrYxxHkUS3ug", 282.63795424 * 1e8}, + 
&TokenPayout{"DsaBsBhwBH1sL3CSTwoj1sELrHS3Mk2ybKo", 282.63795424 * 1e8}, + &TokenPayout{"DsacsrskAo9Guis5dGt8fAvmqqtLE4D25hm", 282.63795424 * 1e8}, + &TokenPayout{"DsaCvbuWwSttRCr9fTKEB6bPutoUYoLfQiD", 282.63795424 * 1e8}, + &TokenPayout{"DsacwYGgsG1rK9XoBDevgqNNrFyAbD41bfW", 282.63795424 * 1e8}, + &TokenPayout{"DsaCzGjtPqcV3sK9Kg93CUN5hkaD9H4hU37", 282.63795424 * 1e8}, + &TokenPayout{"DsaDeSjmAV7rVPGJyVALs5PrYMxk84qXi6j", 282.63795424 * 1e8}, + &TokenPayout{"DsaDgde3p5Ms5bR1PTQuyQ5Xz4ZzfPrBYEQ", 282.63795424 * 1e8}, + &TokenPayout{"DsaDWQWEk4Z6z5H9kbdVctdcGoFn7m6NCfM", 282.63795424 * 1e8}, + &TokenPayout{"DsaDYm6ojy1yY83Kq6exYAAuE9L2VyJbLop", 282.63795424 * 1e8}, + &TokenPayout{"DsaDZDzn1kwCkbUv2hEv6pmpknj4mYY1bvA", 282.63795424 * 1e8}, + &TokenPayout{"DsaEaE5gtFeqNdquUanhNef975xm7uJFCjm", 282.63795424 * 1e8}, + &TokenPayout{"DsaEfENtmspgeewHUYMdXpxMv6cZsJhWd1b", 282.63795424 * 1e8}, + &TokenPayout{"DsaEqXhJg33pyYwvZmQHGAw6Q9bcWQBEsWj", 282.63795424 * 1e8}, + &TokenPayout{"Dsaevtds1AwMw1T9XAX76qu1S3NChKReps1", 282.63795424 * 1e8}, + &TokenPayout{"DsaF2ckALxUgB2NrCSdySn6X8ciD2h6iMm6", 282.63795424 * 1e8}, + &TokenPayout{"DsaF3Yim2wkhKiJZN94ouCAagcmk1sJcggs", 282.63795424 * 1e8}, + &TokenPayout{"Dsaf7tp8hMH94264FjCSfppB1jYiJQbkigm", 282.63795424 * 1e8}, + &TokenPayout{"DsafLWQSxHqpQet1V7uxsN8b5Hcgwy7x2dW", 282.63795424 * 1e8}, + &TokenPayout{"DsafSM8rp9Ez1mE9yPaszp6eS8fe3vLhyMd", 282.63795424 * 1e8}, + &TokenPayout{"DsaFxW4eCBNnGtwT4F4CZWLmP3Ve3Vjny83", 282.63795424 * 1e8}, + &TokenPayout{"DsaGxHpUqBjXJ3yHNvH6u7CYYhN39uSi5QA", 282.63795424 * 1e8}, + &TokenPayout{"DsahfPuBi7H2KG8gc76PYvCrctAGYH6kF6X", 282.63795424 * 1e8}, + &TokenPayout{"DsahNMd2qHXRiMvgogVRxxtHvNi6vgqRbY8", 282.63795424 * 1e8}, + &TokenPayout{"DsahVUJiDCMeqByg3VY2r9otnF5Pv6QSDvR", 282.63795424 * 1e8}, + &TokenPayout{"DsaJmL5t5R8ssiyjJSSNH1UP5DZhgbRiu5r", 282.63795424 * 1e8}, + &TokenPayout{"DsajmSQUcGcHSVQx89xkGSyhgJwU3P2hfaQ", 282.63795424 * 1e8}, + &TokenPayout{"DsaJqJUb4YsezHSALYaUZzrzd7X5cGJvHgo", 282.63795424 * 1e8}, + &TokenPayout{"Dsak8FZQdCVxQfqjJwThaUbLaH9D429DWC7", 282.63795424 * 1e8}, + &TokenPayout{"DsaKJ3cGR243ZGp5HWm9wBKWt56aTqYRVUa", 282.63795424 * 1e8}, + &TokenPayout{"DsaKjtegrK5jwW9SyXPTfbyzrU9eBKMjAEY", 282.63795424 * 1e8}, + &TokenPayout{"DsaKKcAL7Ji2LWYCFnvNA5xfP3hyvb6R3e9", 282.63795424 * 1e8}, + &TokenPayout{"DsaKLfUpW6LGLPZYxnBw55rXTjS4E9s3MJP", 282.63795424 * 1e8}, + &TokenPayout{"DsaLfpBUUNXcKiWSQSaErr52boGFDrt7eyW", 282.63795424 * 1e8}, + &TokenPayout{"DsaLHTGYSJK2EeJmgTPJ1azy649bcy11Lxt", 282.63795424 * 1e8}, + &TokenPayout{"DsaLNu32a6yfRfDaKc7GqNuN3cs5xq4gx9H", 282.63795424 * 1e8}, + &TokenPayout{"DsamjUT3vXgCvx3okjoywHb8RiuYwP2u2ew", 282.63795424 * 1e8}, + &TokenPayout{"DsaMTd4aLNN6ypmkHLC9eas38iGnqGxZZLp", 282.63795424 * 1e8}, + &TokenPayout{"DsamtWN8EyN4TcAVDDgSNutXTeJUzBeWRpj", 282.63795424 * 1e8}, + &TokenPayout{"Dsan5gomPuUFxhFe1Pi7iXj1Bxt2Vg3QL49", 282.63795424 * 1e8}, + &TokenPayout{"Dsan6Hhun72jABX7UdwLNaEL5xQSBuqGc3D", 282.63795424 * 1e8}, + &TokenPayout{"Dsanbtj31mCNoN55KsNADj911d33YrbX9UC", 282.63795424 * 1e8}, + &TokenPayout{"DsanfGhNhgCcp8PabhCzhjAVQNQYwrpGyFK", 282.63795424 * 1e8}, + &TokenPayout{"DsaNgVEX2oR28uMyMevz6vtkuybkMq2cYGK", 282.63795424 * 1e8}, + &TokenPayout{"Dsao7WBUm7gr1qaZ8ywkjjJDUUKZTdGLpWq", 282.63795424 * 1e8}, + &TokenPayout{"Dsao8swBUKPs9B3t3hC85YMcBTHjzkJGrw9", 282.63795424 * 1e8}, + &TokenPayout{"DsaoiwLfdqsnqzDNvxDwn1GgSnwU2SnwBRi", 282.63795424 * 1e8}, + &TokenPayout{"DsapCd83rknEzVhgdxaFaCyjoRbvtfE8UNv", 282.63795424 * 1e8}, + 
&TokenPayout{"DsaPh4ZQ3aohUigiQ4kaXZMBWrNC9hGofW4", 282.63795424 * 1e8}, + &TokenPayout{"DsaPMUBU9yu2YqK9swyiqAv5igNRaz89U93", 282.63795424 * 1e8}, + &TokenPayout{"DsaPNd5rXGZ5zX3H2iRdicSgN6XS5T2QYik", 282.63795424 * 1e8}, + &TokenPayout{"DsapSMahaYEkNPwk7QgHKNQP84y88h2o9Qc", 282.63795424 * 1e8}, + &TokenPayout{"DsaQ8CJuQu1oBtno297G1TDHYVvevuCrfKy", 282.63795424 * 1e8}, + &TokenPayout{"DsaQb4JnThYw3EWD5n5uE8mF7vHBoVoegxo", 282.63795424 * 1e8}, + &TokenPayout{"DsaqC8rHWczvLTzLggpkJMMLGyGKdpiM8Lu", 282.63795424 * 1e8}, + &TokenPayout{"DsaRmBBiPe5TqgpkpKMMZxiN4ChGujigYPJ", 282.63795424 * 1e8}, + &TokenPayout{"Dsart2UG9BXpXimv2SMJJ2KoJiVsUKaWkcc", 282.63795424 * 1e8}, + &TokenPayout{"DsaRzvYpxfVa3FVpQxSJWyEvE8NCX8b8L4B", 282.63795424 * 1e8}, + &TokenPayout{"Dsas3f1sGUjxjFX5cMju7tsEckS6pC1tGV4", 282.63795424 * 1e8}, + &TokenPayout{"DsasEnKmoDDnfeEkDzSTQ6P55U4j9agM2zm", 282.63795424 * 1e8}, + &TokenPayout{"DsasqVD3hCH4YV1B4j9UN84He6QKKne6isg", 282.63795424 * 1e8}, + &TokenPayout{"DsasU7QMdeS42kK9tYEn6xhP15S2E3CBMLU", 282.63795424 * 1e8}, + &TokenPayout{"DsasuERbs6d8FNYEmZVVptq7hF5vpDNGtKn", 282.63795424 * 1e8}, + &TokenPayout{"DsasVgYNz4czdsrnhNB4eaPzoKGFZcgwHMy", 282.63795424 * 1e8}, + &TokenPayout{"DsasWAuvCiZzy4uB11hfBsZnK3RAmGuW76k", 282.63795424 * 1e8}, + &TokenPayout{"Dsat2RAVx9eZDZpndksCxoP1nmQDaz2fjes", 282.63795424 * 1e8}, + &TokenPayout{"DsatbCd2m7YonxHt2E5Dy7PVxi765ok3pNF", 282.63795424 * 1e8}, + &TokenPayout{"DsaTMVNtqFwGQmBbbLNwrhkXgddJeorraKF", 282.63795424 * 1e8}, + &TokenPayout{"DsaTvEytU6gMk7m7hahtZsytA42gu4tFdca", 282.63795424 * 1e8}, + &TokenPayout{"DsaTWNdspEzy94DjW1bEXmQRJkYpHuNg2Q4", 282.63795424 * 1e8}, + &TokenPayout{"Dsau1JfgDEbX4NQ3PhfdjVUtzFtCiZkMviV", 282.63795424 * 1e8}, + &TokenPayout{"DsaU1YyA7Azj3qhmECwrjuE9HvNrvNvsVGj", 282.63795424 * 1e8}, + &TokenPayout{"DsauE4gwRVA2VtpZCsBCnQ7rqsu6EkZB6mW", 282.63795424 * 1e8}, + &TokenPayout{"Dsaufhk3mGKZXnizmgTU5qApvAzwnVKfHku", 282.63795424 * 1e8}, + &TokenPayout{"DsaUmSBYDs23ZXw5X2YJFHUpNvztSnHH2Qk", 282.63795424 * 1e8}, + &TokenPayout{"DsaUmxwCq1WYHdTPDfrKCNHh5wvUrkwVdNT", 282.63795424 * 1e8}, + &TokenPayout{"DsaUP1GYyrVfZbeoGqy98QoHMCKTSG5Pkod", 282.63795424 * 1e8}, + &TokenPayout{"DsaUPox7uaWKY5EYxP7NpDBXMmPQgb8psFK", 282.63795424 * 1e8}, + &TokenPayout{"DsauUsLLzRmDMepoaf6KZ3nAvwY49BdX5Nx", 282.63795424 * 1e8}, + &TokenPayout{"DsaVi33ujNYauVFENxqAeDzjafPyKg5qmvj", 282.63795424 * 1e8}, + &TokenPayout{"DsavkrQftjsJJ2LwRZKraJwSBn1iyNcMc77", 282.63795424 * 1e8}, + &TokenPayout{"DsaVL8VQ3n3PxBA2tFvgapnXeSwwk7W5Anj", 282.63795424 * 1e8}, + &TokenPayout{"Dsavpj42U81mUf3X1Ea9iq8KnVmV3CiGHQ1", 282.63795424 * 1e8}, + &TokenPayout{"Dsavrg3RGs7dmnACT7c2W2TPVSt8EwG8ZMq", 282.63795424 * 1e8}, + &TokenPayout{"DsaVS4rv8mhNVeUurTgr6Nfjv5v4btNdyUe", 282.63795424 * 1e8}, + &TokenPayout{"DsaW3GN3NjmNJTuGPcxk18jnft4Ln9vwzTf", 282.63795424 * 1e8}, + &TokenPayout{"Dsaw9F4RG3LQS6W76sFcNdSkTd7fLnBGraq", 282.63795424 * 1e8}, + &TokenPayout{"DsaWgVmeXzGx2xauQcNWow4hApyhEqW2Jrh", 282.63795424 * 1e8}, + &TokenPayout{"DsaWTXen1vhq1pTp6oSDxfmGbHC7Pk1QPQG", 282.63795424 * 1e8}, + &TokenPayout{"DsaWyqgKTVWCFkped3eUyB444egXk1s4hhP", 282.63795424 * 1e8}, + &TokenPayout{"DsaWzaUcb4fdmf7En3gX6LXXzaARcZmoNiC", 282.63795424 * 1e8}, + &TokenPayout{"DsaX8M7Vj6smBpwKLokKxbDnKprFMDKkS8W", 282.63795424 * 1e8}, + &TokenPayout{"DsaxAT73HNYnrYDn5p7W8qE4RYxq9yJCKiY", 282.63795424 * 1e8}, + &TokenPayout{"DsaXBLm26Cdbd1w4RbqmZ36DdwnJQeEMyzS", 282.63795424 * 1e8}, + &TokenPayout{"DsaXizpjjR188HH6kF7NFFQf7dC1NhKeXGL", 282.63795424 * 1e8}, + 
&TokenPayout{"DsaXmgYy6dFFc3QBioTzh7MerUmvNv6KSR5", 282.63795424 * 1e8}, + &TokenPayout{"DsaXtZGB1CWsTBRG7peebfcJomJfkWukJhn", 282.63795424 * 1e8}, + &TokenPayout{"DsaY4LSzqDrznEuezv4DYXgeBgoUxpWgXSw", 282.63795424 * 1e8}, + &TokenPayout{"DsayzeQFSD574759cHUaWZXM8LtgRQh5Qpk", 282.63795424 * 1e8}, + &TokenPayout{"DsazCqEyfhHsH2tC7MTV4eWxsdX9rfgrGFi", 282.63795424 * 1e8}, + &TokenPayout{"DsaZkpHuqnPqABiVPNNrGhxfmCQhSUDdC8Y", 282.63795424 * 1e8}, + &TokenPayout{"DsaZt7g7iG7ceRdrCbsoNwT8ARTrPPnDU1f", 282.63795424 * 1e8}, + &TokenPayout{"Dsb1gPbzkM2MXJRo2qfzYTr3KKFfaJf59an", 282.63795424 * 1e8}, + &TokenPayout{"Dsb1gvWhJdTeYDK6ZrzhWt5yhXdrs81Q7vb", 282.63795424 * 1e8}, + &TokenPayout{"Dsb1skD8qr27ezHRSnWdCrHiheWn4yR3nPX", 282.63795424 * 1e8}, + &TokenPayout{"Dsb2BUwjsRy4e8vkcbAbMvSJNeMsXdkP5zA", 282.63795424 * 1e8}, + &TokenPayout{"Dsb2QC9rj76fMWwLHCrWn2oAVhsuyQ2qESf", 282.63795424 * 1e8}, + &TokenPayout{"Dsb31hbGzTcHdKcR2ePLeyztw8brkXpK1Bw", 282.63795424 * 1e8}, + &TokenPayout{"Dsb3EQbNWKDbNkN9Ztv6vreAMMZdfpXy96T", 282.63795424 * 1e8}, + &TokenPayout{"Dsb4ywaoRrVYxdTm3xwNAm6RUMdcJeKAZtW", 282.63795424 * 1e8}, + &TokenPayout{"Dsb5sZnVpXvaTwNoWZfuovyYvmcoEXrNgDm", 282.63795424 * 1e8}, + &TokenPayout{"Dsb6eB4uogjjEBssKeRdYrEAvZuW2xuzUzY", 282.63795424 * 1e8}, + &TokenPayout{"Dsb6Mc9TgGV1i3rzcbQeuWMni6Ve2sbXP2G", 282.63795424 * 1e8}, + &TokenPayout{"Dsb722FaVNaBhfJG1iURMPmzjepDWdGF6XW", 282.63795424 * 1e8}, + &TokenPayout{"Dsb7WiKyrHBGo5L2zEC7Y7pJigKKi3MZ1xu", 282.63795424 * 1e8}, + &TokenPayout{"Dsb8L3HS4ENi1iKuA7Xmmn1JqrcABBP5y1E", 282.63795424 * 1e8}, + &TokenPayout{"Dsb9Cyxgz5mfRxgYAHziRXvp8C5kjaq96sw", 282.63795424 * 1e8}, + &TokenPayout{"Dsb9DaUQSiwayvZ1ZhcYXiu4YFmPuBFwtWc", 282.63795424 * 1e8}, + &TokenPayout{"DsbA43vAtgaXSSKmHa5TXFXGewDggjaHNAY", 282.63795424 * 1e8}, + &TokenPayout{"DsbActC8uYYxjhrgUAGtLsHjvUp8ByKRK2F", 282.63795424 * 1e8}, + &TokenPayout{"DsbaRuwxdxfriC9dY55JF21e2kW4gicMyi4", 282.63795424 * 1e8}, + &TokenPayout{"DsbBb5r8XdidnytwPQdr7MXyDdCKTHacund", 282.63795424 * 1e8}, + &TokenPayout{"DsbBvpeYcwQeeuWstsSQ5bxXpDLwb6LtTZE", 282.63795424 * 1e8}, + &TokenPayout{"DsbC73PegsCFRKEt1DBGZjY6rr6HCEoLk7z", 282.63795424 * 1e8}, + &TokenPayout{"DsbCA8vxaJbA3kYnMPz2uYp2vzzavKCBJ5C", 282.63795424 * 1e8}, + &TokenPayout{"DsbcmYXLZxqmCGAj3Tsjmcy4G3GCqqgKJ8K", 282.63795424 * 1e8}, + &TokenPayout{"DsbCRbetvAHVpVtoTbt9Xs81PmTm5Zj7T8b", 282.63795424 * 1e8}, + &TokenPayout{"DsbCtZcaq3e3o9uPzt346PQNrkwgg9TFN6x", 282.63795424 * 1e8}, + &TokenPayout{"DsbD1KRLqZJWvMUKpuxRyRGegnf2heW9qjz", 282.63795424 * 1e8}, + &TokenPayout{"DsbdfcWuqPb4fqDXGxAkytCYusoRth7JuRw", 282.63795424 * 1e8}, + &TokenPayout{"DsbeEkkj44YZofzimB582dwJJHR5ZRKEz8j", 282.63795424 * 1e8}, + &TokenPayout{"DsbewHVpGpnBEU5ZivmGZN5uD96g5X6GcKL", 282.63795424 * 1e8}, + &TokenPayout{"DsbewxSUCTHkEaWZXj47hREnxeaBZcMqJDW", 282.63795424 * 1e8}, + &TokenPayout{"DsbF9Y9TWxuPT3SJ92f85vmsowm5bEBAYQJ", 282.63795424 * 1e8}, + &TokenPayout{"DsbFFK2hqEy93h153LT45NVcw1bBep6FQ6r", 282.63795424 * 1e8}, + &TokenPayout{"DsbFgy87vpx5c7Gtnywnm3Qb2Vki2V4ScBD", 282.63795424 * 1e8}, + &TokenPayout{"DsbFJ9X5eqqFtx5R8R2CTSepdvp4i7idtES", 282.63795424 * 1e8}, + &TokenPayout{"DsbgBmLSKwwLsDNRW8f9ndCT8hPDTseYmbB", 282.63795424 * 1e8}, + &TokenPayout{"DsbGESwFpxwECzSgt6nksGCuCoLtgzYZ7JQ", 282.63795424 * 1e8}, + &TokenPayout{"DsbGHUguPz8bkGpe3zRUzGLPLbSyFoQ6JU8", 282.63795424 * 1e8}, + &TokenPayout{"DsbGrrJ7ENR5xqFr3KXZZnS4SoRHLm2JCsT", 282.63795424 * 1e8}, + &TokenPayout{"DsbhLmrVTEXEBid7CrmnZJ8yeSiDPTosenT", 282.63795424 * 1e8}, + 
&TokenPayout{"DsbHRXRzR7q4vyoTuJdrryUwKXCJrGLydcF", 282.63795424 * 1e8}, + &TokenPayout{"DsbHxciNmk4UYzFSZ9DCJg8BNsfKcUX6JXM", 282.63795424 * 1e8}, + &TokenPayout{"DsbhXzepRsJEbbfBx2YJdsf8Dbwz9chgzTN", 282.63795424 * 1e8}, + &TokenPayout{"DsbHYUUDTcvyEVX7kskVSd1QCPqCm7yQwuU", 282.63795424 * 1e8}, + &TokenPayout{"DsbineGitcTewscd8y9u4mH4anC5g3gcuZj", 282.63795424 * 1e8}, + &TokenPayout{"Dsbiz9V7N1UW1vk7F5QwyaubMu2AR9mssTg", 282.63795424 * 1e8}, + &TokenPayout{"DsbJ8ws44KNfDbi3mHwSaKK2X6A4LcQor9Y", 282.63795424 * 1e8}, + &TokenPayout{"DsbjcvSCT5StVquohL9uCiAQd2YQretdzbH", 282.63795424 * 1e8}, + &TokenPayout{"DsbjMSKQjMgAP5HpS45nTSrB4S4NbLpZZjA", 282.63795424 * 1e8}, + &TokenPayout{"DsbjNoE5G3FwL6YXa3xwBZuoi92uA7QmjV8", 282.63795424 * 1e8}, + &TokenPayout{"DsbJwEwSsAuNFo7evLRfsvq6vTPJHqHw9ig", 282.63795424 * 1e8}, + &TokenPayout{"DsbJxK4LJTExUPBh6VRV8uMbreT9STDBdYM", 282.63795424 * 1e8}, + &TokenPayout{"Dsbk2MmgH9AJWjhA6CJpRuuRZDP5ahDgGT3", 282.63795424 * 1e8}, + &TokenPayout{"Dsbk9Y9Nv1FkhNrAbEJPZXjzZ6q55f7igWi", 282.63795424 * 1e8}, + &TokenPayout{"DsbkR1r2nWMr9MvpCy5kRDXQi39T8ytJWYB", 282.63795424 * 1e8}, + &TokenPayout{"DsbkyLpetY3MKP8xtrYy9HNN9XZ5YprJmPY", 282.63795424 * 1e8}, + &TokenPayout{"DsbLivsgpyReDnwNHkhhNq5S9DEg3kLThfS", 282.63795424 * 1e8}, + &TokenPayout{"DsbLvFzNNKZH5RWyWrrcxzaApcLmUc8b8dN", 282.63795424 * 1e8}, + &TokenPayout{"DsbLyhx8oroPAnV1rjdnTNkFxHmMFzQt5Hv", 282.63795424 * 1e8}, + &TokenPayout{"Dsbma3djyyaFv2Z8T22UwTjwMc73yF8k4F3", 282.63795424 * 1e8}, + &TokenPayout{"DsbmBqqcR3QvYbZt44GENxMummFGkqcrEKF", 282.63795424 * 1e8}, + &TokenPayout{"DsbmkrxR3WwoQUpoYCkApPNcGg4NX8vPuhX", 282.63795424 * 1e8}, + &TokenPayout{"DsbMV3BUREcLcQTLBU4PSnEpQ4Ns9juidjM", 282.63795424 * 1e8}, + &TokenPayout{"DsbMvL6wkY8MjuRWn8fAWs8cFUsEEhD8KCK", 282.63795424 * 1e8}, + &TokenPayout{"DsbmvM2J7Xz6S3n5u1snZAnq5HgnyGgDmrt", 282.63795424 * 1e8}, + &TokenPayout{"DsbNLqH2X62YpG7iqemuVsJyCYS9yZGpGMV", 282.63795424 * 1e8}, + &TokenPayout{"Dsbno3oxMj7tjV4ZcaGHsCV1w4G9Y6H7dVP", 282.63795424 * 1e8}, + &TokenPayout{"DsbNoRg1RHhyvHnvuJs2v3qgq16JfezCgaG", 282.63795424 * 1e8}, + &TokenPayout{"DsbNPK9jsTPX1aARoqGRasoUfL3YcrsWpVT", 282.63795424 * 1e8}, + &TokenPayout{"DsbNwwk94wcRhib6j3xSkoJaGZS4bgVspP9", 282.63795424 * 1e8}, + &TokenPayout{"DsbPDLg967awXcvjUwNZfrQwB4pbUiqmx7d", 282.63795424 * 1e8}, + &TokenPayout{"DsbpEXXbNurri7ChsZSpwAAHSRCra7AjKVp", 282.63795424 * 1e8}, + &TokenPayout{"DsbPGriwHFrVYNQrouTfWyzhDw3ZLithf3F", 282.63795424 * 1e8}, + &TokenPayout{"DsbPGW3ee4aeEdqaC1wdbzcsCqk4o2guWVs", 282.63795424 * 1e8}, + &TokenPayout{"DsbPL5b4kow6JnWrEwZMLrJJzmxjY7xxJkW", 282.63795424 * 1e8}, + &TokenPayout{"DsbpNmvApe8z25AtrZQ47VfnhAnEL9qCURL", 282.63795424 * 1e8}, + &TokenPayout{"DsbpxcG8ViHbXB3rHK2HA85D79JPTV92bZH", 282.63795424 * 1e8}, + &TokenPayout{"DsbQ2AmBdPab7LBuinWskSfSiMB8bQWySQF", 282.63795424 * 1e8}, + &TokenPayout{"DsbqcnAuwNYhArjrWrdtghSQSKuDs8iJe2X", 282.63795424 * 1e8}, + &TokenPayout{"DsbqvdMAGagXXdyufJjBkBmKdY4aLLTgapb", 282.63795424 * 1e8}, + &TokenPayout{"Dsbqw48S218dqYBj27KW4uusVcdt4ut8mDW", 282.63795424 * 1e8}, + &TokenPayout{"Dsbr1Tz3ttYVrF4HKAMg4RKeaxqUfa3zh1d", 282.63795424 * 1e8}, + &TokenPayout{"DsbR4XBsDN4QC2JABt9uxWo2PQWUSAXuCQS", 282.63795424 * 1e8}, + &TokenPayout{"DsbrUddkPaFfPoNX54JCQVDzYvjF8ZoN8zU", 282.63795424 * 1e8}, + &TokenPayout{"DsbS12QHsGdGHzN7LzcAvEsCtjaviALMSVW", 282.63795424 * 1e8}, + &TokenPayout{"DsbS7pPPM7AbZCTgKjJrssNJYcbQobFKCvL", 282.63795424 * 1e8}, + &TokenPayout{"Dsbs8hZUnParzqgtSpZgEGCUo5s1CCaYrMJ", 282.63795424 * 1e8}, + 
&TokenPayout{"DsbsDDY7LZ7xTsiZnbHRDyAVq3xy1BLhdKt", 282.63795424 * 1e8}, + &TokenPayout{"DsbSMEfGLvGMnNbJuHVbY72t7LxEM8SL6PJ", 282.63795424 * 1e8}, + &TokenPayout{"DsbsVJ3GGNAi14d3XDDCHFJhanE5Gvt3ETN", 282.63795424 * 1e8}, + &TokenPayout{"DsbT2REocs6myEmRYoqveVF79sK4y1SXdvf", 282.63795424 * 1e8}, + &TokenPayout{"DsbTJMeiLKa6SNERG2m1U2y9143qQrKcJik", 282.63795424 * 1e8}, + &TokenPayout{"DsbTn6v8b5bAssaKL3S6DgXd2v8vSgtWmgT", 282.63795424 * 1e8}, + &TokenPayout{"DsbTn9sGK5pD6fLecWtLb2p2GBwZbov4S7G", 282.63795424 * 1e8}, + &TokenPayout{"DsbtUdLbeH7qh5ghQmv1PTxpoW73tBtdZui", 282.63795424 * 1e8}, + &TokenPayout{"DsbUrMnKkLtZL4Y7qA3ZhN8ZgvqaLEdJCWU", 282.63795424 * 1e8}, + &TokenPayout{"DsbUSrWZTpqQreWRg1NE8YRpT5HRB5JtBUK", 282.63795424 * 1e8}, + &TokenPayout{"DsbuW7zJkV2z3a5WgV1kpxqNw3LC9CLSfSF", 282.63795424 * 1e8}, + &TokenPayout{"DsbUwtrAbkEkepsicM5MT4NGraepy9xAU65", 282.63795424 * 1e8}, + &TokenPayout{"DsbuXzDBF2Lb1BwRfhCrwcqeVR68kW1Cjqc", 282.63795424 * 1e8}, + &TokenPayout{"DsbvagEEsPTPkEdiqGB5Yvt33Fu98iqsTL2", 282.63795424 * 1e8}, + &TokenPayout{"DsbvqB1GdKS3nMYNS1cXSR6h88uUs6oENgZ", 282.63795424 * 1e8}, + &TokenPayout{"DsbvRWrSbYFkNNTWs8FS61qENeuPak1cNey", 282.63795424 * 1e8}, + &TokenPayout{"DsbvsAMEXZQGf1jkBahSqwWiv8deaPyJeki", 282.63795424 * 1e8}, + &TokenPayout{"DsbvXjBY35TxALRpHte3PWLCm5vcTrCDLwk", 282.63795424 * 1e8}, + &TokenPayout{"DsbW29YyPL7eDeJgPvrJzmdJBbt56fBuhYD", 282.63795424 * 1e8}, + &TokenPayout{"DsbW2wkjxRQ3y4BrJ5XMa8ibNRXPKJ5pjh4", 282.63795424 * 1e8}, + &TokenPayout{"DsbW3mwV7td9PGACCCYXd5ZC6fiBryCFu4A", 282.63795424 * 1e8}, + &TokenPayout{"Dsbw9bPM1TbpZrUTxNkW1GMSc2u49hy4gZi", 282.63795424 * 1e8}, + &TokenPayout{"DsbwCzRhGNVjCGmBryTrnN6KZoZomJXXRAU", 282.63795424 * 1e8}, + &TokenPayout{"DsbweYFv8qfw6EB9aMxmQ65ccqsV3ZYXySy", 282.63795424 * 1e8}, + &TokenPayout{"DsbwJdBsQyTYcLptvwWREUJE9JZKqB881Aq", 282.63795424 * 1e8}, + &TokenPayout{"DsbWTJ7yXQTbDXQ6rJ9sWSeM87Dvs9pLwR1", 282.63795424 * 1e8}, + &TokenPayout{"DsbXCvBPuiRAFewCYDhrFkeXGjuqwGGgA3A", 282.63795424 * 1e8}, + &TokenPayout{"DsbxD59BYvhduhh8d5x3Qjca2rsjsqQW6qd", 282.63795424 * 1e8}, + &TokenPayout{"DsbXEUseQDwuqRtEL7ar37iboCEiPNE8Dxe", 282.63795424 * 1e8}, + &TokenPayout{"DsbxMhEC1azdUkeZJPmjZHcKyGqGZbgLJbB", 282.63795424 * 1e8}, + &TokenPayout{"DsbxqDQZxnpUa12RdabgSz5tpn7WWaQKkvw", 282.63795424 * 1e8}, + &TokenPayout{"DsbXs1RNWfU6hgjfjWyJXkt5mdMKz1rSB4U", 282.63795424 * 1e8}, + &TokenPayout{"DsbXX8gibw7jrc7B48hCNHim8UZvEhGUV1M", 282.63795424 * 1e8}, + &TokenPayout{"DsbxXWYndDS9bzExHmj8QfCimW5EqocRur4", 282.63795424 * 1e8}, + &TokenPayout{"Dsby8YFWjHF87acKFLvJWJsF2QErJyVGxeS", 282.63795424 * 1e8}, + &TokenPayout{"DsbYb2ALGkoJUyiyDkosy7Sr8uutziYVuo2", 282.63795424 * 1e8}, + &TokenPayout{"Dsbywy8cwY5nanzknGRtihCNodGLq1bh9XL", 282.63795424 * 1e8}, + &TokenPayout{"DsbyXqFKTTp9ETVqZVxry64TAsECwyxv5Uo", 282.63795424 * 1e8}, + &TokenPayout{"DsbZctV2tSHgW6YyqiQ2CzSUM2Vcia72M35", 282.63795424 * 1e8}, + &TokenPayout{"DsbzwXJwBYrizcCkphhQZt4SbgQweXZd8cC", 282.63795424 * 1e8}, + &TokenPayout{"Dsc1sRZyRp3PjmFXAp4AKWnwYf6Uuf1wNqm", 282.63795424 * 1e8}, + &TokenPayout{"Dsc1WxuSETZqLvVwW3i3GuQCfAc2WzeSokg", 282.63795424 * 1e8}, + &TokenPayout{"Dsc2GV14uxtsxRnGwz8EwY2urp5fDeh7Mn5", 282.63795424 * 1e8}, + &TokenPayout{"Dsc33o3dNDQjwNWj5sDQsZKxVqpfq5xKbB2", 282.63795424 * 1e8}, + &TokenPayout{"Dsc4jwthpryMQ3h2AVWq7K8U6tCGdjssbF5", 282.63795424 * 1e8}, + &TokenPayout{"Dsc4tKTDd644BxiMYpQsDNViq33GrtgLith", 282.63795424 * 1e8}, + &TokenPayout{"Dsc538KPy236UmaPrdGkqXbwDzb6NRsQyMV", 282.63795424 * 1e8}, + 
&TokenPayout{"Dsc6woDbvb1noBiWsr3Cs4jcGhVcy5CYsUS", 282.63795424 * 1e8}, + &TokenPayout{"Dsc7KY4UHnpqMnT3Gk6fjrhPEm4VtFwYhkv", 282.63795424 * 1e8}, + &TokenPayout{"Dsc7nt4PypvanULSMXMEDv3xBhhYp9EhBtJ", 282.63795424 * 1e8}, + &TokenPayout{"Dsc8fbA9Hp9o11hsezbMpRjFZu9AmQHm7K2", 282.63795424 * 1e8}, + &TokenPayout{"Dsc8KmjCGznrBnZDR4AipaiPSmnPgSWfQqo", 282.63795424 * 1e8}, + &TokenPayout{"Dsc8TVqVphQK2Mq16NNJJkZgahLD3EWgYD6", 282.63795424 * 1e8}, + &TokenPayout{"Dsc8xr7fBhPYnutw2HeZZFzAUJjxsjxZvME", 282.63795424 * 1e8}, + &TokenPayout{"Dsc9aCyQEGrByCbr4XqKhDEeEquuwTLrP5F", 282.63795424 * 1e8}, + &TokenPayout{"Dsc9gKFcABF9LBebRms8aUWY7pL4BAsiEYM", 282.63795424 * 1e8}, + &TokenPayout{"Dsc9JxGD8jws47ifMkqzw1A2zezW6apPx7i", 282.63795424 * 1e8}, + &TokenPayout{"Dsc9TCrhDKKCbXXKuZn8h7cWNRBTKXHjhjk", 282.63795424 * 1e8}, + &TokenPayout{"DscAyCbLP3186tm4NkveBEtTevsQpajnYwB", 282.63795424 * 1e8}, + &TokenPayout{"DscbhmXRiZPzCtDoWxjZ3bimQhuqjJx2AsU", 282.63795424 * 1e8}, + &TokenPayout{"DscbkgqrrKbXGnFULCQKcWHCLHvSmLZoDoa", 282.63795424 * 1e8}, + &TokenPayout{"DscBp9kjr3gtNWcr7H7mgf8ccqCerBABzJN", 282.63795424 * 1e8}, + &TokenPayout{"DscbZunNtERCDLfanFekyxVv3AsUB1RCmWB", 282.63795424 * 1e8}, + &TokenPayout{"DsccC6pFW8NpfuXDp7aTjfxmJsdAwBxzG61", 282.63795424 * 1e8}, + &TokenPayout{"DsccnGbCm9FPkhy7P6mJqb3sN98u5VLawQp", 282.63795424 * 1e8}, + &TokenPayout{"DsccTWPHnNp4oxLyAMtCCkFteN75TnpCHDE", 282.63795424 * 1e8}, + &TokenPayout{"Dsccxo7WvUsyHbgjzStdDY5JYEngQ2vECJQ", 282.63795424 * 1e8}, + &TokenPayout{"DscdcrUnfMHwbrLt3DJpsPiwsWMcDG3KkFS", 282.63795424 * 1e8}, + &TokenPayout{"DscEGXD9QC9TZJ1zSH4iWDWybptGDGWytyq", 282.63795424 * 1e8}, + &TokenPayout{"DsceHi3e6YnscYZtGRLSPL5kkibimWLWcQD", 282.63795424 * 1e8}, + &TokenPayout{"Dsceip75hkorSyQxeDoDqdm4Z7rNZjRivRS", 282.63795424 * 1e8}, + &TokenPayout{"DscfViJ9suYQStNfqFnwkEbmoT34VefqwAj", 282.63795424 * 1e8}, + &TokenPayout{"DscFX8q651drnD344ABSKRr3gr8Vt5DN55E", 282.63795424 * 1e8}, + &TokenPayout{"DscG1CGfT77ubGTkL69YkYKGTWPSVARqGva", 282.63795424 * 1e8}, + &TokenPayout{"DscgFRmi29fbTHrZ1PJtVV9WwpycCD58qRZ", 282.63795424 * 1e8}, + &TokenPayout{"DscHb9aEVN8vCrBG4ZhZcNyGmdEX5V9Xmst", 282.63795424 * 1e8}, + &TokenPayout{"DscHCAd69o5ch7LVpMGkGsKZN4gk8m9oieH", 282.63795424 * 1e8}, + &TokenPayout{"DschFkhuJx2ZE5Lit7CzE7bwvzvWUTce17M", 282.63795424 * 1e8}, + &TokenPayout{"DscHtBBNKQXoob82P2RMemGpfEau4FFJuwb", 282.63795424 * 1e8}, + &TokenPayout{"DschTkjsawZ1ZES3Ams6Jrc5anmeigr9X9E", 282.63795424 * 1e8}, + &TokenPayout{"DschUn9Jgcd2aLCK7sZKh9gmaD4cap3xqqs", 282.63795424 * 1e8}, + &TokenPayout{"DschVmBaWYHLncXJJjtLMmLZNeh5KtKH7g3", 282.63795424 * 1e8}, + &TokenPayout{"DscHxuXYFarJpjkixVe8mrgobn52dSxpkaZ", 282.63795424 * 1e8}, + &TokenPayout{"Dsci9qxUncza2kHcvcLThVK2YEshAg57K8p", 282.63795424 * 1e8}, + &TokenPayout{"DsciSiSnnQVFi5A18b43dTh61CqRPRpfXES", 282.63795424 * 1e8}, + &TokenPayout{"DscJ8o4Hh4V5JiYrwPCnCXF3TJ98QX5d9Qu", 282.63795424 * 1e8}, + &TokenPayout{"Dscj9LEj6Y5be6eYc2E37G41CpL1ZhMwwK5", 282.63795424 * 1e8}, + &TokenPayout{"DscJC6UtB9vWVrWviU3oYWNPTo8dReJwCuX", 282.63795424 * 1e8}, + &TokenPayout{"Dscjfmg6nXPu3LEtG4BJEgCRnyq3xsYDGAE", 282.63795424 * 1e8}, + &TokenPayout{"DscJpQsK5du2FGtdMUTWsgHhQiBXfkSsyzw", 282.63795424 * 1e8}, + &TokenPayout{"DscjU266mBUjbwssdwvbq97tEXNHTsos6aZ", 282.63795424 * 1e8}, + &TokenPayout{"DscJWpFQat2FBWLRNVs6LZBcC8wp2B7TR2r", 282.63795424 * 1e8}, + &TokenPayout{"Dsck2pmx4khwLgX7nWKVVvxHbXD7CCtvNHU", 282.63795424 * 1e8}, + &TokenPayout{"Dsck9AhaKhdMfNvVkWdWBmuCYXi2TzHdBWL", 282.63795424 * 1e8}, + 
&TokenPayout{"DsckHPXqgCUg8RGjASJmPNzpk6xmunPmy1Y", 282.63795424 * 1e8}, + &TokenPayout{"DsckLfd7UG6VK3tdN8XqCfR85FZrtuZCRzw", 282.63795424 * 1e8}, + &TokenPayout{"DscktpGF38bKFyQVxw4fAsqkGx5S6KgmiMo", 282.63795424 * 1e8}, + &TokenPayout{"DsckUWzrZvg2Lk3yn3eNj7CnHh2o1nms3Y2", 282.63795424 * 1e8}, + &TokenPayout{"Dsckyv1yyq8n4N5LdKQoVtACArNDSGrjoHc", 282.63795424 * 1e8}, + &TokenPayout{"DscLCP7b6XKVmf7vZhipnL4vaog6Aj3Thit", 282.63795424 * 1e8}, + &TokenPayout{"DscM4qjEQojmeoAX5whc122JtuvAQKFWXZP", 282.63795424 * 1e8}, + &TokenPayout{"Dscm6U7dgvDmx11s5gGFjYJW2XYcxJEc5WD", 282.63795424 * 1e8}, + &TokenPayout{"DscMeMNGWVM4m6bYhV6GWWocvBY99YTVBm1", 282.63795424 * 1e8}, + &TokenPayout{"DscMijrNNB6bsd5qR9iLFZtd79ikksMYcnH", 282.63795424 * 1e8}, + &TokenPayout{"DscmJnhFYWd9M5PxmVKtvutA4Q8eeCvZUyf", 282.63795424 * 1e8}, + &TokenPayout{"DscmtYc5DX6BrA8T4gYLBtqGKUCfHuQbtp6", 282.63795424 * 1e8}, + &TokenPayout{"DscmWNNeRxqvA1Qe7LmjvKvNDP67vJv3KLa", 282.63795424 * 1e8}, + &TokenPayout{"Dscn67sMheUTsjbxQRMTtedsLePdJkeSvfg", 282.63795424 * 1e8}, + &TokenPayout{"DscNAC7SJVYSbMiMgnLTzZCJZM1GthmQkAg", 282.63795424 * 1e8}, + &TokenPayout{"DscNddwSM8xZx6mb1MpRtyCHdLwejYrsrTC", 282.63795424 * 1e8}, + &TokenPayout{"DscnJYEcqs6pYrR33aHio2eQj8cW52hM3GA", 282.63795424 * 1e8}, + &TokenPayout{"DscnNGsez9esv36tR3kcdT4pTZ1XaGdpqM3", 282.63795424 * 1e8}, + &TokenPayout{"DscnXquegVgcPegW856zbnET8tj5fqgeyyY", 282.63795424 * 1e8}, + &TokenPayout{"Dscop4EuLzo7pXQHkmWxboQTiHNZYYpbKVY", 282.63795424 * 1e8}, + &TokenPayout{"DscoshuEHwmW2uEaZTndzVeYCS4eFZ5Ujio", 282.63795424 * 1e8}, + &TokenPayout{"DscoYKNYkxRuVeidJ8mM6i7YQpdUoQkSmZD", 282.63795424 * 1e8}, + &TokenPayout{"DscP3mqzmHWLVbJdNzJ66vMUEnupKcKYntc", 282.63795424 * 1e8}, + &TokenPayout{"DscpnBSSwtVoEbRRYgEHv31cBRFCGVtx6hT", 282.63795424 * 1e8}, + &TokenPayout{"DscpyrLvd2EPePNCAH8uuiXW4Z4czfV1rHG", 282.63795424 * 1e8}, + &TokenPayout{"Dscqda8F9jb5csAVMJmoVN3HTEAPZspcgA5", 282.63795424 * 1e8}, + &TokenPayout{"DscQmFW6czUqdVAcuoU6fUQqVRG8rt7NuKa", 282.63795424 * 1e8}, + &TokenPayout{"DscR2WKhssmLypzWDiQjdXH6r7uZcSdJooi", 282.63795424 * 1e8}, + &TokenPayout{"DscR4ehEev4AWiJbCqqLq1ZyF1dYRG1Xjr3", 282.63795424 * 1e8}, + &TokenPayout{"DscRMZNae9tJv1iMWjB2bA9JoBbkuy6srhD", 282.63795424 * 1e8}, + &TokenPayout{"DscrnqSr4MBgyPKLYZbKcMhucZuMV5DPVKw", 282.63795424 * 1e8}, + &TokenPayout{"DscrNUCpnFafAx7kQkT6jSa5a9c65AGLdQo", 282.63795424 * 1e8}, + &TokenPayout{"DscRzdnzy31n5xC2hw7eaumjwLBPg9Y7D42", 282.63795424 * 1e8}, + &TokenPayout{"DscSNQighpjWUQkZzLLfJQ3T1TNo1uifjn7", 282.63795424 * 1e8}, + &TokenPayout{"DsctDnTxdFNuoVHPgLDExdpa782u9u3eoHy", 282.63795424 * 1e8}, + &TokenPayout{"DscTnN5z3RFPy8TwzbYotSyt8YTkVN24SkU", 282.63795424 * 1e8}, + &TokenPayout{"DscTQx9xEeKTq7LCvwesH8hK9dhFsNYSYSi", 282.63795424 * 1e8}, + &TokenPayout{"DscTswpy9QS8WKXdhRCBUreoDeXR2B3G3MZ", 282.63795424 * 1e8}, + &TokenPayout{"DscUD8S66DR3F1iyrayGvZmL1ooACqD8CH8", 282.63795424 * 1e8}, + &TokenPayout{"DscUeTmy1Hx2DddfAGX5Qia5G8TuDhgv517", 282.63795424 * 1e8}, + &TokenPayout{"DscuEw4HoTW7Qp8Tvn4fwjtvp3Xgo2fWtP3", 282.63795424 * 1e8}, + &TokenPayout{"DscuKgNjgLvmA6GsBn649k1rxKqVp11KrtV", 282.63795424 * 1e8}, + &TokenPayout{"DscusSNDJE8iX6LrRiYtGG1AhTkBoxzg3sm", 282.63795424 * 1e8}, + &TokenPayout{"DscUtV989jqaH43XQ8Jy4smkvqubdk9itto", 282.63795424 * 1e8}, + &TokenPayout{"DscuuUVwRCWWRYs3WeRrr2r36WhpfgoTPju", 282.63795424 * 1e8}, + &TokenPayout{"DscvAHKZPchwRqj7tmB5yish7K1MbXTbeRS", 282.63795424 * 1e8}, + &TokenPayout{"DscvfQqNBuGBZiEBL8mKqHYnxsDdmbbaP41", 282.63795424 * 1e8}, + 
&TokenPayout{"DscvSXHJ8oBBkSCA4eqhykMykq2x8913Dyt", 282.63795424 * 1e8}, + &TokenPayout{"DscWgpanwP5KM1VUamX1ZEy2c47GV5Ra7wU", 282.63795424 * 1e8}, + &TokenPayout{"DscWn8puvqnPvy116RA5MKPToA8fWLr71NR", 282.63795424 * 1e8}, + &TokenPayout{"DscWNgufdsAjfmv2XWzZvpoFDXrqHA6VoJ2", 282.63795424 * 1e8}, + &TokenPayout{"DscwrQ5fsv3pVVKRQZkvGzxQm6KPm9dhxVn", 282.63795424 * 1e8}, + &TokenPayout{"DscX3zB8eangkNyiJgUpn1jNcVaADX3ieDC", 282.63795424 * 1e8}, + &TokenPayout{"DscXbfzDKXWu9MXvhn6jyYP9HCRqn8mUNxP", 282.63795424 * 1e8}, + &TokenPayout{"DscxLonDYwpTGeAaVQki6p3mNxpDAV4VddY", 282.63795424 * 1e8}, + &TokenPayout{"DscXo1xdPXxpHsV4rAYTdnp8CzXViYcVtii", 282.63795424 * 1e8}, + &TokenPayout{"DscXz9MazKgmBpDUmCJRzyhc5k2U1oaVugY", 282.63795424 * 1e8}, + &TokenPayout{"DscY41iWdHngMX435WfSooZS2ndMVg1ZB1c", 282.63795424 * 1e8}, + &TokenPayout{"DscY8AXin4XfYUBVu68p6gcagbgUxPh1L89", 282.63795424 * 1e8}, + &TokenPayout{"DscYdavxsJJttsLzNu2bMxShQegpu3m1x5m", 282.63795424 * 1e8}, + &TokenPayout{"DscYhkQJuwgxxzSDwocR38zibZz8T1fUwL1", 282.63795424 * 1e8}, + &TokenPayout{"DscyKkXB6FNV53xRxCzsE72MqXvGx8p4bvh", 282.63795424 * 1e8}, + &TokenPayout{"DscypyGYUssK2QMJhztdwNheJyG1CQD3z8R", 282.63795424 * 1e8}, + &TokenPayout{"DscYttTw5rZXEn7eft5M5AYi3pahcaArnjo", 282.63795424 * 1e8}, + &TokenPayout{"DscYXwdgMjYKFyPz1bQLxU5WMqvwuMjew7Q", 282.63795424 * 1e8}, + &TokenPayout{"DscZ3vDetbHadTvtTazkfrQSLztDeyK7aep", 282.63795424 * 1e8}, + &TokenPayout{"DscZDq1vBDuWGre8ZExBpnPc9sHDgkJFz51", 282.63795424 * 1e8}, + &TokenPayout{"DscZPi47rJRyE2LV2Do4uDnvtAJ2VtZQRf1", 282.63795424 * 1e8}, + &TokenPayout{"DsczvkmD8NSPtYWhCbyuVzacps3K4s9Brmf", 282.63795424 * 1e8}, + &TokenPayout{"DscZvuQ1XfwPoPh7UkpNCRWdMq5DoTG8YAC", 282.63795424 * 1e8}, + &TokenPayout{"Dsd1VhQRiWfSDX4BYekaFGLG6Y5r97jT3VZ", 282.63795424 * 1e8}, + &TokenPayout{"Dsd1yvkjfJPwy47ESYzQkNjCRDgCeqZE4Yz", 282.63795424 * 1e8}, + &TokenPayout{"Dsd29tSAFuwTv6Gd26p94VFoJngbPVXbcD7", 282.63795424 * 1e8}, + &TokenPayout{"Dsd2u4gUW9t1pK4V8oJL8KPd7WnquLRJRk1", 282.63795424 * 1e8}, + &TokenPayout{"Dsd34PMyDZ1L3UbGcba6ZHj7yKkRGYWf1pC", 282.63795424 * 1e8}, + &TokenPayout{"Dsd3F5dh9NtFoNJ8QE7HVM74U4i8atWVYGZ", 282.63795424 * 1e8}, + &TokenPayout{"Dsd3hpa1btvXzQNG7j17tsrbViW8rUu4mak", 282.63795424 * 1e8}, + &TokenPayout{"Dsd4a5iQAt3bacpS3mjkMg4Zb6C1DhDkzQU", 282.63795424 * 1e8}, + &TokenPayout{"Dsd4k929aur8E6Xk99rz7ubCtRNt6LGor1N", 282.63795424 * 1e8}, + &TokenPayout{"Dsd4m3AdHjpyriV7v8eKC6DKBWz5fvAosP2", 282.63795424 * 1e8}, + &TokenPayout{"Dsd4odGnuv96wWh76iJVCBegVezdYbpDQEY", 282.63795424 * 1e8}, + &TokenPayout{"Dsd5DarsMcRGAWdhDXKyJds7SK99b6DreSK", 282.63795424 * 1e8}, + &TokenPayout{"Dsd5mfPSiySUrkz4v4LXriqPRW7cbL3LAHA", 282.63795424 * 1e8}, + &TokenPayout{"Dsd5yz3aomJagd5qAzz16fyUpQC4AHmGDEy", 282.63795424 * 1e8}, + &TokenPayout{"Dsd75w3Z2HEsZL9tzHrtXStrDBhRwQJZ6Er", 282.63795424 * 1e8}, + &TokenPayout{"Dsd76TjEp4VD8r4cB5NUoLRhAYWfhrpp2Dz", 282.63795424 * 1e8}, + &TokenPayout{"Dsd8hh6hToW6NKQZB9PfHbFWmf2SDxc9tiT", 282.63795424 * 1e8}, + &TokenPayout{"Dsd8soUEQ4wueyzkj7x7pnLiaUvsiqQshsi", 282.63795424 * 1e8}, + &TokenPayout{"Dsd8uZQe5cFGKNAku59xdv4LBhNabkbqwXr", 282.63795424 * 1e8}, + &TokenPayout{"Dsd8vLByWUBGNBTQBW3Rz6fYT1eNDKNzftv", 282.63795424 * 1e8}, + &TokenPayout{"Dsd9K64kY95kH6LRKn6HRjUCab8t28ahQUN", 282.63795424 * 1e8}, + &TokenPayout{"Dsd9kaCxJvwYAHGbRv3zY3PRMffGhMsMJH4", 282.63795424 * 1e8}, + &TokenPayout{"Dsd9xcSP299nqkidHFtLCHucCdNZhhjMsFZ", 282.63795424 * 1e8}, + &TokenPayout{"Dsd9Y8R3SjMc9PCpdx6bSKBTBvVZp2EzFsE", 282.63795424 * 1e8}, + 
&TokenPayout{"DsdAJEMJaJpqb1qiZpysEVCrVdAZ2XtLiCW", 282.63795424 * 1e8}, + &TokenPayout{"DsdAr45Lt37AqHipVeSVe7Dr1p2qhLVX67H", 282.63795424 * 1e8}, + &TokenPayout{"DsdAuP19EDf5vm7awc6U9mx25mRDJstC5dq", 282.63795424 * 1e8}, + &TokenPayout{"DsdAvPeU5QZkkcyH2v8UYxVYt93D6YeoAxS", 282.63795424 * 1e8}, + &TokenPayout{"DsdB5jG55EQPmg3wyLL35yN4aCCMiRygNHh", 282.63795424 * 1e8}, + &TokenPayout{"DsdBcP2HBzEM43dxE4UGzEwTd5b6Mv3b3eR", 282.63795424 * 1e8}, + &TokenPayout{"DsdBEcaA2wAyMupTxNyRhsuTQNYz6CToHqL", 282.63795424 * 1e8}, + &TokenPayout{"DsdbKWJ3iehUcpYjd9CaYqDydVE4ymY7dcs", 282.63795424 * 1e8}, + &TokenPayout{"DsdbWY6b8xvxxdmvWCiN2B2t9iUZQbBrCYg", 282.63795424 * 1e8}, + &TokenPayout{"Dsdc6QUT7ig8yxMnsmSafPNeidxL7HkUAss", 282.63795424 * 1e8}, + &TokenPayout{"DsdCchhCUqTJdUmDxerM9tL3HKt1iZzjXEG", 282.63795424 * 1e8}, + &TokenPayout{"DsdD7GJRhqj1y7MTY7KzK2SKV26oDjiEAgr", 282.63795424 * 1e8}, + &TokenPayout{"DsddehAAFzZFh2F6z64MFkkTVWuxj1mv9JL", 282.63795424 * 1e8}, + &TokenPayout{"DsdDJ8upAatrx6RB5YgeJB3xAFzS7Tu2kSq", 282.63795424 * 1e8}, + &TokenPayout{"DsdDLtJK6Q1n4kLRm54i4psXqnrJsG9qR2u", 282.63795424 * 1e8}, + &TokenPayout{"DsdE3KR8JrX312iLatMBX6tqKG3nLwnSFVU", 282.63795424 * 1e8}, + &TokenPayout{"DsdeA3xMH53UPYhpmQPbPzY41Vks3hLNCMt", 282.63795424 * 1e8}, + &TokenPayout{"DsdeLL1QBGsLpLb1uEBQ5Pn7xp51mnzwcLp", 282.63795424 * 1e8}, + &TokenPayout{"DsdEm6r24NEKScWrBqHufvqA7CzpCPZtSHP", 282.63795424 * 1e8}, + &TokenPayout{"DsdEMpTjLHBtgvoP7fdWQGjBCSXQ6SsJbpH", 282.63795424 * 1e8}, + &TokenPayout{"DsdeuaiLcY8ixmhF8UcjVfY5xMVZjsisBMG", 282.63795424 * 1e8}, + &TokenPayout{"DsdevMTBQNsk52SkJbEc8MhvwTxKjSvXmLr", 282.63795424 * 1e8}, + &TokenPayout{"DsdfaiwvMfXCXMU9T8fDWQZ38NnLMqAQGZ3", 282.63795424 * 1e8}, + &TokenPayout{"DsdFQQdzVuLbPrSds7xgR3fzweUPKPSE4by", 282.63795424 * 1e8}, + &TokenPayout{"DsdGGVcLMHSM7SXDp6NLYQFje8cXTWMdjqo", 282.63795424 * 1e8}, + &TokenPayout{"DsdgjRnQ1UaxedC4WQoTHoM1QiT1GDt64P1", 282.63795424 * 1e8}, + &TokenPayout{"DsdguXtYzwnpJrYjR1KFJkWogEPoVpFLWjE", 282.63795424 * 1e8}, + &TokenPayout{"DsdGzgoBdr1DyoP9VyZpujAFXroQq4w6c3R", 282.63795424 * 1e8}, + &TokenPayout{"DsdHaoByFuGviDNBN6SQNVqvijtEzGBTh74", 282.63795424 * 1e8}, + &TokenPayout{"DsdhK2PzTEhAaYvVAGNfHWEUA411fYydJLH", 282.63795424 * 1e8}, + &TokenPayout{"DsdhNxvMUYUXekXLkaibi56b2htpfaSccic", 282.63795424 * 1e8}, + &TokenPayout{"DsdHPx2cxgutRhkhooLkhQ8y9fgH4SCnYy4", 282.63795424 * 1e8}, + &TokenPayout{"DsdHRjyJUJo1y3WE917iSoUSpfgUXTYcvrA", 282.63795424 * 1e8}, + &TokenPayout{"DsdHuGajEvTfJAZatMUCP7QLmWJHVaCZsRV", 282.63795424 * 1e8}, + &TokenPayout{"DsdhVqZm2En25VtA5sfaE1SgbqnQcYFpRGN", 282.63795424 * 1e8}, + &TokenPayout{"DsdicAoakXCt4YSedajGDXnMEtpBhzdzfjW", 282.63795424 * 1e8}, + &TokenPayout{"DsdiizDC3neDo52E3kVJ81Mn5ni2gDnrB5T", 282.63795424 * 1e8}, + &TokenPayout{"DsdixQvUh349HPSkw5C8bdFKDqeB2J3qR9S", 282.63795424 * 1e8}, + &TokenPayout{"DsdJSYTd6D6uc1GabVvZJjauGzJYymu8xx2", 282.63795424 * 1e8}, + &TokenPayout{"DsdJVjKAKh3rZnw3b4oPEHHfRdRUnguMj36", 282.63795424 * 1e8}, + &TokenPayout{"DsdkFgm9mqumKurAQfyht6xyVQimdkaZi17", 282.63795424 * 1e8}, + &TokenPayout{"DsdkL8jGv1RGEuzywT4zAzNovbnxFdL6Baq", 282.63795424 * 1e8}, + &TokenPayout{"DsdknoxFmZi9vanaVzwcvBMvHH17swQjCMe", 282.63795424 * 1e8}, + &TokenPayout{"DsdLpcjUd2MahEqLn7R5L6u6RhGqdz1zAPK", 282.63795424 * 1e8}, + &TokenPayout{"DsdMbubHaXaZiVEqHGCAJGpi8Edp8yEfbvm", 282.63795424 * 1e8}, + &TokenPayout{"DsdmRSHNxLt1zffHaMM9VCDG42Kg5KPmFKk", 282.63795424 * 1e8}, + &TokenPayout{"DsdN7hHdcjFzDVEeqTZhbxABDHVhYZF8Kv4", 282.63795424 * 1e8}, + 
&TokenPayout{"DsdNepVuiD5BLTpExhYnVyUYcCCygoDGukZ", 282.63795424 * 1e8}, + &TokenPayout{"Dsdnk8dkik6XNm5mctSUBvS8pmbgA6BzB8d", 282.63795424 * 1e8}, + &TokenPayout{"DsdNXGviSCkUqRctYZfjn4vKnNHfY8bA62Y", 282.63795424 * 1e8}, + &TokenPayout{"Dsdo7njxMnnKrNsQn5zG8SMxKtxj2CRnfD5", 282.63795424 * 1e8}, + &TokenPayout{"DsdoJjnKUbtGudgBh27ceEPACF9iHAHQgak", 282.63795424 * 1e8}, + &TokenPayout{"DsdP3mu3GbJZsMdj8e9Gv4c2WiRTNAdxGyL", 282.63795424 * 1e8}, + &TokenPayout{"DsdPeWsLT8U7r7UZKe8YF79fL5zh3Skabgg", 282.63795424 * 1e8}, + &TokenPayout{"DsdPFqoepeCecGQzxXm6RUaezDKoypYfhep", 282.63795424 * 1e8}, + &TokenPayout{"DsdpNyvLt6AZDu39BJVqCE2JRYEFFEzktZD", 282.63795424 * 1e8}, + &TokenPayout{"DsdPwHmv8BZXRR1LdXWYQB4ZAQt5tcFjo6P", 282.63795424 * 1e8}, + &TokenPayout{"Dsdq4C3P8fKChH1QVAAeN19xi4myUKdbVwe", 282.63795424 * 1e8}, + &TokenPayout{"DsdqPt9L9bFHv7med9Hrhv7PtxE2tXJV7wt", 282.63795424 * 1e8}, + &TokenPayout{"DsdQQ2hF1Qvj9uv4QpL8RUujiLtXeJzuKxe", 282.63795424 * 1e8}, + &TokenPayout{"DsdqqYn6nNHQStAQhwKu4Ws2YkcBJW8C5bC", 282.63795424 * 1e8}, + &TokenPayout{"DsdQucWqDCysCL9jpaitGndanXW5uebZMJf", 282.63795424 * 1e8}, + &TokenPayout{"DsdqWPZtGMn2WRKLX9DWqUw4T4ikfrsPyPP", 282.63795424 * 1e8}, + &TokenPayout{"Dsdr6vNJCAeCHZ1LuTayreivN2S9PEovUNE", 282.63795424 * 1e8}, + &TokenPayout{"DsdRUBvxdRUT8h7r4JNHF2Y3xx8aztJnkXA", 282.63795424 * 1e8}, + &TokenPayout{"DsdSajtaBwkjbaLWKyXgfbjMDTq7284wwSt", 282.63795424 * 1e8}, + &TokenPayout{"DsdsfKcedmV79jmgrRfKUM45cD1eqbWKFzZ", 282.63795424 * 1e8}, + &TokenPayout{"DsdsokyQDi2jyKDDXTx8nYDGthGr38rLaPA", 282.63795424 * 1e8}, + &TokenPayout{"DsdSqJXHeDxVkHbQzphpd9jvL4WEFZAjSJ9", 282.63795424 * 1e8}, + &TokenPayout{"DsdSRp2mkrestcdScJ7swSfXt6qtwj2BP25", 282.63795424 * 1e8}, + &TokenPayout{"DsdT5UNLZch9bqFD3CLLq2uAWYr5bg9ewd8", 282.63795424 * 1e8}, + &TokenPayout{"DsdtgTT6PLs5UViHYZN4MKF1UUwoBkLKr6L", 282.63795424 * 1e8}, + &TokenPayout{"DsdtqzXz5553suMVWhBANKPccaxNTDT4g4S", 282.63795424 * 1e8}, + &TokenPayout{"DsdTtE4Wk5PgQDL3EwNTVerDMp6RYneioof", 282.63795424 * 1e8}, + &TokenPayout{"DsdUmoPZKrebiybLoHUhUXMbktDqPCnVe1h", 282.63795424 * 1e8}, + &TokenPayout{"DsdUtPvXRX4HbTaw1gTx7Abiic9CfwvXnwN", 282.63795424 * 1e8}, + &TokenPayout{"DsdUxJr2Fe67qGW2EpNdKvHduwWcWJWncQQ", 282.63795424 * 1e8}, + &TokenPayout{"DsduYHTGs6vKch4jyGUsjw7CULnVSNbkZw5", 282.63795424 * 1e8}, + &TokenPayout{"DsduzgYxtK3X1dXsZ6wRNe1baLH2iREyVa5", 282.63795424 * 1e8}, + &TokenPayout{"DsdVoqfqMM8MDMqcQErXrrHeRMxuVxp5i6F", 282.63795424 * 1e8}, + &TokenPayout{"DsdWKdUs7dAq94ojrbAazS9iaPAma7rsnY5", 282.63795424 * 1e8}, + &TokenPayout{"DsdwKF4f5T7w8ubQGKjQan8YtwmtwBZbuN5", 282.63795424 * 1e8}, + &TokenPayout{"DsdWLXWKoqiSaJt2FykuCC2ZaaVXkgvnHKv", 282.63795424 * 1e8}, + &TokenPayout{"DsdWTExtEew2NiQaitaT2CHJqZshmjgpbHs", 282.63795424 * 1e8}, + &TokenPayout{"DsdwxPBR1Rv3WnMrHgjkKhm2Xy6TYUB3k3M", 282.63795424 * 1e8}, + &TokenPayout{"DsdWZqYxLEagr2VAXZQ5VmuyGmNwvKrY2gZ", 282.63795424 * 1e8}, + &TokenPayout{"Dsdx5amEwfqjA5LVFDnLeU7vnXST6bkEJnr", 282.63795424 * 1e8}, + &TokenPayout{"Dsdx6AfMnL5irfqzT5R44UQmxge7FQQzxHN", 282.63795424 * 1e8}, + &TokenPayout{"DsdxEPUaeA6aYhJCeoZX1rSPum7eZt1bSke", 282.63795424 * 1e8}, + &TokenPayout{"DsdXmuyAPgf6iNz94HoXzdsFAp5W8tvXAvK", 282.63795424 * 1e8}, + &TokenPayout{"DsdXw9FYSaRa1kKz2QUtdfL4QgU4tCJy1Mj", 282.63795424 * 1e8}, + &TokenPayout{"DsdXy3KsvUkjXjSq9mL2Es3rfDyApVVm66J", 282.63795424 * 1e8}, + &TokenPayout{"Dsdy8vThnoPiegxXLLHMepMAmmmzT7V2ZKk", 282.63795424 * 1e8}, + &TokenPayout{"DsdYj7i997GZthR7xyy5mvMERkazVmbDnhU", 282.63795424 * 1e8}, + 
&TokenPayout{"DsdYLUDjPRLLeceyoDjmsoSn6DQZnxnUSQM", 282.63795424 * 1e8}, + &TokenPayout{"DsdYRT6hsv8D3v7U47j5FoNS9SfGwaxDuwu", 282.63795424 * 1e8}, + &TokenPayout{"DsdZAfLaQny5R1Fa5QR4WG75zLXMYhQNHon", 282.63795424 * 1e8}, + &TokenPayout{"DsdZgbf68GbN2xWGGbeEfxin2GVYaXcKwLk", 282.63795424 * 1e8}, + &TokenPayout{"DsdZVLNpM3Tv4QdSoK1n3jxr5YwNbJsE2tc", 282.63795424 * 1e8}, + &TokenPayout{"DsdZzC93WBfLs5K5Xq1Up2F3tfhFNBt2Jkq", 282.63795424 * 1e8}, + &TokenPayout{"Dse1G2Yddh4qiaX1iTFwAdZwq3Ucyba3B2P", 282.63795424 * 1e8}, + &TokenPayout{"Dse1juU2fNZLyoPhMt4wiYfBFDAE1Jc9hYY", 282.63795424 * 1e8}, + &TokenPayout{"Dse25ZGdD3cgLuwF9oFNBkrR2oLa3oi68tm", 282.63795424 * 1e8}, + &TokenPayout{"Dse2fCGHk6vfxPtAZN2FmPJyB8pkHxh7FpM", 282.63795424 * 1e8}, + &TokenPayout{"Dse2qwuh3mww3wFYBWFGESMmFaguPDuKRo6", 282.63795424 * 1e8}, + &TokenPayout{"Dse2yrbsxuwGZgJJvtEMykoV2qfbc3ReZcA", 282.63795424 * 1e8}, + &TokenPayout{"Dse3bf9LVWyyoCwProjfnUq9SVnAmvJna8V", 282.63795424 * 1e8}, + &TokenPayout{"Dse3kcfZCMTwLzx5yhHgx7t8Bu2GhvgFywL", 282.63795424 * 1e8}, + &TokenPayout{"Dse3VTdfoUw21EjYbRycABccpRAAz3KgyXx", 282.63795424 * 1e8}, + &TokenPayout{"Dse42VGEQRs3zzM5nmAvhF5JcKwUtRhjEia", 282.63795424 * 1e8}, + &TokenPayout{"Dse4dD7bvvrjYnq5jmd6qMJxpURkTFmxDxU", 282.63795424 * 1e8}, + &TokenPayout{"Dse4j1qMWJLdpZnMgc4cXhjD2AcnBRSsSzp", 282.63795424 * 1e8}, + &TokenPayout{"Dse4LmrcUUTo1oPxQ1m5qwUwXBy9HdrC3R3", 282.63795424 * 1e8}, + &TokenPayout{"Dse5GGUaoQ3zigLxSoSUYTvVQxKM2GmN6KR", 282.63795424 * 1e8}, + &TokenPayout{"Dse5xw7buiqURs5CeE2Pr85MLAaUUYsX55X", 282.63795424 * 1e8}, + &TokenPayout{"Dse62sSRgCg6BuUeqsCUa1fxrhaLkPyjTtw", 282.63795424 * 1e8}, + &TokenPayout{"Dse777CtvVX1qfuafidM8idFW7T1GnWDbWz", 282.63795424 * 1e8}, + &TokenPayout{"Dse8kxMsxW2hfRXYBYabV8sqF1ZdrCgVQ5U", 282.63795424 * 1e8}, + &TokenPayout{"DseA6pS2QFsoqCRc9exFK5jnwvtArAmeGwz", 282.63795424 * 1e8}, + &TokenPayout{"DseActvjDLEA9y6DyKcnk6BjNkeAPMYLH68", 282.63795424 * 1e8}, + &TokenPayout{"DseaKtR91CqfG9NU1JTsCdfZLeDqNxRornW", 282.63795424 * 1e8}, + &TokenPayout{"DseAxqRPYhL7mXsSRo3CBMJtyJHPrqcL6FA", 282.63795424 * 1e8}, + &TokenPayout{"Dseb3YgRgD22AYEdTT4TqQnCu3wK7GwiQ6Q", 282.63795424 * 1e8}, + &TokenPayout{"DseB9UkrQc36nvoqYWK4SykRhB3Cmh8RXTq", 282.63795424 * 1e8}, + &TokenPayout{"DseBhuFEBv1nzK3S89a8yiGj2Mw4LJECZS7", 282.63795424 * 1e8}, + &TokenPayout{"DseBrAHqyXuUg7qcJGAe5p4vxYuCa1dw1sW", 282.63795424 * 1e8}, + &TokenPayout{"DseBTfDwU9kznfefrCx7avkm1BhzczH34V2", 282.63795424 * 1e8}, + &TokenPayout{"DsebTVFyB4JYdza24rrpAZnx5tBPnN2mmds", 282.63795424 * 1e8}, + &TokenPayout{"DsebYA8booaW1aZCzjhYa2hW5zpRSHyTriU", 282.63795424 * 1e8}, + &TokenPayout{"DseBYHXbAdGdTk2rmjiQvz2nUVXx7a6Mz4F", 282.63795424 * 1e8}, + &TokenPayout{"DseCcvDZfviY34ba7Mst6cxL7WjubEycphk", 282.63795424 * 1e8}, + &TokenPayout{"DseCyYaWqHQJFApB3BReZSdYrmcJbkXxuEf", 282.63795424 * 1e8}, + &TokenPayout{"DseD8UqNbhjfSVbWPW2SSvxJ2wbEKKPHCY9", 282.63795424 * 1e8}, + &TokenPayout{"DsedAt7kAUTZVWeSEXkHNqCrPj7YAwvVkDS", 282.63795424 * 1e8}, + &TokenPayout{"DseDDmPXbE1jMGwfTKeApfAAobbrgmfj5Eo", 282.63795424 * 1e8}, + &TokenPayout{"DsedHbD5cTJmn8wbHSj8LyQnK7CNKgvoqj3", 282.63795424 * 1e8}, + &TokenPayout{"DseDuqPUSWbacnnSuQUd3oddjJ1VFjnmD4P", 282.63795424 * 1e8}, + &TokenPayout{"DsedVVwn13TYXTWZqLSTRW7gg8b3npwX86h", 282.63795424 * 1e8}, + &TokenPayout{"Dsee22nZfPMHvNW2wv1L3zihLTnWsRy8AXi", 282.63795424 * 1e8}, + &TokenPayout{"DseE5rEiaTbcGgmpqHRMWenSHti9BxPMbgF", 282.63795424 * 1e8}, + &TokenPayout{"DseEcmQTUuGyw4XAmXUkSN8z6SkbQKdUScr", 282.63795424 * 1e8}, + 
&TokenPayout{"DseEDsnkhnhnhj2u8DAJTzRqdwf7VGSuLTC", 282.63795424 * 1e8}, + &TokenPayout{"DseEHqobMNADgNUkeTHxBSmyaSKLa5A9bYu", 282.63795424 * 1e8}, + &TokenPayout{"DseEmuLkC7KQj2poMLWEcs7ft49XdMQiGej", 282.63795424 * 1e8}, + &TokenPayout{"DseF2UooQ8R78U6ZkPxEkUfzvyWMS3uVZQk", 282.63795424 * 1e8}, + &TokenPayout{"DseFauqo9ZMaHhHxTBcF3Rn8n1j8b2jtMXc", 282.63795424 * 1e8}, + &TokenPayout{"DseFTeKmSoQDWACEvFvegBXWRATJuDgmBwv", 282.63795424 * 1e8}, + &TokenPayout{"DsefvpY4Y1sHxC8iKNE5zJXLhABskopivGW", 282.63795424 * 1e8}, + &TokenPayout{"DseGinqiiTDakc4zCwLjUbZs8BEWg1wvCVq", 282.63795424 * 1e8}, + &TokenPayout{"DseGtvm8h9GhDyEh4SLGxo1GfxfAUtTAMRn", 282.63795424 * 1e8}, + &TokenPayout{"DseGZfWgDVsojMaLZurxYDsgXnCezU725an", 282.63795424 * 1e8}, + &TokenPayout{"Dseh4Fs9LZpYzUPiwNe6p83QwqGDwnVG4Vi", 282.63795424 * 1e8}, + &TokenPayout{"DseHd7YnEMMvUhjWbu2PDjVaWaN8AyfmxCj", 282.63795424 * 1e8}, + &TokenPayout{"DseHHRnhBNmD2iectxjasn335dyf1Q4H1Jb", 282.63795424 * 1e8}, + &TokenPayout{"DseHutzskBE5dghNq1xQBpFdZb9E64xhaFz", 282.63795424 * 1e8}, + &TokenPayout{"DseidRpsZ7sDjJh42Qiiriu4a3fW9XQEECH", 282.63795424 * 1e8}, + &TokenPayout{"Dseif7o45Sa9KrsZZaXBxHSY8GENDozEHay", 282.63795424 * 1e8}, + &TokenPayout{"DsejHhk7ZApD3WLYinSyrgkRtM461yHBNAD", 282.63795424 * 1e8}, + &TokenPayout{"DseJsU9a4UqSDECLkvE6Ctv2JJ6BVUGMREt", 282.63795424 * 1e8}, + &TokenPayout{"Dsek2xhmA6oHJoFgtkEnsgcYVra4RYYopp4", 282.63795424 * 1e8}, + &TokenPayout{"DseKdyKpjwPUACf25bwnY2ffFY7Jf1pn7DY", 282.63795424 * 1e8}, + &TokenPayout{"DseKMcpraLsiDBMhG5sjCr4FKRUacB8kfc7", 282.63795424 * 1e8}, + &TokenPayout{"DseKYB3D8eA4ZifsDMXuMfGBeWFBYLZzPgn", 282.63795424 * 1e8}, + &TokenPayout{"DseL3RZsHkgKsF8BoLr2znytYi7oL75x89t", 282.63795424 * 1e8}, + &TokenPayout{"DseLyjrygeJ4Ama5psCi1b7R4j3QcFVRtEe", 282.63795424 * 1e8}, + &TokenPayout{"DseMbsFpJkGEWhMNU1QfuAUvvEx14VQvMvE", 282.63795424 * 1e8}, + &TokenPayout{"DsemJsZ8drXZM9RDpzXr5WRph2E7rrkjeeC", 282.63795424 * 1e8}, + &TokenPayout{"DseMXtEteipNbrrUDDpCgANcaM7HBEDz3Ci", 282.63795424 * 1e8}, + &TokenPayout{"Dsen7FwLFNqCBrDDsP4yUm2hMAjvXADVoKR", 282.63795424 * 1e8}, + &TokenPayout{"DseNaLCT6R376e9CdN3thWvK5PsJgNJ5WBX", 282.63795424 * 1e8}, + &TokenPayout{"DseNBKeLTMaDe3XLtsPgqLgo272nrdvdGCG", 282.63795424 * 1e8}, + &TokenPayout{"DsencQMDir9ciNxpBNijQSeV851v3mbYFdm", 282.63795424 * 1e8}, + &TokenPayout{"DsenGuxYdPnmvjtZJwALWcZ4uzdawCKa7WS", 282.63795424 * 1e8}, + &TokenPayout{"DseNkC98P8w8f8r97TK6brXWURM2HVAEHGP", 282.63795424 * 1e8}, + &TokenPayout{"DseNqjwjCPrecty1hToHkniBz5QsMZs8pXQ", 282.63795424 * 1e8}, + &TokenPayout{"DseNZ5ofxwRThCGgCCqAAGpSB9wY5dRj6k8", 282.63795424 * 1e8}, + &TokenPayout{"Dseo1Hxdr2qStNKuwu3JPL8yNbkfd2jTWeq", 282.63795424 * 1e8}, + &TokenPayout{"DseoWS64PkozgqTxMcp4siJdXULrGeTeBxK", 282.63795424 * 1e8}, + &TokenPayout{"DseoyjD3v6oqTFj4ogKu8yFwfmguJLLeeEn", 282.63795424 * 1e8}, + &TokenPayout{"DsePQWVR9dKoMFcws1QNCoCtG1X7HhWGqmh", 282.63795424 * 1e8}, + &TokenPayout{"DsepVm6bDDuC4R3EQ6cqrBj2SE9d75JmVkM", 282.63795424 * 1e8}, + &TokenPayout{"DseQ5Qtn2caci8yw1sPq2Xo1g66it5vFkgx", 282.63795424 * 1e8}, + &TokenPayout{"DseQG1wXEBEiE7mzwB3WADBsUTGzYbSU7Gz", 282.63795424 * 1e8}, + &TokenPayout{"DseqjWC7WZF9iX8B2JPfCbGCQdbnoQqnyZw", 282.63795424 * 1e8}, + &TokenPayout{"DseqP65vpaTEESHCFVakE5Sd6DuJN7VVYP9", 282.63795424 * 1e8}, + &TokenPayout{"DseQXAQkJEYxpAk1idhohovs8ZYbk4bK6x4", 282.63795424 * 1e8}, + &TokenPayout{"DseqxyVMekHuLDNT1Cjo65oreRqZNwYE5iU", 282.63795424 * 1e8}, + &TokenPayout{"DseQYUyhRpVeEQjHd1WpE7wNC9jeZ1SYZbU", 282.63795424 * 1e8}, + 
&TokenPayout{"Dserafx1xpii3nYwnRjuWpNYeHuMmeBHryZ", 282.63795424 * 1e8}, + &TokenPayout{"DseRBFHBji8ktqoM4Vvoor2qbfUMVxRiLKb", 282.63795424 * 1e8}, + &TokenPayout{"DseRDHMiVcA1RnmGVQ6R2mq1or9ihr4czBu", 282.63795424 * 1e8}, + &TokenPayout{"DseRidEscAD1AfPFj1wVss2P4yQc9YbhrWp", 282.63795424 * 1e8}, + &TokenPayout{"Dserjng5Q1ng89wud5hrH94YbZmJr5PyX13", 282.63795424 * 1e8}, + &TokenPayout{"DseS6XNjpfmcbgFaMPv1mHS5pzJqZAYGorh", 282.63795424 * 1e8}, + &TokenPayout{"DseSBaP6m6KHjtw1veyqGANA9iFyS6nXT7h", 282.63795424 * 1e8}, + &TokenPayout{"DseSg5Jt7c3fgzUNXkGMJbQXfa4K4z2Gmpj", 282.63795424 * 1e8}, + &TokenPayout{"DsesgHKTTcUxpjHNPGVCuHZwyv2aUEnUwvu", 282.63795424 * 1e8}, + &TokenPayout{"DsesVKbJYPoVHicbByzJhaXtUeGJTjGXFSC", 282.63795424 * 1e8}, + &TokenPayout{"DseSvTm9yAUnJBxEXLPuLBc62hgHrtWXURM", 282.63795424 * 1e8}, + &TokenPayout{"DsesX3Ck2hn3jJ3omuwu4oHNDPNZeW75zKf", 282.63795424 * 1e8}, + &TokenPayout{"DsetHWmhg46YzDLjbfAMx4HzgQqeEwgGjTk", 282.63795424 * 1e8}, + &TokenPayout{"DseU2mP9ppgaeYDSDpw4Zbxfqit8MiKfXyf", 282.63795424 * 1e8}, + &TokenPayout{"DseU6fTmEuL2zdSobkPWvTnHcNMQdLageJ1", 282.63795424 * 1e8}, + &TokenPayout{"DseUDDMsmLqPwe9Q68pjpFWXwsBbhZ6E6WY", 282.63795424 * 1e8}, + &TokenPayout{"DseuRGtPzqyr4oG5cFyvddUk2NwDWLQpP5D", 282.63795424 * 1e8}, + &TokenPayout{"DseuwtodahTx9PhPc1MY5tzMpk6QwzAJiaB", 282.63795424 * 1e8}, + &TokenPayout{"Dsevgp1jD792n2bVLep9YJ4ywZfFGt6g5fk", 282.63795424 * 1e8}, + &TokenPayout{"DsevHgKxzqNbcm1z4Jzx4hZaawfJokaYj4F", 282.63795424 * 1e8}, + &TokenPayout{"DsewG6wHhfJ7NGgUNznJMCy7UGFMpF4Mh1Z", 282.63795424 * 1e8}, + &TokenPayout{"DseWiMDAZcWwK73WjMBbhKNdSZXWs2NdWDz", 282.63795424 * 1e8}, + &TokenPayout{"DsewoTJURs6TUWMfrXK8RAgkJYamUyN5oM6", 282.63795424 * 1e8}, + &TokenPayout{"DseWRAp3V4ZVLCU8pPLk3sVn4TTCDwLj67Y", 282.63795424 * 1e8}, + &TokenPayout{"DseWwHA9hMtqCCDx2fkLcSCu11EzvBQ4VhX", 282.63795424 * 1e8}, + &TokenPayout{"DsexE76Gb8x15jAVz67ogeoq4XZJK7L5jTT", 282.63795424 * 1e8}, + &TokenPayout{"DseXmxzqywX3CSogefBFgXsdSgUdQGqrd23", 282.63795424 * 1e8}, + &TokenPayout{"DsexYvVLGoXs6tvJVuNmmiW7EnZ31Enoo8V", 282.63795424 * 1e8}, + &TokenPayout{"DseY8kZ42XhkEDgwvsZccAp9R8cYiKETz4m", 282.63795424 * 1e8}, + &TokenPayout{"DseYAjUhusM8iamku8fEaohiGEKwGpnmSW9", 282.63795424 * 1e8}, + &TokenPayout{"DseYbA2g7mVAG58NeEH6LesJux51xmfYjNo", 282.63795424 * 1e8}, + &TokenPayout{"DseyTWYYjHfcuCtauy1zS8jRZFvx1wjJwa8", 282.63795424 * 1e8}, + &TokenPayout{"DseZ4z7Ej64Xrmnffpgdfk4BZz5tpoMSxHH", 282.63795424 * 1e8}, + &TokenPayout{"Dsez6RbZtSqfVVs884Wcmifc87D4QkHJTbk", 282.63795424 * 1e8}, + &TokenPayout{"DsezDFqY6af9ebWqAtzKAbyseiECYnK4s6v", 282.63795424 * 1e8}, + &TokenPayout{"DsezDKGC2dpaVyi7aCskAb4n5yHzNPDC4XL", 282.63795424 * 1e8}, + &TokenPayout{"DsezfXC6jB5du4feVt5BeZQ5PARAECk3YYv", 282.63795424 * 1e8}, + &TokenPayout{"DsezHngovLwLdWk5ZqXkKqyMCnjDxca1boG", 282.63795424 * 1e8}, + &TokenPayout{"DsezWSQ3UyramdMPjUbQ9KPon1yfPRYeVBf", 282.63795424 * 1e8}, + &TokenPayout{"Dsf1mAytAsXecx4fTjwr1Y8TWeSAWewftZ8", 282.63795424 * 1e8}, + &TokenPayout{"Dsf1oVJq8SrRA5u4VBgXY6Vw6S6nTixFzzP", 282.63795424 * 1e8}, + &TokenPayout{"Dsf1vUjGJKmZLftb3eVRYLRe3hjtQvZHxV7", 282.63795424 * 1e8}, + &TokenPayout{"Dsf2C8wijwMcKtmUrqPjKgwJK2fejUhC6A5", 282.63795424 * 1e8}, + &TokenPayout{"Dsf2gTL1sWNHQ5SxQWBkrNedYZA3vHixgpV", 282.63795424 * 1e8}, + &TokenPayout{"Dsf2TrHRYmQyaDHadwkV63ZXReK3ThQSd2F", 282.63795424 * 1e8}, + &TokenPayout{"Dsf2zJHCcBGr5MnLhtsZLXNLMVbjjXsPPrg", 282.63795424 * 1e8}, + &TokenPayout{"Dsf3GA9CxkTva7XfDJgVdHNVYu9V4pe5Giq", 282.63795424 * 1e8}, + 
&TokenPayout{"Dsf3QUS7iwsrUGtAmpgi9TUjvAVFp1WDix4", 282.63795424 * 1e8}, + &TokenPayout{"Dsf3TazAADrpWCaMu9SitVLavuTPE8aWLFQ", 282.63795424 * 1e8}, + &TokenPayout{"Dsf4aCWEAeAUWsfGL3x8RJ1fWBSPn8QdMNG", 282.63795424 * 1e8}, + &TokenPayout{"Dsf4eWchs1pxevbhyvrEvMD5smqp7sqta1D", 282.63795424 * 1e8}, + &TokenPayout{"Dsf4XLL2RzL66tMWUMr8xnNFXQeRrV2PP8B", 282.63795424 * 1e8}, + &TokenPayout{"Dsf55gx97vatGFCrqZBufN9Dn76thA1oZZD", 282.63795424 * 1e8}, + &TokenPayout{"Dsf588TffjxR6wSKL94iHNmjgJ6wKN9siJ1", 282.63795424 * 1e8}, + &TokenPayout{"Dsf58xVVQooRo1sSRnmJXnn4UYZwjm6yzmm", 282.63795424 * 1e8}, + &TokenPayout{"Dsf5FfFm5iE1LSBKGcm2i2Rxs7FH7UHuydJ", 282.63795424 * 1e8}, + &TokenPayout{"Dsf8eXSdkCKxpjdoAvt7QC2Px1uSuZvSvcV", 282.63795424 * 1e8}, + &TokenPayout{"Dsf8qQzoe8WTqy6UsEo5D7WNtoihNnPtKAu", 282.63795424 * 1e8}, + &TokenPayout{"Dsf8RwLcQUA65S2CgYvcTSYKpxvgfss2V68", 282.63795424 * 1e8}, + &TokenPayout{"Dsf9AbRHAY8pXqKt85LJQaqZ2Y4cf3arYnC", 282.63795424 * 1e8}, + &TokenPayout{"Dsf9FxWVd1MC7WKctpi4Kj7HmYzq4XEau9V", 282.63795424 * 1e8}, + &TokenPayout{"Dsf9KpTSe9ARjvA6fiTGG6AYrt6fcDTE4q9", 282.63795424 * 1e8}, + &TokenPayout{"Dsf9qNMBj6BYroTjAhhCezy8bfHVaQrwDoy", 282.63795424 * 1e8}, + &TokenPayout{"Dsf9TyQpHqMDso9ubayfUS4QRx76k3vgLvo", 282.63795424 * 1e8}, + &TokenPayout{"DsfaQ5x5MPrzB6sLcnYnFRKYk7Si7f9dvxM", 282.63795424 * 1e8}, + &TokenPayout{"DsfaXkTcjtv8v4yKh2e82eRRWb36tiq3nYN", 282.63795424 * 1e8}, + &TokenPayout{"DsfB21gTc9ozTwABS4qPwaFsqrqfqFwRdx6", 282.63795424 * 1e8}, + &TokenPayout{"DsfBgBzpWqpMC46XBohmQ4uLZS4qdHsH3EM", 282.63795424 * 1e8}, + &TokenPayout{"DsfBhfuCX1pLWhLddYvn8mefG95qd7sM2hx", 282.63795424 * 1e8}, + &TokenPayout{"DsfbjQGWqS29Pd9Gqi2io1BVeSVrxf3TMEo", 282.63795424 * 1e8}, + &TokenPayout{"DsfBm1JUtrQRnqVBGerE6nyjS1L4FLrAeqX", 282.63795424 * 1e8}, + &TokenPayout{"DsfbqH521aXT1iE7KnKPCmncNse9EKr1m4W", 282.63795424 * 1e8}, + &TokenPayout{"DsfC24n6EgHEt86VNs9pdVmnKVzjN72vjwR", 282.63795424 * 1e8}, + &TokenPayout{"DsfC5Dw4Wbbw2Qpn9QWh6D4VytPfDLjZfFd", 282.63795424 * 1e8}, + &TokenPayout{"DsfC8LPQS1KuT8kjUHQnodkV8ZejioN6q8i", 282.63795424 * 1e8}, + &TokenPayout{"DsfciiwaGXpgXxF27JnWr5yimzhV4XsZFod", 282.63795424 * 1e8}, + &TokenPayout{"DsfCwi76ptMTZuUdwZyY7KzCmNKzvZm6CJB", 282.63795424 * 1e8}, + &TokenPayout{"DsfCzLTFZNa9xHmfWZ4perrJJ9bkaRLniMG", 282.63795424 * 1e8}, + &TokenPayout{"DsfdPAvWJ9m27AYCZpV8kbkvYKMCUGa8sQg", 282.63795424 * 1e8}, + &TokenPayout{"DsfdPRrQvJY1XXqJeQtL9zBRteTTQk1kQeJ", 282.63795424 * 1e8}, + &TokenPayout{"DsfDRwsjDkz3BGr4yhXmSpizQ7eyFDrMJ1b", 282.63795424 * 1e8}, + &TokenPayout{"DsfdTL1m3czZeLxtzXvfffETTpwHMPGZuVJ", 282.63795424 * 1e8}, + &TokenPayout{"DsfDYYUs6RCPZW1P4g614SYJthiGiZNhiru", 282.63795424 * 1e8}, + &TokenPayout{"DsfEhZABs33GprvyEvPmT3QQziTNGBsebLK", 282.63795424 * 1e8}, + &TokenPayout{"DsfELJVTbiFLzpEqAdjTnvmSRbietktLMLi", 282.63795424 * 1e8}, + &TokenPayout{"DsfFuE3jrwBj6jWxtvTLM48pVLDjfvcPWkw", 282.63795424 * 1e8}, + &TokenPayout{"DsfGb5in4YDumxjEGdDLwVjXW1qrCHCumzD", 282.63795424 * 1e8}, + &TokenPayout{"DsfGRvhq8NbNLGAAPM2ujgwSvZ96t6Ld9it", 282.63795424 * 1e8}, + &TokenPayout{"DsfhANzME7EkWXKAbT9ecnuvvAGJ5KLi8Xn", 282.63795424 * 1e8}, + &TokenPayout{"DsfHDtMpeppChhb5dqmo6rE3GQULwsfapri", 282.63795424 * 1e8}, + &TokenPayout{"DsfHhmLBnSs7PHFpyJmbuDGoj4TmCaBr1yQ", 282.63795424 * 1e8}, + &TokenPayout{"DsfhHxjoHsBuZAsNN4NCLTHPkKQHwxJhXsE", 282.63795424 * 1e8}, + &TokenPayout{"DsfHJcBMfMUQeboxwiBF3oL9xRaNzqVThC5", 282.63795424 * 1e8}, + &TokenPayout{"DsfHrjY47ymwekxRMGW43ioBtoFaWQ2xYWb", 282.63795424 * 1e8}, + 
&TokenPayout{"DsfHybFtAbT7Exmn2vGHxgQ5c6bUhFacdiV", 282.63795424 * 1e8}, + &TokenPayout{"Dsfi78W5TP98BmjxHF7wrQTqjYapkM2K29y", 282.63795424 * 1e8}, + &TokenPayout{"Dsfi8oLsUn6NQCc6iBWhp9AaRDTvEyE7tYB", 282.63795424 * 1e8}, + &TokenPayout{"DsfiLeHfAb2eYYM2AqH1S9DAca22d4WifhQ", 282.63795424 * 1e8}, + &TokenPayout{"DsfjGQ3WBQd8dHmEvsFyHJ25MpqJcYWXACe", 282.63795424 * 1e8}, + &TokenPayout{"DsfJGRPLxzJ7q5JsDFSuudrovnQZPoCMnEf", 282.63795424 * 1e8}, + &TokenPayout{"DsfJhr31xwLjkZo63nBuEEYm89ghvdH46nT", 282.63795424 * 1e8}, + &TokenPayout{"DsfJpYtSSRLVSSqTjSutiiyuKeS85py9LHA", 282.63795424 * 1e8}, + &TokenPayout{"DsfJzbRhBGdfJLryDQt6ypeewadT4AhkdEp", 282.63795424 * 1e8}, + &TokenPayout{"DsfkfiTYiL61S2AUkzWXnE6W8M46V9SsfCK", 282.63795424 * 1e8}, + &TokenPayout{"DsfKkWJta3yPfRcukYvZAuGXmvwdKm75bsS", 282.63795424 * 1e8}, + &TokenPayout{"DsfkqejCcUDtQ3nERxaCpztPeR1K1Pod4Kg", 282.63795424 * 1e8}, + &TokenPayout{"DsfkrDiNAFqLxZem2DHD8Fuf4YyuinpLj48", 282.63795424 * 1e8}, + &TokenPayout{"DsfL1gkGMuhxALb8HRSVWfPcgexaxXSh4SY", 282.63795424 * 1e8}, + &TokenPayout{"DsfLJqU74ZumUqoZgQKQDDm6K2QPJDvawTt", 282.63795424 * 1e8}, + &TokenPayout{"DsfLpg1rAZyDe95XZtjS6g1mU41j4UnAw5s", 282.63795424 * 1e8}, + &TokenPayout{"DsfmcbYDt6UVGEgYNrJF1CtHskanFuJpa7c", 282.63795424 * 1e8}, + &TokenPayout{"Dsfmd6U8XfycCPX4M6Bsz2CbtwTXZvgiCmX", 282.63795424 * 1e8}, + &TokenPayout{"DsfMGaQMkJvMGXJ6hXUwFRgCLE1hAkuWqKd", 282.63795424 * 1e8}, + &TokenPayout{"DsfMRQXfpHG32cCQiSaPWhSyXjuwmoRcY5o", 282.63795424 * 1e8}, + &TokenPayout{"Dsfn6PZMbXKxkqH6uwzvrpsx1gw3mFVACrH", 282.63795424 * 1e8}, + &TokenPayout{"DsfnBaJHKQs3TMFCqPQogCGUXGcuBNdRd9Q", 282.63795424 * 1e8}, + &TokenPayout{"DsfnmKjPWPMzZyTEtdhw6jv2E19y6f9r9p4", 282.63795424 * 1e8}, + &TokenPayout{"DsfP71MvJxXbFZJntXPbwnCvtiKvA6RaZrY", 282.63795424 * 1e8}, + &TokenPayout{"DsfPBTsfKwD66jqkU1GY6eoTwpF8Kn1W9dG", 282.63795424 * 1e8}, + &TokenPayout{"DsfPbZzsUTsN7NRJcTsyoBdhY2vxEQxvVHd", 282.63795424 * 1e8}, + &TokenPayout{"DsfQ3gUh83qp2SLjHA5h3jBLNx75SheFZED", 282.63795424 * 1e8}, + &TokenPayout{"DsfQ6sSgTaFYkeDfAkKbw8DdU43uP2tonXH", 282.63795424 * 1e8}, + &TokenPayout{"DsfQ9ParwDdrPGoc3cKSHSLLKYphZKDoNBW", 282.63795424 * 1e8}, + &TokenPayout{"Dsfqiq7bFHZKwaCpQY9V7fvLCZczNDbwHsk", 282.63795424 * 1e8}, + &TokenPayout{"DsfQK7MCNbbFN3oyuCxo5JsTb4ujoqX5VAB", 282.63795424 * 1e8}, + &TokenPayout{"DsfQomTaNt6sA2cwS9RE2PEfkesEeH7oHRr", 282.63795424 * 1e8}, + &TokenPayout{"DsfQqHtUuTx4sWdcifkiR4bxcN8Q6DFDZzC", 282.63795424 * 1e8}, + &TokenPayout{"DsfQuovpyTQeSofe4yN4xbV3GeHDW1gtXuc", 282.63795424 * 1e8}, + &TokenPayout{"DsfqZijzhP4sEo8YZ9EkWGiyvrDd98G2jPV", 282.63795424 * 1e8}, + &TokenPayout{"Dsfr8uqRbwZpKhxntL86fSZByFUG7caauDo", 282.63795424 * 1e8}, + &TokenPayout{"DsfRi5YFJtKXgc1jME1gS9GsdLpK5ahnZ6d", 282.63795424 * 1e8}, + &TokenPayout{"Dsfs1kEJ6W5upt2Ds2dTuKsizQEndPoqw4Q", 282.63795424 * 1e8}, + &TokenPayout{"DsfstZF8U55tMoFKzogM4gnkXpW5rnTByBZ", 282.63795424 * 1e8}, + &TokenPayout{"DsfSzgsWCTRLSnEiev8nU2rXrPx5GBb6bSH", 282.63795424 * 1e8}, + &TokenPayout{"DsfTaqUz1Dax7RujazLC87UsZt8FG9msVxA", 282.63795424 * 1e8}, + &TokenPayout{"DsftnAycn9mUsNTkiqbi47BexCi1KuYcYpq", 282.63795424 * 1e8}, + &TokenPayout{"DsftRtWgjys2CMNTP7bVtJCkb8JEmM6qXhn", 282.63795424 * 1e8}, + &TokenPayout{"DsfTuStBNrShUyvzu3cEWfabMQxdGrxAGAF", 282.63795424 * 1e8}, + &TokenPayout{"DsftxtEWkBP984HXzRuFsQWRwkAGpgNu9hg", 282.63795424 * 1e8}, + &TokenPayout{"DsfTY7oGdmw2zekSGAFhzNRCH7TshWShJ3X", 282.63795424 * 1e8}, + &TokenPayout{"DsfuCzxVUGykNvmSwLDm2vC6DMsgD2bGAPU", 282.63795424 * 1e8}, + 
&TokenPayout{"DsfuD7hDn8DH7BdddBzwSUhtsESkeQKfw4j", 282.63795424 * 1e8}, + &TokenPayout{"DsfUNARfEwzupqqZrn3HrBc6VCkJbUYiSS5", 282.63795424 * 1e8}, + &TokenPayout{"DsfuQ3bRL4gfyZZD4eAq8xVvijDrGWMjzMt", 282.63795424 * 1e8}, + &TokenPayout{"DsfUUKqbF5YTDr4W4dToGMpU5ZHr17wCXVo", 282.63795424 * 1e8}, + &TokenPayout{"Dsfuyqjo5zeqAp6Yz1GMZCZKYMxyGBcseFy", 282.63795424 * 1e8}, + &TokenPayout{"DsfV89fgiMThjBrFAgfi3hZX2MttzXrfhCe", 282.63795424 * 1e8}, + &TokenPayout{"Dsfv8vK7JdgELUu5ikVjAhHwkhAn4ayhPgL", 282.63795424 * 1e8}, + &TokenPayout{"DsfvEs4nPqdoPSx4LuenczUzTHe5Mh2i3YF", 282.63795424 * 1e8}, + &TokenPayout{"DsfvGT1MBsP5Gup7ck6386WyJaaQKNpAo9s", 282.63795424 * 1e8}, + &TokenPayout{"Dsfw3GCvms6fNoT8Z6Fxq789gqEb5Wz6YZV", 282.63795424 * 1e8}, + &TokenPayout{"DsfwnhgMWbsZjM3oZfceQB26jM1xGtVXtdp", 282.63795424 * 1e8}, + &TokenPayout{"DsfWwtT7DqxQDaHtrDC9HdzwJLtmHvsFpSu", 282.63795424 * 1e8}, + &TokenPayout{"DsfwydJhuJNxZLX6n1MsonUFQychZSavbDB", 282.63795424 * 1e8}, + &TokenPayout{"Dsfx91kXZ1XRJRryPjGvJ5pvX1GXrGcJBmC", 282.63795424 * 1e8}, + &TokenPayout{"DsfxF56eLauSmsUonR3d6JQofXZuMDDrypn", 282.63795424 * 1e8}, + &TokenPayout{"DsfXKXfBJVAHUgk5gTh4LXzqKQjfPv13HFv", 282.63795424 * 1e8}, + &TokenPayout{"DsfXPKZikVG9qvV5BzcxK8ALHuv1gYrb815", 282.63795424 * 1e8}, + &TokenPayout{"DsfyDjxDB96zMdYJz8m9urPbqnncJX4GSH2", 282.63795424 * 1e8}, + &TokenPayout{"DsfZ4VjQy9vZzLvkhjrHtfYU5ccWxVWmo6M", 282.63795424 * 1e8}, + &TokenPayout{"DsfzAYnknSGWERRUKQAJyeuxiuvZJyKqNYG", 282.63795424 * 1e8}, + &TokenPayout{"DsfZBhCxZwBbRFpgutV86aA1M2Uhqw8Kb1E", 282.63795424 * 1e8}, + &TokenPayout{"DsfZgyCr2egpF6Qv3ywdojzgJvPJcqJ9bsS", 282.63795424 * 1e8}, + &TokenPayout{"DsfZm4ErRFEZaW6VHeCc5TQDMtiNDUiRTmW", 282.63795424 * 1e8}, + &TokenPayout{"Dsg11Zyn666M8uZGwC4pWteXmK8eKmXnuej", 282.63795424 * 1e8}, + &TokenPayout{"Dsg1dunxgwvEZiKxt8AJdax5pfzEAE5VdQ9", 282.63795424 * 1e8}, + &TokenPayout{"Dsg1GgTabo2mYTwDy3JXEZsH2gaaCeYVFrS", 282.63795424 * 1e8}, + &TokenPayout{"Dsg257VuYjnB9ULTvhEdYjuXe1F2vNRHJFH", 282.63795424 * 1e8}, + &TokenPayout{"Dsg2dVWMVbYjwpf6FAKfWhevEyNRDWaAryn", 282.63795424 * 1e8}, + &TokenPayout{"Dsg334xfP1rX4B7aYbpgZ9ugE52dia7hi72", 282.63795424 * 1e8}, + &TokenPayout{"Dsg3DfvbZWsb866Tgukm9bkcDBVpy9hf3L7", 282.63795424 * 1e8}, + &TokenPayout{"Dsg3zKxCEndBVJeXmpeCcnKD3zsHUGSbgxU", 282.63795424 * 1e8}, + &TokenPayout{"Dsg4HrBBqL9Mhj2sYrEXPNbwkG52zrtZgam", 282.63795424 * 1e8}, + &TokenPayout{"Dsg4km194DfzeWH5psQsnmgFJyw78AoFXYh", 282.63795424 * 1e8}, + &TokenPayout{"Dsg51cPSjVNrYH3nixbeDuyVXC2PVJfxHm5", 282.63795424 * 1e8}, + &TokenPayout{"Dsg5ek7uv9mbLrv12Jxk6dJdrKq4xf8aRe6", 282.63795424 * 1e8}, + &TokenPayout{"Dsg5VtUA1tXmucvYjUS7V1wAHyzbgsrxwzG", 282.63795424 * 1e8}, + &TokenPayout{"Dsg62AfS7QR2xJAkQX2WH83rDd1RD8HgeiN", 282.63795424 * 1e8}, + &TokenPayout{"Dsg66gdwSYxsaWbXwxZFxFvK9EmCFW7aGAD", 282.63795424 * 1e8}, + &TokenPayout{"Dsg6C8QvGJ2cUWWRLRPN9nfxnsyHju42rys", 282.63795424 * 1e8}, + &TokenPayout{"Dsg6XrcPWRqNtycxNveBocayVhWB9XwfchA", 282.63795424 * 1e8}, + &TokenPayout{"Dsg77arQV4h22EcBftsqyc5D6gf3zX1BUqc", 282.63795424 * 1e8}, + &TokenPayout{"Dsg7AS3WqqSiAvfmZ4DQxsqHnrZuTK4xyQk", 282.63795424 * 1e8}, + &TokenPayout{"Dsg7av7SZPeboUhRCcWzQa1TwTy1M7qEXSR", 282.63795424 * 1e8}, + &TokenPayout{"Dsg7MyRdjserQNnJEQkuXyL5MKExmA3BfBy", 282.63795424 * 1e8}, + &TokenPayout{"Dsg7P1VzvhoYXPVYsFTUEUCfcz1UaqnVykZ", 282.63795424 * 1e8}, + &TokenPayout{"Dsg7Rix1qcsx2zUU5Yo7m2c3qHkUez4e1rR", 282.63795424 * 1e8}, + &TokenPayout{"Dsg8c4w5M4cJ7xbkZfFSWxFrgvq2jLHyCLF", 282.63795424 * 1e8}, + 
&TokenPayout{"Dsg8GumJhg8oh3aGJg24nYhYZaFSdHLjSnr", 282.63795424 * 1e8}, + &TokenPayout{"Dsg9EzyCwnTFKpjqRRX1T8hQu5PHBYht6wR", 282.63795424 * 1e8}, + &TokenPayout{"DsgA89hZBtWzQwvmhC9ccgWAs3NxQoWYmEK", 282.63795424 * 1e8}, + &TokenPayout{"DsgAFPNf6Dy9nvVVN7ybqDoaCjCzPYTFH5r", 282.63795424 * 1e8}, + &TokenPayout{"DsgAgcFvYZRbsUQpj9VdZCgZTMYvn4Adwxj", 282.63795424 * 1e8}, + &TokenPayout{"DsgAo2oZY77oF5XnNhcEtmvra6dorwkjLQy", 282.63795424 * 1e8}, + &TokenPayout{"DsgaqRGjy7GAa2VQaj6f9hGbxzoRsG7bGyp", 282.63795424 * 1e8}, + &TokenPayout{"DsgaRQ9z3W6qGuJy1YmzTtDgBhoNsaNhu9o", 282.63795424 * 1e8}, + &TokenPayout{"DsgASL62q4k5zr4VxG8szDhsbqpH1g5WXaQ", 282.63795424 * 1e8}, + &TokenPayout{"DsgbFgCfrPUMkmitEn2ncxvNhvGYajJZxBQ", 282.63795424 * 1e8}, + &TokenPayout{"DsgbmMJTExHLVtmBbSKHwVSTsqRcCWgnXJu", 282.63795424 * 1e8}, + &TokenPayout{"Dsgc4pPii63VNZQy29xdi9ajNH1C4fhyWoE", 282.63795424 * 1e8}, + &TokenPayout{"DsgCKu3soSwg4XgFie5sNqevCgtviDwBDxu", 282.63795424 * 1e8}, + &TokenPayout{"DsgdDATtFnBU4pTYDwnbULP7u9CAXDHMccM", 282.63795424 * 1e8}, + &TokenPayout{"DsgDGdnTL3iYHS4cVAr9q7EidS4Avu1SKgU", 282.63795424 * 1e8}, + &TokenPayout{"DsgDj773oQs1nmi8Qj41k4m86h1LgATAw3B", 282.63795424 * 1e8}, + &TokenPayout{"DsgDoKNwnpq13Cd2masVErGN2onHtowZ7fJ", 282.63795424 * 1e8}, + &TokenPayout{"DsgDPWShqkyFdQCttpvNnQtAkKXcAWmWH8y", 282.63795424 * 1e8}, + &TokenPayout{"DsgE3ZJoeXpHiNSmMJYueKXv9bKpfVgNTpV", 282.63795424 * 1e8}, + &TokenPayout{"DsgE4Bs7HtizQ1Rh2QCwUy2F91JAySkGsQH", 282.63795424 * 1e8}, + &TokenPayout{"DsgeWLJMizhZAXSh42bxzuR8GDqFZG4GFFM", 282.63795424 * 1e8}, + &TokenPayout{"DsgEY6aBzoC1sCBaiQfgHuWv2ip1Jzm5dLA", 282.63795424 * 1e8}, + &TokenPayout{"Dsgf5EcPPrwfB6QYBvuWYJPHAJ6rU3z7qJd", 282.63795424 * 1e8}, + &TokenPayout{"DsgFAuVYhMyKbVToxBDsybxnJph5VWoF1W4", 282.63795424 * 1e8}, + &TokenPayout{"DsgfFzD3LPn3iWTv6qJa6xzx6bBATG6dZsW", 282.63795424 * 1e8}, + &TokenPayout{"DsgFivtk9neWYPmf2hX3AfZHeALHEGm81Fx", 282.63795424 * 1e8}, + &TokenPayout{"DsgfZQJWFJMP57BXzNEofsffgcxyFhKV9Xg", 282.63795424 * 1e8}, + &TokenPayout{"DsgGcYPsFnSSW8PbFaSy2ky1WUuCgZWo6bV", 282.63795424 * 1e8}, + &TokenPayout{"DsggfGTmUem1AbyX8gh31Ex7s1ZAZJURgft", 282.63795424 * 1e8}, + &TokenPayout{"DsgGftAn9PFhkEEZ1DVoJre5NAtZkXQPa8p", 282.63795424 * 1e8}, + &TokenPayout{"DsgGix8YuhpksnYx9WGuGrXRxUaozq8Lzps", 282.63795424 * 1e8}, + &TokenPayout{"Dsggn5uARP8TeqvJJKyFxDiRnFYbK3EzYbz", 282.63795424 * 1e8}, + &TokenPayout{"DsggnLa35oMPTZraeJ1P6Rey1XTn8p1HkGr", 282.63795424 * 1e8}, + &TokenPayout{"DsggPPMa8HtX2aDrtA4khsM3aoaW9gpn5Po", 282.63795424 * 1e8}, + &TokenPayout{"DsgGz1E6uozqAApu5zPjoBxMLqPvRX5m9Dd", 282.63795424 * 1e8}, + &TokenPayout{"DsghDbx3SPtMUzRU9twVB1WALmWwLaPcz1L", 282.63795424 * 1e8}, + &TokenPayout{"DsgHsmixfTfTqWQwfBMQ58LKdciaXPgEr4c", 282.63795424 * 1e8}, + &TokenPayout{"DsgiJLGhuSn736Mnx7kjdwbuPtcmcbehsFH", 282.63795424 * 1e8}, + &TokenPayout{"DsgivQaoQe6nBsv5EsQmtReK8CwSLXHzcsT", 282.63795424 * 1e8}, + &TokenPayout{"Dsgj3AzTXNwtjYsmV2TC2RoT3PvbbfEKZhE", 282.63795424 * 1e8}, + &TokenPayout{"DsgJ6YGFKTrrdtZvUUV3xR9YPvL2h78zmQV", 282.63795424 * 1e8}, + &TokenPayout{"DsgJch186AjnC9doNhFbsgY51YLGXZVfHVf", 282.63795424 * 1e8}, + &TokenPayout{"DsgjMWxvMiSEV9F8cgnQDzPmXeTHPr62SJa", 282.63795424 * 1e8}, + &TokenPayout{"DsgjRVNy7gcU6PpGuYiCYdAVLfDQ4DVyDwm", 282.63795424 * 1e8}, + &TokenPayout{"DsgKBTarCyZ1cMqw3jeVLXbVsALSVSECVix", 282.63795424 * 1e8}, + &TokenPayout{"Dsgkc6hQCpZWxrvHvPMZZoLhNhDcmFyx65x", 282.63795424 * 1e8}, + &TokenPayout{"DsgkdTmUeeYY1ftnZrx2eXzdccNvWwZTkjL", 282.63795424 * 1e8}, + 
&TokenPayout{"Dsgkg44tg9fxLcN3EBDKPKiFTYh8nmzJ9Cx", 282.63795424 * 1e8}, + &TokenPayout{"DsgL56z5e5wA1sE2MQYatPi1RNTDcqQK1Qr", 282.63795424 * 1e8}, + &TokenPayout{"DsgLUdZ41316BA8RdDBSakFLpLnwBM2ubVm", 282.63795424 * 1e8}, + &TokenPayout{"DsgMfEu5TptPpQSrGazJwkFUFJUuxSmDzsJ", 282.63795424 * 1e8}, + &TokenPayout{"DsgmmCazEwQJ8NAXJJR1o9E7eL39YVvc7hZ", 282.63795424 * 1e8}, + &TokenPayout{"DsgmxeVba9ajGnQRPWEyKgz1RRyH6swVGCW", 282.63795424 * 1e8}, + &TokenPayout{"Dsgn7RfPybonh8ytqpoHnwaKbXYu85Hiiyu", 282.63795424 * 1e8}, + &TokenPayout{"DsgNBkqGh7V7WhG9ijLiMofRQSHN39JSTC5", 282.63795424 * 1e8}, + &TokenPayout{"DsgNDRYyYKsHTDXXXuarKJUV8WgtmFRaoje", 282.63795424 * 1e8}, + &TokenPayout{"DsgndXa8NUfmjS4H3pwY1LU1cmNpaCnV2ZL", 282.63795424 * 1e8}, + &TokenPayout{"DsgnRadV7pxXA1DeXiSMaWvCbFZ3gEbq8HG", 282.63795424 * 1e8}, + &TokenPayout{"DsgNZxEnGxsjEts9PNRUKK2j3NvwVNZWBCh", 282.63795424 * 1e8}, + &TokenPayout{"DsgoqGxzYYZ4v6f5fzE9LgtdpyRwmNzNqgP", 282.63795424 * 1e8}, + &TokenPayout{"DsgozyAJxJ4hy7qKtS7MiHjKkhwhSJxfuLf", 282.63795424 * 1e8}, + &TokenPayout{"DsgpkDXXGjUudKvrtVQSj2zKXa6KQdktYto", 282.63795424 * 1e8}, + &TokenPayout{"DsgPM29HZKexENBQEAESohvw6e9NKrF9nSE", 282.63795424 * 1e8}, + &TokenPayout{"DsgPQzBmKVYtipH2SDoqd32bdQQyrRKWyQF", 282.63795424 * 1e8}, + &TokenPayout{"DsgPYLvfUrExWzACAu2q1aKpz1MSdRsUGNf", 282.63795424 * 1e8}, + &TokenPayout{"DsgPyytmxkUWHNjXRSjVnLCeoCxmhwjYMLS", 282.63795424 * 1e8}, + &TokenPayout{"DsgqAP3mRLusCGWaoeoZBT7EXFsjqpjNW5W", 282.63795424 * 1e8}, + &TokenPayout{"DsgqBbQvVE6x3ASPkd3JcKHwCvaNZetE8iy", 282.63795424 * 1e8}, + &TokenPayout{"DsgqGiC1X2TBxRgLbBX31XxQRcxhyXyZTKv", 282.63795424 * 1e8}, + &TokenPayout{"DsgqxyUSQdNHGUTKNLU4qFzgKcL7KXmk8Eq", 282.63795424 * 1e8}, + &TokenPayout{"DsgR1RkQEur6GqAp1ku8ErRBAnKvaJeqXnV", 282.63795424 * 1e8}, + &TokenPayout{"Dsgr1wAwGFjTYqdbUR1NFm5ZDzpZqUmA96v", 282.63795424 * 1e8}, + &TokenPayout{"DsgRcpD9u1LcvtaBeVfKKft31LD8NNpjhpF", 282.63795424 * 1e8}, + &TokenPayout{"DsgRgC5bvjHT3pW7pUb7nphogcSqLHtS7NR", 282.63795424 * 1e8}, + &TokenPayout{"DsgRMq1yR7ZGird5UAG1jVwfeB16WSojWa3", 282.63795424 * 1e8}, + &TokenPayout{"Dsgrr29m5fHjTdqu4116uCUdaFhraVmriWX", 282.63795424 * 1e8}, + &TokenPayout{"DsgRZjrHfDaJQGKuPu77TnCZCLEDLp3Mnvu", 282.63795424 * 1e8}, + &TokenPayout{"DsgsAgvcRZMrvAQcpied2oj3mpuKbKLhWa4", 282.63795424 * 1e8}, + &TokenPayout{"DsgsFvRYvRuXdfWNH7EaCRuAwpXWXiC1dZ4", 282.63795424 * 1e8}, + &TokenPayout{"DsgsQbpMLJ3eRvkUbV7GhWW6ev7yWBnQYTQ", 282.63795424 * 1e8}, + &TokenPayout{"DsgSSaW5zmG9gk35qKUCDzqHdam2HGHrb8p", 282.63795424 * 1e8}, + &TokenPayout{"DsgSvAuTygr6AXTACCcbiQzhGNqVCBrNv5M", 282.63795424 * 1e8}, + &TokenPayout{"Dsgt1QW6a2prGxmALVvLxB57vM1tCeV5dWg", 282.63795424 * 1e8}, + &TokenPayout{"Dsgtae2MZKJcqH3aKXFHYCrP5EbBvjv4Kka", 282.63795424 * 1e8}, + &TokenPayout{"DsgteeK9TccH5oWPtL5hP6TbFG6NcVsCCoR", 282.63795424 * 1e8}, + &TokenPayout{"DsgTfEw6HtccJ8QVVfoBzpwjw8udma2fWrt", 282.63795424 * 1e8}, + &TokenPayout{"DsgtkVYmpQUJWweKTDH2iJewRonEUFAGpEE", 282.63795424 * 1e8}, + &TokenPayout{"DsgTsk2b2VePoQUJy5zCHwdP4gSmaaPztCz", 282.63795424 * 1e8}, + &TokenPayout{"DsgtUn5UqvBCwL49JQykYKiAo3Q8ZoAjq6S", 282.63795424 * 1e8}, + &TokenPayout{"DsgTwJ9SWVnSVxWyVpedoWvYZjyX1B9mxAV", 282.63795424 * 1e8}, + &TokenPayout{"Dsgu5joQSbVPgX2Mtz8VBGt8cbR5ejj3o2u", 282.63795424 * 1e8}, + &TokenPayout{"DsguCsFzKQeTTNBcTFMBzb7S8xjRk3HCSCe", 282.63795424 * 1e8}, + &TokenPayout{"DsgUGvTbuibvy1VMABqJLV5pDzSuxLKrqTw", 282.63795424 * 1e8}, + &TokenPayout{"DsgumwbC5rBFiBZsrAonwLCtVmWmb457g9X", 282.63795424 * 1e8}, + 
&TokenPayout{"DsgUTt6gCcL4rixdBMTbyHzEE4Lmc4tDfVy", 282.63795424 * 1e8}, + &TokenPayout{"DsguwfxDpd4yLU7Eoa2sYehXz3QJKwZnv9Q", 282.63795424 * 1e8}, + &TokenPayout{"DsgV35iutBuMruGm51L2u4TgfYhdFvZTi4q", 282.63795424 * 1e8}, + &TokenPayout{"Dsgv3f2nXSgasc4eMS2UsgXP8kpiSYerxJX", 282.63795424 * 1e8}, + &TokenPayout{"DsgvM1zsHcwuKuzKQHvXdKGQ7BRazPc4Yqk", 282.63795424 * 1e8}, + &TokenPayout{"DsgvPHHuRkvbPianqKkGrTojB6Pj3bMpM3k", 282.63795424 * 1e8}, + &TokenPayout{"Dsgw5P7LeDBNNjnLbvJdcmPDRJxAsVQidyk", 282.63795424 * 1e8}, + &TokenPayout{"DsgwFLyqvMLTQYNifFSwBhjVojWBvYeq9cK", 282.63795424 * 1e8}, + &TokenPayout{"DsgWKLh5PT5pzfuFR8sx4XDJsnKd6vkB8ya", 282.63795424 * 1e8}, + &TokenPayout{"DsgwnBxew7pqcpaK1oZSs7XoZKVPEunJaNu", 282.63795424 * 1e8}, + &TokenPayout{"DsgXeygxbDMvdDnoL8kKA3jMoGmwvx1G2an", 282.63795424 * 1e8}, + &TokenPayout{"DsgxjGBsWNqyXAKvoYKmnNiCEFDQ7HQtvyL", 282.63795424 * 1e8}, + &TokenPayout{"DsgxR5YTnmZaqTfXZ7EcLU8LYXPgEBtbvfw", 282.63795424 * 1e8}, + &TokenPayout{"DsgxuqNhaa9UH6HXNfG2R8CtvVvQcENhQcH", 282.63795424 * 1e8}, + &TokenPayout{"Dsgxxka8JCbvM7oKAUNkLmKcAh3SD7LzCb2", 282.63795424 * 1e8}, + &TokenPayout{"DsgYD8E1PcgZdKVfvzNj6Ze6jKrwbjLp4A6", 282.63795424 * 1e8}, + &TokenPayout{"DsgyeKg3oJHSmXZWuynbRGCqvWxeZGUhBvG", 282.63795424 * 1e8}, + &TokenPayout{"DsgykRXaef2kxx5e9K5C9Kgy3MPb59v8o9j", 282.63795424 * 1e8}, + &TokenPayout{"DsgympfMp5HMink9qmvbv26Y3oBDEDLbPo2", 282.63795424 * 1e8}, + &TokenPayout{"DsgyPFGdUD1EfTCbcRu1c4cTJuuDjQ6feBg", 282.63795424 * 1e8}, + &TokenPayout{"DsgYrr4h57rUGeJaoEKiy3mB55dM7MD2zmV", 282.63795424 * 1e8}, + &TokenPayout{"DsgysrPnYjAyWmPyvJgpZJpcf8KCfxurDqB", 282.63795424 * 1e8}, + &TokenPayout{"DsgZ48z6w8wFovai54ByBboNkKwAygykHie", 282.63795424 * 1e8}, + &TokenPayout{"DsgZbuR5KMPt5RuYTNM7HbZE1i7TXLkGYne", 282.63795424 * 1e8}, + &TokenPayout{"DsgZhW8Nuhk78A2GMbHjTiU4kbKdDZ6V3x3", 282.63795424 * 1e8}, + &TokenPayout{"DsgZjuHTCRD6SrKAffSBE3hJ9nbPiz8zeWb", 282.63795424 * 1e8}, + &TokenPayout{"DsgZLUmaCS6KCVuMNAvN22d8unf2JLA6CGZ", 282.63795424 * 1e8}, + &TokenPayout{"DsgzR7JN2h4zLj7zys5G4jhcGaptECrUz6M", 282.63795424 * 1e8}, + &TokenPayout{"DsgzxUyy1fYZM7M4vsZZN12wzxPY6WUwzUe", 282.63795424 * 1e8}, + &TokenPayout{"Dsh1BmoRy95E7nedaL9pikqMG8ZBWsbZcND", 282.63795424 * 1e8}, + &TokenPayout{"Dsh1bmQn22JwwriFPHoXYo791tCHJQoD8JP", 282.63795424 * 1e8}, + &TokenPayout{"Dsh1C1mzXXRUQq7K4HUASZn63KaXUZspQQS", 282.63795424 * 1e8}, + &TokenPayout{"Dsh1SwgWSz5j8TUn8GppDPMcHjHrZPBqMut", 282.63795424 * 1e8}, + &TokenPayout{"Dsh2AgzshQvCuQuWXanZJQhMBAeTnbLNJKs", 282.63795424 * 1e8}, + &TokenPayout{"Dsh3AwMNmHxx5RomFqdx5DhHA1ZMrS64WHd", 282.63795424 * 1e8}, + &TokenPayout{"Dsh3BWc5qgBQbm5N6vYXSGFoU8oAQ3dv7o5", 282.63795424 * 1e8}, + &TokenPayout{"Dsh3jKedq5ionkfvXu9PUuiAQDn9chx3ng4", 282.63795424 * 1e8}, + &TokenPayout{"Dsh5bRFxrc4tDD56Yqh9FCxigXDigM4Fg1H", 282.63795424 * 1e8}, + &TokenPayout{"Dsh6ufafTg3FSCHN7t1nsbk2kSrNzEB4acH", 282.63795424 * 1e8}, + &TokenPayout{"Dsh6ycrCiPbxfoMUwVRPFVmgYpwL4kznUXo", 282.63795424 * 1e8}, + &TokenPayout{"Dsh78MvZiSgMmNcZc4wKEEKk7oVqoAiS1ir", 282.63795424 * 1e8}, + &TokenPayout{"Dsh7ciohZ52dLCC4SmSWN8ewgNtZvBAye3h", 282.63795424 * 1e8}, + &TokenPayout{"Dsh7dMCQWZwqjMMwxawTn8Kai7KrT4P64yV", 282.63795424 * 1e8}, + &TokenPayout{"Dsh7P2KDJki7dhqk2DnG91hAvEf8Q57dZvJ", 282.63795424 * 1e8}, + &TokenPayout{"Dsh7ufhkTbmNkehUUu6iU7CuH3T8awLrJVo", 282.63795424 * 1e8}, + &TokenPayout{"Dsh88CF1KprMups7T3syVAjewa9URjqCnB9", 282.63795424 * 1e8}, + &TokenPayout{"Dsh9qVU6Aw5fwcHLm9n6KVaz4kGMjsZJLpM", 282.63795424 * 1e8}, + 
&TokenPayout{"Dsh9sf7fX7oAYetLvLWDJXN3ePUqnu7VZqB", 282.63795424 * 1e8}, + &TokenPayout{"Dsh9sLjQWyVfcnfgj8yYjg1QqTYFY5xHZew", 282.63795424 * 1e8}, + &TokenPayout{"Dsh9TYfz8L5Zpo7wjNEHqi6wpxKg5PsQxrn", 282.63795424 * 1e8}, + &TokenPayout{"Dsh9Xk1uPdZThodJz5vJiR5ShZkjkMZMLcX", 282.63795424 * 1e8}, + &TokenPayout{"Dsh9z153KXSBsMWvh6qxgD1NYMBbm1a2TRZ", 282.63795424 * 1e8}, + &TokenPayout{"DshAm5CvQjarjnJJWv9g5iGoxaLf56AfKBm", 282.63795424 * 1e8}, + &TokenPayout{"DshaPJyKHNopXwtVmPh778z8MeKZyj5DVCv", 282.63795424 * 1e8}, + &TokenPayout{"Dshaz61TSJ5zXws3x9i6NFP7GtxkM6Wm4x5", 282.63795424 * 1e8}, + &TokenPayout{"DshbEWsRDEe2kn6oaxEWMXLVEfD2vxzGEFN", 282.63795424 * 1e8}, + &TokenPayout{"DshbjZAGT979MWqqkfUBdm1T5x3mZ9ymzKy", 282.63795424 * 1e8}, + &TokenPayout{"DshBTYp3GPpJ3xkwyEtSJFMVQBgzvK9o9ej", 282.63795424 * 1e8}, + &TokenPayout{"DshbWYdVJAMeR11dHvwgQYtbUQS1A7mp7Y6", 282.63795424 * 1e8}, + &TokenPayout{"DshBYufk8mHZeLLEXzqKZcuvC2mPYyR5YvX", 282.63795424 * 1e8}, + &TokenPayout{"DshCaW1HZ4DChY7h736XoKZ2LaVyRvJxLQ5", 282.63795424 * 1e8}, + &TokenPayout{"DshCf8aNrHbRXvhyLDFnXkVGarc6A639QrK", 282.63795424 * 1e8}, + &TokenPayout{"DshCfJywNRJg6My8ZvGHdBTdPgVUY4RRJYD", 282.63795424 * 1e8}, + &TokenPayout{"DshcuQRC55dphqJ6uykEEhDkiPv9jmozdy8", 282.63795424 * 1e8}, + &TokenPayout{"DshCV7e3mSp5vWxswnM5PccUk79aAZ2Mdaf", 282.63795424 * 1e8}, + &TokenPayout{"DshCXY2iLUKSyvRC61DxNpLyqRzcqwYuVFp", 282.63795424 * 1e8}, + &TokenPayout{"Dshdp8KVTbq55Nn5U4bgEJd1WgPtdeitjTv", 282.63795424 * 1e8}, + &TokenPayout{"DshdRS9n464JFVXRzd7CHKWy7WsdZ5cCvhq", 282.63795424 * 1e8}, + &TokenPayout{"DshDTH4ANs98GtGt8rW8ZHzBQD9fDAEF9dD", 282.63795424 * 1e8}, + &TokenPayout{"DshdtP1LmWid4rvuAzBarEDcfU5eN9ftKvo", 282.63795424 * 1e8}, + &TokenPayout{"DshdtWPTf81f92eP45dS9MEanDw71z6p2Ei", 282.63795424 * 1e8}, + &TokenPayout{"DshehCtgGpRK5aVcHanFd61DmGRUVXBFUva", 282.63795424 * 1e8}, + &TokenPayout{"DshEMFdmL2igfeaESDJBLwFG2bCEqBr9WxJ", 282.63795424 * 1e8}, + &TokenPayout{"DshfiiyW5zYaez2ReDhxffFe5B4rJBCsh7e", 282.63795424 * 1e8}, + &TokenPayout{"DshFVWFYunZET1JmZytyktKtMynPV86BA57", 282.63795424 * 1e8}, + &TokenPayout{"DshGf29bBYjAPN2FVXieoD9GVHU12dC66q5", 282.63795424 * 1e8}, + &TokenPayout{"DshgG79HwGfmEAHcsnKsZ5YhKoWMLW6sjjL", 282.63795424 * 1e8}, + &TokenPayout{"Dshh67onLpHGo2QcaB9X6YWXcUZz2A8CqEM", 282.63795424 * 1e8}, + &TokenPayout{"DshHBqqZTMV4Vfz563vZ5YD75wdcUuzXEKi", 282.63795424 * 1e8}, + &TokenPayout{"DshHDNBy4ooqoNfG52fBUs95PPgcnA4koL2", 282.63795424 * 1e8}, + &TokenPayout{"DshhDnDj9TNivussSytFWbGwiy4u5EHGjvQ", 282.63795424 * 1e8}, + &TokenPayout{"DshHkWxneaEE4mexB8QiiWT4NZfSm8TMK8D", 282.63795424 * 1e8}, + &TokenPayout{"DshHm3JrCzLvzcF7K3dRgCCRynnTjYbEhX6", 282.63795424 * 1e8}, + &TokenPayout{"DshhnVSSQALbEbTCQgrLQesS9BTxhFpEiTr", 282.63795424 * 1e8}, + &TokenPayout{"DshHqeSoEReJYoPiK8TEN6tzEuqe3n389KE", 282.63795424 * 1e8}, + &TokenPayout{"Dshi716BPDG3VVWBprBGEdzd1PHXjZF9acM", 282.63795424 * 1e8}, + &TokenPayout{"Dshj3gfnPwohJroKKybEehyu29mcYCVuWxP", 282.63795424 * 1e8}, + &TokenPayout{"DshkDQdTbzEX2zFP2jcuxLX7Q3ymk7FDeg5", 282.63795424 * 1e8}, + &TokenPayout{"DshKjZE2tuVjDDRMdJ4tPDmSdKf8wQEK5dc", 282.63795424 * 1e8}, + &TokenPayout{"DshkqZiCuCj2sziPFsXs84f5jEQS97FxpNU", 282.63795424 * 1e8}, + &TokenPayout{"DshKsuFYhNoHbgWCLPkonPSMvRFcFDSaDAr", 282.63795424 * 1e8}, + &TokenPayout{"DshkyULoQfVG5gz5DZM6Q4n8N9nYeidizwk", 282.63795424 * 1e8}, + &TokenPayout{"DshLcYbzpgKTu6zNNuefiXkqCbzxcjmF72R", 282.63795424 * 1e8}, + &TokenPayout{"DshLdzzu1rAWvfLqEeVfhg4fBTs48tie27y", 282.63795424 * 1e8}, + 
&TokenPayout{"DshLgS5qfq1oeDczRuusVouniENmxzP4vbQ", 282.63795424 * 1e8}, + &TokenPayout{"DshLPQAymoktGyR1HUbuYbhzDyg4jfVDq49", 282.63795424 * 1e8}, + &TokenPayout{"DshM4FeGsj4NTvFgpfxHfBqNd9f3aydQjx2", 282.63795424 * 1e8}, + &TokenPayout{"DshM5UAgN8NocWWvg4VGteFTVNVQykdRY5Z", 282.63795424 * 1e8}, + &TokenPayout{"DshM5xPAsuAxFtokG9ePv8Qb9A6ojYtAmwr", 282.63795424 * 1e8}, + &TokenPayout{"DshMedLrxmtqfwX8mp1VgUisA4jKcEu4JpG", 282.63795424 * 1e8}, + &TokenPayout{"Dshn38TAmiCyEMbBAwFG4nEEnCvPNKtevfX", 282.63795424 * 1e8}, + &TokenPayout{"Dshn3rzqxbe35TuBDa3wULXUrYdyPEQgVQS", 282.63795424 * 1e8}, + &TokenPayout{"DshNgMDve5cNHmf112e1xmakcrpk4FerqpJ", 282.63795424 * 1e8}, + &TokenPayout{"DshnhunbrX55j9i9PSkjjvi8Rn7RLsj8f8E", 282.63795424 * 1e8}, + &TokenPayout{"DshNPRJ7fvscu1tAcF572XwX93UYneBoQqc", 282.63795424 * 1e8}, + &TokenPayout{"Dshnr3cgiLAhT6WbtJMH4UB7bVZ85KSTRm3", 282.63795424 * 1e8}, + &TokenPayout{"Dsho9Rqht6J2MNYG9fvdzpVJ1dGdgJupp72", 282.63795424 * 1e8}, + &TokenPayout{"DshoE7c5MKMFNCHqKNVxKLZPYbEzGoN9D1N", 282.63795424 * 1e8}, + &TokenPayout{"DshonGVAAP1Lh2pPDiy1DJZRxA8iuPppwKK", 282.63795424 * 1e8}, + &TokenPayout{"DshonzjpYsZwCGcktwR8Ve37CdRxpNimarf", 282.63795424 * 1e8}, + &TokenPayout{"DshoPdreCWDEPzpkEufLMv6Djvgw3gK6qjy", 282.63795424 * 1e8}, + &TokenPayout{"Dshp1K2rjNqo7gnwsXS1RJ9cezFpY9Kekvr", 282.63795424 * 1e8}, + &TokenPayout{"Dshpcy8rcyxJAykE5yf6ZW1kTekhEYyLTnH", 282.63795424 * 1e8}, + &TokenPayout{"DshPDMnaJBETcjiVcMjQrtBK7Tq3dg4pcyE", 282.63795424 * 1e8}, + &TokenPayout{"DshpFQGa3PrYQhVCd3FuAUBwDhspwPgXM7D", 282.63795424 * 1e8}, + &TokenPayout{"DshpgvZHtdvuDVDDf2yrkxUGmGWKwtEkbyy", 282.63795424 * 1e8}, + &TokenPayout{"DshPKW5LQhyqN37YAh1BJjn1gnyEWhAACKf", 282.63795424 * 1e8}, + &TokenPayout{"DshppbdUs5dH5PbFgNBudW93vua9eCf9EHf", 282.63795424 * 1e8}, + &TokenPayout{"Dshpqp3mKgkzViMWEFd1qArbd3kXZCjnTwK", 282.63795424 * 1e8}, + &TokenPayout{"DshQ92xGaYWa74ivfmBteB8MQmvPZoANmBv", 282.63795424 * 1e8}, + &TokenPayout{"DshQaapkWHgmf1hDdLuftBrHDL48eYzYvQX", 282.63795424 * 1e8}, + &TokenPayout{"DshqAyJQiyjX3t8WBbf3D1ttW9nko1H424W", 282.63795424 * 1e8}, + &TokenPayout{"DshQBtRGoswRAMq8W2cPVaFPDj5JUMGUgGv", 282.63795424 * 1e8}, + &TokenPayout{"DshqKbVxwATfssdtkjjXcCPxEB4ocHiJn4r", 282.63795424 * 1e8}, + &TokenPayout{"DshQkCB5PxQir9Fakk9vRTRjdg43vQ4ZuF6", 282.63795424 * 1e8}, + &TokenPayout{"DshQLaPyx2DNFU346Yb389hBXfh1Dw4rvmu", 282.63795424 * 1e8}, + &TokenPayout{"DshqT9yQrpuuw19LAMBQXF8vXk59kgQ93K2", 282.63795424 * 1e8}, + &TokenPayout{"DshqwtdwcDrQLgn5JjfvNKTxH5jXH5ar7tQ", 282.63795424 * 1e8}, + &TokenPayout{"DshrNsxUryrVctdvzE4JxQ512xepnu9FeEe", 282.63795424 * 1e8}, + &TokenPayout{"DshRU7JqYotmEiTGtuEqxpFsgUMk21r9h66", 282.63795424 * 1e8}, + &TokenPayout{"DshRV5v8XEVpozeEEaZffvtzrfJLqBnYRnX", 282.63795424 * 1e8}, + &TokenPayout{"DshrVZ4vWKNdnQMbZ813tZbMAaHNpYLpxQY", 282.63795424 * 1e8}, + &TokenPayout{"DshsaXCxZ2DD29cTZWK8Wg8DjsXYRoSKyL4", 282.63795424 * 1e8}, + &TokenPayout{"DshSGxdZ9faRMSjZ8T2YigV4g2HXCcFf2un", 282.63795424 * 1e8}, + &TokenPayout{"Dshsj5EHhauY3mtEZjrDDePBtXw5JhzZH9R", 282.63795424 * 1e8}, + &TokenPayout{"Dsht2bAMVi12ZvJAiktnojq2EvszBSZLEAK", 282.63795424 * 1e8}, + &TokenPayout{"Dsht2QGw7NyZtsB8g7uMeTa6gtAu9cxyaLi", 282.63795424 * 1e8}, + &TokenPayout{"Dshte8guGo8fQURm2ZCT1UAWaieZTswd1UK", 282.63795424 * 1e8}, + &TokenPayout{"DshtNRBGGnmHYgEmHBQsGZtYY3AHXBr3n9d", 282.63795424 * 1e8}, + &TokenPayout{"DshTqweN32LpjVu2Grx6Gb8JVrUYS3AFmbT", 282.63795424 * 1e8}, + &TokenPayout{"Dshu87EvmZ9mnHZXCysicdW8z6HC2tpUSe5", 282.63795424 * 1e8}, + 
&TokenPayout{"DshuLFUBd7WBjpzVdg6gauS9ac1HW57VQxq", 282.63795424 * 1e8}, + &TokenPayout{"DshUodHTmjGfhBgm6Erwqfhy3rHE5dbMuiM", 282.63795424 * 1e8}, + &TokenPayout{"DshVAorTeSMRYnbrRiiB3Dc1CGAfiYDxy2J", 282.63795424 * 1e8}, + &TokenPayout{"DshVPQ6RCLAvEb9McuSsDJ8TFoRh9FHYuqg", 282.63795424 * 1e8}, + &TokenPayout{"Dshw1fPz25KCfviSy2ghbUoz3yUhzJSbT65", 282.63795424 * 1e8}, + &TokenPayout{"DshwhL388ijjJSATa8rJnUPwsfKSELCRsiT", 282.63795424 * 1e8}, + &TokenPayout{"DshwoZhCGrw8rf3fVzv29DA9YyH1wd9rbyg", 282.63795424 * 1e8}, + &TokenPayout{"DshWtUvP1Qvvs1NtDQFzGVQ6yxnUWAnyHhz", 282.63795424 * 1e8}, + &TokenPayout{"DshxFxiByGr11uPjoi9YjwfPioki7fDJtBA", 282.63795424 * 1e8}, + &TokenPayout{"DshXnd9e7vjhptSGzYbbLJDRakF4WMN5ZZZ", 282.63795424 * 1e8}, + &TokenPayout{"DshyC5Fuo4EWhiRwqG1d2NVFn6cryrL8RRw", 282.63795424 * 1e8}, + &TokenPayout{"DshYJyavrFxmmFPmhU9ZjWhYHhpzBeRN5Sz", 282.63795424 * 1e8}, + &TokenPayout{"DshYqZNa3yrquQec1M7Ns16zttpCtoKGFVQ", 282.63795424 * 1e8}, + &TokenPayout{"DshySPPSYHMSnRhTVSF5J4FdXnqrjQ9DCrB", 282.63795424 * 1e8}, + &TokenPayout{"Dshyvf1pQzp1FZXp5cAchJfDBuxmSFSA7Xx", 282.63795424 * 1e8}, + &TokenPayout{"DshZDUEtuJHaWPzL6ptGCVPkV8fg3hnXnZT", 282.63795424 * 1e8}, + &TokenPayout{"DshZKsfvyhMw5qiwFxVfFhjjcgdsW1iHoSh", 282.63795424 * 1e8}, + &TokenPayout{"Dsi1AXEi7n9Gb6rHwU8ZxrBp7Gx7P8H2GPF", 282.63795424 * 1e8}, + &TokenPayout{"Dsi1NjWErydVWoW9fmokkD6TMC1matYvRux", 282.63795424 * 1e8}, + &TokenPayout{"Dsi2BTiycGEZ2e8kRGcahpDV9jSZmB2Fd8U", 282.63795424 * 1e8}, + &TokenPayout{"Dsi2Rs28YsccGmQ4mUsAytgDvgmUt14vErL", 282.63795424 * 1e8}, + &TokenPayout{"Dsi2SAVcCw3pnXyChpdijMPDSXzYoZCpV5Y", 282.63795424 * 1e8}, + &TokenPayout{"Dsi35yLSdJuoBnvjaA1RLfFof3H74ufbHy6", 282.63795424 * 1e8}, + &TokenPayout{"Dsi36FBN8gvKLMNdKY4CqYbRV3CAvCPNNVQ", 282.63795424 * 1e8}, + &TokenPayout{"Dsi37KLAEiYp6Jn7CxSPT5KHRCmS5jDm2cS", 282.63795424 * 1e8}, + &TokenPayout{"Dsi3Hy3HTTscWfuCqZqotpfvvjNW7RLCMn5", 282.63795424 * 1e8}, + &TokenPayout{"Dsi3nSe8vSoBX8WmnnoyU17djUCQEy5eExT", 282.63795424 * 1e8}, + &TokenPayout{"Dsi3Nt5CZZy5iVvVnYNnJcjNP7J1xQ1MbFE", 282.63795424 * 1e8}, + &TokenPayout{"Dsi3Zy3xR6g3xgZSNdnVqnMNn6HPG3RgXKj", 282.63795424 * 1e8}, + &TokenPayout{"Dsi4UMVM1A4BVmV1mc3CxKwpNqbNTjHnYLG", 282.63795424 * 1e8}, + &TokenPayout{"Dsi5Bg6kCE1WCMtycwYAJkMJ897itLVMULs", 282.63795424 * 1e8}, + &TokenPayout{"Dsi5JxWAXXAfyGizo87kK1cpTbfb1msgy3W", 282.63795424 * 1e8}, + &TokenPayout{"Dsi65YutrDux9A9UjXbjuhK828gY6HkugTo", 282.63795424 * 1e8}, + &TokenPayout{"Dsi6U2JAyKcsSRCydgjea9qH4vNunMQQctZ", 282.63795424 * 1e8}, + &TokenPayout{"Dsi7bck7T4syct8ZdSnWrx2twoiL8Jpee89", 282.63795424 * 1e8}, + &TokenPayout{"Dsi7jxXRapbqReBG6k1gvJAaYbuEnya5oTN", 282.63795424 * 1e8}, + &TokenPayout{"Dsi7ZtG3SuibzXU7YdP7ZBdTBAH35CCfLTG", 282.63795424 * 1e8}, + &TokenPayout{"Dsi84kZLxfuaLSg3gSYLURgscLCJnEtkveX", 282.63795424 * 1e8}, + &TokenPayout{"Dsi98BxbsXtjJj9A53LF73eANSRnLc4yhJY", 282.63795424 * 1e8}, + &TokenPayout{"Dsi9NrUM3Mqcfds8K8DdiqZdDuwFBbpfUwc", 282.63795424 * 1e8}, + &TokenPayout{"Dsi9xphSMyCR33KNyPhQQ8crUemuY5vvoSq", 282.63795424 * 1e8}, + &TokenPayout{"Dsi9YgeM8bejWtxZZhv1Gs1Vujyq7A3718e", 282.63795424 * 1e8}, + &TokenPayout{"Dsia6YTj8UmeJuiTXfDm43D1A9nxLqgZoko", 282.63795424 * 1e8}, + &TokenPayout{"DsiaBgMWRYS5u2isRo7y89gc2F8PEHZxCWv", 282.63795424 * 1e8}, + &TokenPayout{"DsiaBjteYsfXJZFQJGRg9R9XkCS1hf1oTbS", 282.63795424 * 1e8}, + &TokenPayout{"DsiAh673YhAYGtjPKskg1kaCfP7zXhEGoCF", 282.63795424 * 1e8}, + &TokenPayout{"DsiAjTsK5vKWQ9cUskWbThH8S5qHq3EYhco", 282.63795424 * 1e8}, + 
&TokenPayout{"DsiaQ9yibZdnmSWZXGbacpiBDVaoWuqSv3Y", 282.63795424 * 1e8}, + &TokenPayout{"DsiAS7gV2gX3VUAVMXqpVFyWAtC1tC494C5", 282.63795424 * 1e8}, + &TokenPayout{"DsiAuvdTMJxyu6Y9duT4wDQuMZ4SnnPGSd7", 282.63795424 * 1e8}, + &TokenPayout{"DsiazMbfgkMNBXktZ6WcXYh5J6bRgwhQCwr", 282.63795424 * 1e8}, + &TokenPayout{"DsiB9eW2wXafEz7KENAFDdePFHjyFufRtet", 282.63795424 * 1e8}, + &TokenPayout{"DsiBKSemwhjoJrsrwwqCxaAkBFGZfwYXNKn", 282.63795424 * 1e8}, + &TokenPayout{"DsiBkxucyG7GwjfX76HD3GWxZT49JFLZgwj", 282.63795424 * 1e8}, + &TokenPayout{"DsibL8iYENh1aEbkq9TxAYY9Q2w1Eo7Crco", 282.63795424 * 1e8}, + &TokenPayout{"Dsibu9iXwoUzn5DPvYad9PQmaC5B9sUJntg", 282.63795424 * 1e8}, + &TokenPayout{"DsiBx2f7SFm6wk6LHv9e8KkBZWxHjBubKji", 282.63795424 * 1e8}, + &TokenPayout{"DsiC41bDrvmY6uo9ja6hjzzyGmS6h9yXXtf", 282.63795424 * 1e8}, + &TokenPayout{"DsicGbXyY6gGod6Rdov2k57gQS6cQq96Jo9", 282.63795424 * 1e8}, + &TokenPayout{"DsicjbRnni3QsQqPU9MgeCYpsRAotbLxJQP", 282.63795424 * 1e8}, + &TokenPayout{"DsiCKypTufUiMqBWAs8B9HvJS5hfMLvebb2", 282.63795424 * 1e8}, + &TokenPayout{"Dsicswvsns7Sij4W8Q6e7PHK2dgVnN86jPb", 282.63795424 * 1e8}, + &TokenPayout{"DsictnWnwU6WSjHar2hkTR6JuTXnkkzpv75", 282.63795424 * 1e8}, + &TokenPayout{"DsiD66nWQV9JBhtmrhDMhAQ5qPcrzg2GxhD", 282.63795424 * 1e8}, + &TokenPayout{"Dsidb1h3QiZdpvb6TLfcAzwSBXuwoXrGTTr", 282.63795424 * 1e8}, + &TokenPayout{"DsiDLXWa7rNNznzScLcNqdKoW6naaXdzNGf", 282.63795424 * 1e8}, + &TokenPayout{"DsiDVhjut1s6ew1oEt8UhPLVuTkXQHRhpdS", 282.63795424 * 1e8}, + &TokenPayout{"DsiE8zfKktx8QMkfXFqDZWB6SHwg2SPuZNC", 282.63795424 * 1e8}, + &TokenPayout{"DsiEgy3rV1ZZaKCHJ8yqfVqirpTyrbcfyJs", 282.63795424 * 1e8}, + &TokenPayout{"DsieinSz7RRU2wtM443184A4U6duMCudVxX", 282.63795424 * 1e8}, + &TokenPayout{"DsieJDc6Ly4YtCmAMBYcS6wHfPSPZjPb9yg", 282.63795424 * 1e8}, + &TokenPayout{"DsiePVeFKx5oomm3rssMmCF923S7cRVesBS", 282.63795424 * 1e8}, + &TokenPayout{"DsieSt6d8d6ezJ8cydqRUZmTMfq3p1prKRX", 282.63795424 * 1e8}, + &TokenPayout{"DsiESTom3oYHTe3C5K3Kfu5wmqSAHBLUrYo", 282.63795424 * 1e8}, + &TokenPayout{"DsiF6ee3J4tsZWugU9QSniCHZKiGvEcjFdx", 282.63795424 * 1e8}, + &TokenPayout{"Dsif9LrCj2BzPQCCzq6TarC5EnLYRHgM9Lp", 282.63795424 * 1e8}, + &TokenPayout{"DsiFF1NPqB3bkg1DZBGaZNJTnH1h92LCjmd", 282.63795424 * 1e8}, + &TokenPayout{"DsifPMEjcNrZKM3szSKchHkwQ6LAT3BFFCB", 282.63795424 * 1e8}, + &TokenPayout{"DsiG53jSzKehepbjHNodER9kbiYHV237yRt", 282.63795424 * 1e8}, + &TokenPayout{"DsiG7PHVf2uWR4LHgtbEsAH2pS7g3o3GzgK", 282.63795424 * 1e8}, + &TokenPayout{"DsiGE5EkeKNjc86soaQBhrJ1FXrA98o8JtE", 282.63795424 * 1e8}, + &TokenPayout{"DsignBvtLQHfYsL3WH9Da6NVFfFyxC98d8W", 282.63795424 * 1e8}, + &TokenPayout{"DsigwhQGnypdew3NKxMTc7USShhGqJXVwEx", 282.63795424 * 1e8}, + &TokenPayout{"DsigyUCAzvQfbkjBXsjwoRpZFSuk9RCpfMy", 282.63795424 * 1e8}, + &TokenPayout{"DsiHmUr2YBBCjgWQoed1xow3PALBHNaFaTc", 282.63795424 * 1e8}, + &TokenPayout{"Dsihq9aigAXqJVBQaRY9n9ckudWLYdkopsB", 282.63795424 * 1e8}, + &TokenPayout{"DsiJfMhkkvVarZiNkkPJV4KTZEvhkLcmoV4", 282.63795424 * 1e8}, + &TokenPayout{"DsijHoL6tm9KUmSb4hFQogHK9qWQsUrmsqY", 282.63795424 * 1e8}, + &TokenPayout{"DsiJs9oZxQTUBL5A92WhyzPzsGqiaxJTjrE", 282.63795424 * 1e8}, + &TokenPayout{"DsiJSnhfHK4ynJx6foG6kn1cGxPuQsGoxpf", 282.63795424 * 1e8}, + &TokenPayout{"DsijUKZE9ZSQKH3k58rsRkvor3NSnzAfjTY", 282.63795424 * 1e8}, + &TokenPayout{"DsiKJqGCgjqNTKAApwaymsjYcgYrmWgvC8f", 282.63795424 * 1e8}, + &TokenPayout{"DsiKKJq1UJo5ojARvZ5Ddm22URsxp67suj2", 282.63795424 * 1e8}, + &TokenPayout{"DsiKMoXa6dzs1JPUgD72kgMvYm7QwDyZziV", 282.63795424 * 1e8}, + 
&TokenPayout{"DsiKsScyyj8PZTt3niwCabdAmSVYoas8r6P", 282.63795424 * 1e8}, + &TokenPayout{"DsiLBNQiRDrGFrjaJrW9GmAUoat2HvzrPzE", 282.63795424 * 1e8}, + &TokenPayout{"DsiLmJBVuN5z4tYwqXvhQCx5h5PVWLEyBKR", 282.63795424 * 1e8}, + &TokenPayout{"DsiM95ehaA13KrW2KLyaxv3agbM8Vgbq68h", 282.63795424 * 1e8}, + &TokenPayout{"DsiMbhB9NN4cEiwH4wRWof9jmFzfveu1xzB", 282.63795424 * 1e8}, + &TokenPayout{"DsiMPhC3sPa1u8qNmqmdWbYh4jbYrBNyruU", 282.63795424 * 1e8}, + &TokenPayout{"DsimR9JVTRCaK1BmLxRt4nWvM18xvhoUEu4", 282.63795424 * 1e8}, + &TokenPayout{"DsiMzaGnMNNReuxbHGw3oYF4ekfizNx5Gaw", 282.63795424 * 1e8}, + &TokenPayout{"Dsin281pxgWe2Xrkx1LdHr7nviE4apJcGA7", 282.63795424 * 1e8}, + &TokenPayout{"Dsin4mN6oFf4Mt9VSbU58y7THkEGpBXYJUk", 282.63795424 * 1e8}, + &TokenPayout{"Dsin4T6VckLbk4YtAg5z6Uu2hwbRij7wwCQ", 282.63795424 * 1e8}, + &TokenPayout{"DsindaWduDEgUbogJsMgb8TFk3TcMc6oyno", 282.63795424 * 1e8}, + &TokenPayout{"DsindvqqvNPVVnkXZ4Uu9oF2otsgrn66rv5", 282.63795424 * 1e8}, + &TokenPayout{"Dsing5DC1tDWsr69iE4b2TWP6YbFJnK3M8U", 282.63795424 * 1e8}, + &TokenPayout{"DsiNyQ54HhhyaYS96mUNLngKwYVZxsKxvsm", 282.63795424 * 1e8}, + &TokenPayout{"DsioE3aPaD4WWWW4zTuE86Xvat3MaNfBNTu", 282.63795424 * 1e8}, + &TokenPayout{"DsiooBjDnhS3mgHSAVfUuXBxuWcddHmywJr", 282.63795424 * 1e8}, + &TokenPayout{"DsiP9TfXpKBxrHt9X6pbcF4oHdfDyz3QUyD", 282.63795424 * 1e8}, + &TokenPayout{"DsipBXn1MmLvJivQWv4xXY3uqivT4YThk1i", 282.63795424 * 1e8}, + &TokenPayout{"DsipJ9bW72XT5NeypAiiBjvkdCVFwqRpjM2", 282.63795424 * 1e8}, + &TokenPayout{"DsiPXeH8ZLiojC3sRyqfsx49TD8gWV2cHrp", 282.63795424 * 1e8}, + &TokenPayout{"DsiPZCquwojsqDcpXZEqWBvyeyqaSdf1Xsh", 282.63795424 * 1e8}, + &TokenPayout{"DsiqGsQayuzPJaAhcq9LnVosXW5mF3cwQFo", 282.63795424 * 1e8}, + &TokenPayout{"DsiqHsBTaAM2fpu3DqASKmzwYj1Xta8ndAQ", 282.63795424 * 1e8}, + &TokenPayout{"DsiQox5myhiZoY33f7dtiJQUQCHbZVi561V", 282.63795424 * 1e8}, + &TokenPayout{"DsiqPkZ5abCSDrexgP1ZtRHfXJ99MYxpKPr", 282.63795424 * 1e8}, + &TokenPayout{"DsiqtymSvLnQVaLNGJUMqsi9HJzQCCxcggq", 282.63795424 * 1e8}, + &TokenPayout{"DsiqvSqUK2JFhuFewgGtTJfLzu9EpUFY9bs", 282.63795424 * 1e8}, + &TokenPayout{"DsiRAJSoMkq6EN6fvNm8jV1Ryv5sWPJMmTQ", 282.63795424 * 1e8}, + &TokenPayout{"DsiRfDC3Xr6c5PJw8Y19LeLR45FF7M8WRKD", 282.63795424 * 1e8}, + &TokenPayout{"DsiriLmoaorub68M5rQHsGCbVsKosZmGmHw", 282.63795424 * 1e8}, + &TokenPayout{"DsirTKiaHNF32HdPj2XZLvfnPBSMvgHF29K", 282.63795424 * 1e8}, + &TokenPayout{"DsirTY7QvqkshumVXR4UhHB4UALSk5qm79a", 282.63795424 * 1e8}, + &TokenPayout{"Dsis91gBEZCe2uJqykJMTa6Nz8nWQt4nbAG", 282.63795424 * 1e8}, + &TokenPayout{"DsishQQUewUJzLB8Fzu2KjNp5xV8sQQ6AAH", 282.63795424 * 1e8}, + &TokenPayout{"Dsisk9LePnivUWUMGMxhG8hrkP1F1bGHMoU", 282.63795424 * 1e8}, + &TokenPayout{"DsiSn62UvTKiY3neJu62gPJssQMWquAipeu", 282.63795424 * 1e8}, + &TokenPayout{"DsiSnwbqjxeCMPNCZSeLb6tjeVqw78no2YX", 282.63795424 * 1e8}, + &TokenPayout{"DsiSrL171CZSqrDqogrdr5b5QtePzUCnDJ9", 282.63795424 * 1e8}, + &TokenPayout{"DsiSvri16xEjy5jM1cGqVCiVKXLUQLzfLWd", 282.63795424 * 1e8}, + &TokenPayout{"DsiSzdfbM1a8yyUD2R7pHx8fgqfwURNa8oN", 282.63795424 * 1e8}, + &TokenPayout{"DsitezGqiSqQ1PiACEPxueQtidi9nJfo1zC", 282.63795424 * 1e8}, + &TokenPayout{"DsitQCUkPuA1HQprNbxXjjQrEsYamebFnUD", 282.63795424 * 1e8}, + &TokenPayout{"DsiUB9BFaqKia6AnbQDE3jtDNySSSdtSRVj", 282.63795424 * 1e8}, + &TokenPayout{"DsiuEfenoExUGE9xpiXDyG6JM51na9ZWFhv", 282.63795424 * 1e8}, + &TokenPayout{"DsiuhAizGaBgCtUxB1wf9mFuMGUphE1zhz3", 282.63795424 * 1e8}, + &TokenPayout{"DsiUQucLvmBdxXHfGTiMKgKGhzdgeCNG9vG", 282.63795424 * 1e8}, + 
&TokenPayout{"DsiUskspQ5vLvpFwqxv2XSUCzx5UF7jNcEF", 282.63795424 * 1e8}, + &TokenPayout{"DsiUWx2zMJVEHE1gxx5p5nTxcyDfrF6h92C", 282.63795424 * 1e8}, + &TokenPayout{"DsiuxtnDXTvdCCfmE87zuDx7u2Ln8X8XDDm", 282.63795424 * 1e8}, + &TokenPayout{"DsiUyKbTa9MQnv2vZGu2aaUPp1gPgDPSTaJ", 282.63795424 * 1e8}, + &TokenPayout{"Dsiv7mKjaEBhZcAeZd2GUA4hLQbYHGg3bF3", 282.63795424 * 1e8}, + &TokenPayout{"DsivMyQRekfu4opDUWmQuMaQMmdtXjnECT2", 282.63795424 * 1e8}, + &TokenPayout{"DsivZc8RsMRaNgtM6RygsbTJQpcx2W6kNnu", 282.63795424 * 1e8}, + &TokenPayout{"Dsiw3XzqeXfCAbYjJCRcFY3nRmM75BpriGv", 282.63795424 * 1e8}, + &TokenPayout{"DsiW5kZyVC8P9ZLHaGKLjnXuQKSwRcTnnEc", 282.63795424 * 1e8}, + &TokenPayout{"DsiW6M7wJ4f6gQUGee1zHLaqNEXj6CUELv2", 282.63795424 * 1e8}, + &TokenPayout{"DsiwFX6ceCstSwbRHVCTXpEhHAFEjjRB7k6", 282.63795424 * 1e8}, + &TokenPayout{"DsiwGrz8PAhRPhd6JJGrs6KBvE5J5AjEj3d", 282.63795424 * 1e8}, + &TokenPayout{"DsiwPQ4Fp5AskHiA6rHiwvX4JyZMJ6gzJm4", 282.63795424 * 1e8}, + &TokenPayout{"DsiWrsfrVDUwfGrieTYti1NT5dSWjn6kgR6", 282.63795424 * 1e8}, + &TokenPayout{"DsiwvQBrjF7Km2aV72v4hCwg1facXwoTHVN", 282.63795424 * 1e8}, + &TokenPayout{"DsiX9aASYAyc5sHkQHbnm2SLXwYV37yVHS5", 282.63795424 * 1e8}, + &TokenPayout{"DsixcA1cNva85HQEaZG7YJE6HAKySMZK3Wh", 282.63795424 * 1e8}, + &TokenPayout{"DsiXmrxowoeM8rBXL5ZSmBJvwJmgLb2Dra6", 282.63795424 * 1e8}, + &TokenPayout{"DsiXNQBk2vNS69jcmxaCrRX2673T9JZpbtR", 282.63795424 * 1e8}, + &TokenPayout{"DsiyDjRq11ibQjEqzem1bh1AqWTV6w6BzSx", 282.63795424 * 1e8}, + &TokenPayout{"DsiyDzAAHNaFL1vBi9iYR63eWpn9eVn9X77", 282.63795424 * 1e8}, + &TokenPayout{"DsiyMieSSsEh8YeTPDXj4fh9tZSwkASksvj", 282.63795424 * 1e8}, + &TokenPayout{"DsiYvWiUT3tR9YmSA8rFiS7byjyZBWz9nBY", 282.63795424 * 1e8}, + &TokenPayout{"DsiyX4KGExts2jxcq3KDH47ajxTkTgaZegF", 282.63795424 * 1e8}, + &TokenPayout{"DsiyywVDq3vezXDnQEfBoHiQFnPHRcftGFv", 282.63795424 * 1e8}, + &TokenPayout{"DsizCRsmwJkcuQiTePRiuHWpdmChzR58zFx", 282.63795424 * 1e8}, + &TokenPayout{"DsizFeGSPKGf5Sy67J3eQpP6pAeqY1UeMig", 282.63795424 * 1e8}, + &TokenPayout{"DsiZsdz8fbQoX5tor442ZmBq8w9pXGyVMwg", 282.63795424 * 1e8}, + &TokenPayout{"DsizWEWHkFH8egbWACxpwYrpQAxMVfFxtoH", 282.63795424 * 1e8}, + &TokenPayout{"Dsj1YvuzV5hJcEqeYbzvFo9C2LphdXpXcp9", 282.63795424 * 1e8}, + &TokenPayout{"Dsj3k9Lrqs1DaY9rNzVy4zfgZcS68dz8QsY", 282.63795424 * 1e8}, + &TokenPayout{"Dsj3wUkxuDrnWuesRhc8qUQSvYVeajNMEJx", 282.63795424 * 1e8}, + &TokenPayout{"Dsj4Q1uxH7t3Tu1Er4iaKaApAASyXhZUsFE", 282.63795424 * 1e8}, + &TokenPayout{"Dsj6hYcNjHtbcZ3cK3PFwSEThUA73cYxbbY", 282.63795424 * 1e8}, + &TokenPayout{"Dsj6jk9CbyzDuSEbfymbyHuc9Lzpw11AzR3", 282.63795424 * 1e8}, + &TokenPayout{"Dsj6nn544EvUw5KXXViZDshDhSsLDXA8X1u", 282.63795424 * 1e8}, + &TokenPayout{"Dsj6TJCRNUChBtDvfCLuHtMD4NaY8DZFrUg", 282.63795424 * 1e8}, + &TokenPayout{"Dsj6y24Me92fqSyrrZxicPktZxYZspQ9jeZ", 282.63795424 * 1e8}, + &TokenPayout{"Dsj7LSp2JXgFqMMgZ7qnm99vYTnJocKHTjW", 282.63795424 * 1e8}, + &TokenPayout{"Dsj7TsJipy8phMZfUZiKM9GfCoEgsuw3p7K", 282.63795424 * 1e8}, + &TokenPayout{"Dsj84g67rwjCQ9XAuBEmm2sFTu6AyGaRAaf", 282.63795424 * 1e8}, + &TokenPayout{"Dsj8CerADyohFG8vEuo9Pgbf1zMv1P1oqs1", 282.63795424 * 1e8}, + &TokenPayout{"Dsj8YDUPCDWDQpThrkEkfsNjpCT74RniDha", 282.63795424 * 1e8}, + &TokenPayout{"DsjamxtjDMgsGjUEGvje1FVB9ioT3a3Jvks", 282.63795424 * 1e8}, + &TokenPayout{"DsjawMjWVVNWZjDjfD19hkW2S49bTqaQimn", 282.63795424 * 1e8}, + &TokenPayout{"Dsjb6SCmsRVjexPrwQ6UgCwHojDtwLaBjvy", 282.63795424 * 1e8}, + &TokenPayout{"Dsjbz7QV8wvMMVMPbqqFB76WUtG6icENEcH", 282.63795424 * 1e8}, + 
&TokenPayout{"DsjCfaSecnvv2wBrX8UeqBmHfoCPn1bCUVb", 282.63795424 * 1e8}, + &TokenPayout{"DsjcSYf1RaSDUPoqJ4HUcj93nefcZwEzLEr", 282.63795424 * 1e8}, + &TokenPayout{"DsjdjiedQm8WpMyJtmgdvwAi4aM7Mn1ivpH", 282.63795424 * 1e8}, + &TokenPayout{"DsjDLrPuCF9M7Sfm1y3hXbiGgPHnae5yHJc", 282.63795424 * 1e8}, + &TokenPayout{"DsjEawVsKJgbEaqGzRB1mBtVQsTjqbw58Uj", 282.63795424 * 1e8}, + &TokenPayout{"DsjED85gvmDUSh8HgSjDw2kh7C2ECxjDnQF", 282.63795424 * 1e8}, + &TokenPayout{"DsjEDxjvZKAcX91FBTNMetSt73sTkzWkbT7", 282.63795424 * 1e8}, + &TokenPayout{"DsjeFTtBcAeeBfMPC9Ybo1P9kcHn4fApeyv", 282.63795424 * 1e8}, + &TokenPayout{"DsjegYHQMW3PrWPrnDMrN9b39Tr8MVMhkHs", 282.63795424 * 1e8}, + &TokenPayout{"DsjEHECid6Yyv9iUL36UfqfTj3pUDemuQoE", 282.63795424 * 1e8}, + &TokenPayout{"DsjeqAfQQm8mcL3TACoV5s468mtDgxgrcCT", 282.63795424 * 1e8}, + &TokenPayout{"DsjESJFh4QeazffaMM4u6bSvbUFqZKdquor", 282.63795424 * 1e8}, + &TokenPayout{"DsjEU4tkZW18E6VJnfo22qqdndE8ZCPK7EH", 282.63795424 * 1e8}, + &TokenPayout{"DsjeUST91ZFkPk8J4fDXAfiMJbhxh2Mmu2U", 282.63795424 * 1e8}, + &TokenPayout{"DsjFiFkC9YnMSPhK9KV31BBxwXY8GRqETBi", 282.63795424 * 1e8}, + &TokenPayout{"DsjFpGVLLFWCnq57uqH98f9dJhFvJyFBpmn", 282.63795424 * 1e8}, + &TokenPayout{"DsjG7NfBdiLaiDrBtA526tRGVbpJUkZJpqX", 282.63795424 * 1e8}, + &TokenPayout{"Dsjg9sfdo7n6ZyZUX2RQYax7UWvmnvyTsXZ", 282.63795424 * 1e8}, + &TokenPayout{"DsjgdMr33fk9X5dcXgStS9kbvLvQdFf81dv", 282.63795424 * 1e8}, + &TokenPayout{"DsjGEnHf9BV5ZS5FZYeZBNYiD4x6Kp8oxfe", 282.63795424 * 1e8}, + &TokenPayout{"Dsjgjrfiw93G9MebwPJMRjJRBxrT518jdZB", 282.63795424 * 1e8}, + &TokenPayout{"DsjgkiWZAfGRrnMH8wfcn6gAsyviKwA9ifP", 282.63795424 * 1e8}, + &TokenPayout{"DsjGMpKpXVTfJLvnxyrLMC72y8KWL7SAQCB", 282.63795424 * 1e8}, + &TokenPayout{"DsjGRWvqaGPRaQfYCMp9N7ZMEozzuoLiygr", 282.63795424 * 1e8}, + &TokenPayout{"DsjGVRRNkWLKcLNt9GBiPbGUky8AeJNx8aT", 282.63795424 * 1e8}, + &TokenPayout{"DsjHFRW2ufk9z6ekhVGiTZveJpYcXa6fUwC", 282.63795424 * 1e8}, + &TokenPayout{"DsjHgMzJZjMEnXmeuhcUxD2rEGfUNbRHhRM", 282.63795424 * 1e8}, + &TokenPayout{"DsjHiWq4EcjPLKfiTt9eBRR9iPJ4z82J1wY", 282.63795424 * 1e8}, + &TokenPayout{"DsjHMgpGwng8JodwvEJndBuYYj2GZVdcyqj", 282.63795424 * 1e8}, + &TokenPayout{"DsjHpqpxD8aaNTa1CKs3EA2GGS69mMRg1v5", 282.63795424 * 1e8}, + &TokenPayout{"DsjhRTfHgHQQtd1Ci3z2XGYRDq1pWNfsy9U", 282.63795424 * 1e8}, + &TokenPayout{"Dsjiu3mWqrMJEmkqFTEDasqzaVVar49UUXk", 282.63795424 * 1e8}, + &TokenPayout{"DsjJBVKYvPH85229Z2XV1U6yN5PfpNYpxe7", 282.63795424 * 1e8}, + &TokenPayout{"DsjjM62BLNtAf4qNY7zp5KfipivuCcW767b", 282.63795424 * 1e8}, + &TokenPayout{"DsjJPTmckr8Mj6VSqoEbCDFGSHEFAszcnpy", 282.63795424 * 1e8}, + &TokenPayout{"DsjjsFC8sEzWEQZdfvdhn4b2BebH1CuRJWE", 282.63795424 * 1e8}, + &TokenPayout{"DsjJswbcHZiVzPLaykSKeqrdjRdV32RXVou", 282.63795424 * 1e8}, + &TokenPayout{"DsjJUFrdgo14h6bCC9NHbs9iGUaNxKpydtn", 282.63795424 * 1e8}, + &TokenPayout{"DsjjWx1dGWSNtKT6BbAbWDRQnJWUEdG6gKg", 282.63795424 * 1e8}, + &TokenPayout{"Dsjk9Jn4yxYcixLp9Xe7sPfZQGxZh16F3DD", 282.63795424 * 1e8}, + &TokenPayout{"DsjKnagcTCjSzPPT96GrfyCKCcmvhvQCKhE", 282.63795424 * 1e8}, + &TokenPayout{"DsjKr5gqxtBJcp1GVdzSptMxWcBGRPRvogd", 282.63795424 * 1e8}, + &TokenPayout{"DsjKwiWUnhz2pkMUdbrBXFrWbEYNRyckjbp", 282.63795424 * 1e8}, + &TokenPayout{"DsjLET9vf29xF1CzFFTXCGqSxgQLqfnEiyw", 282.63795424 * 1e8}, + &TokenPayout{"DsjLQo18jZPefsw137fPqMd5KxdBCnv463F", 282.63795424 * 1e8}, + &TokenPayout{"Dsjm5fcn2mxgXzy1ZYQZ5nMttwiXhk8cLVL", 282.63795424 * 1e8}, + &TokenPayout{"DsjmbAfUyAbJKiMZ8KM1yHyRwqCCxdaerLd", 282.63795424 * 1e8}, + 
&TokenPayout{"DsjMv8v4FbFdKrN476soLyHbhtPexT2KeN3", 282.63795424 * 1e8}, + &TokenPayout{"DsjmyQ6qEG9FEKSLBNsukwU6eTez2ymsUyF", 282.63795424 * 1e8}, + &TokenPayout{"DsjnE2NJLP9LqGjqchP5noY1noczpFsc44a", 282.63795424 * 1e8}, + &TokenPayout{"DsjNKRMxK93UibBmdHg3DVDP3qWbKEPLpbU", 282.63795424 * 1e8}, + &TokenPayout{"DsjNqdzk26PWGj37rDeX9kPgz4coVGmUtz4", 282.63795424 * 1e8}, + &TokenPayout{"Dsjnsm6mSWxmh4NSCboNTwWisexH3zs2WUM", 282.63795424 * 1e8}, + &TokenPayout{"DsjnVrixJPjMPtdn6hzyVz36kzL3hqsMaRd", 282.63795424 * 1e8}, + &TokenPayout{"DsjoaqKeahij8vGmYBG3gkAumQqjzinzwJF", 282.63795424 * 1e8}, + &TokenPayout{"DsjonfH3t4AWKb1z1Vxp12JP8CCTiSXTe4z", 282.63795424 * 1e8}, + &TokenPayout{"Dsjp8gHL9BgEQVitou5Gkdb1owcjtwEVL1o", 282.63795424 * 1e8}, + &TokenPayout{"Dsjp8LLRSEQ7tgfacHNCiWMkZJxC3rNKx4S", 282.63795424 * 1e8}, + &TokenPayout{"DsjPgZ5SWFMaxfCdaQ5rG14mpPgaL7AgBCb", 282.63795424 * 1e8}, + &TokenPayout{"DsjpTPoQ7pUxVuBaKGoPUykCsv2Dzfeubyj", 282.63795424 * 1e8}, + &TokenPayout{"DsjPUhgc6zXrP2LT5cDct5NVKKQhNiPotrn", 282.63795424 * 1e8}, + &TokenPayout{"DsjQCd2WsPpN1wm6t2LeTxUEzZpEYf25Jf5", 282.63795424 * 1e8}, + &TokenPayout{"DsjQP81MQUZiqActAqwFzuvKgWdXkHHnvLS", 282.63795424 * 1e8}, + &TokenPayout{"DsjR7pVsZpbuFdAxicesVPLnh2YDfer3EUD", 282.63795424 * 1e8}, + &TokenPayout{"DsjrbN6jrbwsfqAzjrkSbqSmh8Ee1qKDYDT", 282.63795424 * 1e8}, + &TokenPayout{"DsjrHbVVGakprTWHMRgxMCTSRrN5ZfiBJSt", 282.63795424 * 1e8}, + &TokenPayout{"DsjSJSDucC3UTAvEvRPi43FF8mWKKvNd3i8", 282.63795424 * 1e8}, + &TokenPayout{"DsjSpN4PgPvMqQCMXfpFPXgPnXbPpUDMBJg", 282.63795424 * 1e8}, + &TokenPayout{"DsjsSZafmSZEiARzWpYnttPJxP9Sp315ae7", 282.63795424 * 1e8}, + &TokenPayout{"DsjSz5jarbVBZ8vDH8T2BMMUrP6rF9ms1E3", 282.63795424 * 1e8}, + &TokenPayout{"Dsjt3foWrEHFfS5CXg6secYZCx2GcthS4Pn", 282.63795424 * 1e8}, + &TokenPayout{"DsjtiLVbv6w2XpSZPPdz7FTzTC13kCSHbjR", 282.63795424 * 1e8}, + &TokenPayout{"DsjtL6A3HBd5iBrDMMAzraPcyj7YBFaqj29", 282.63795424 * 1e8}, + &TokenPayout{"DsjturhsLAoocAs5Pf7LQx3WYxf4XMZqgpP", 282.63795424 * 1e8}, + &TokenPayout{"Dsju6aqykCvwFNKCAB87qFYWnyhTri2YoZp", 282.63795424 * 1e8}, + &TokenPayout{"DsjujJD21BgqvNPDKEoyGKw6JYyZ7uB5YsX", 282.63795424 * 1e8}, + &TokenPayout{"DsjuNYfNWgHWyWr25QptXzgcuSXKFWjnMGf", 282.63795424 * 1e8}, + &TokenPayout{"DsjUxBuEq5Jyr4eZ6ZAgF9wa5XdtBjcS5wS", 282.63795424 * 1e8}, + &TokenPayout{"DsjuyL1p91Cq7sEqdkNWx8Mf8vQmnB9EMSG", 282.63795424 * 1e8}, + &TokenPayout{"Dsjv6tHncFhbPAtvFWC2LzZxfNDt52swguL", 282.63795424 * 1e8}, + &TokenPayout{"Dsjv72mkgmzfgQp3dW8xmJxrKGpAP3JLbf5", 282.63795424 * 1e8}, + &TokenPayout{"DsjVAbvzAwUdSFKjUKm9N5P2GqcDqBk2Fwg", 282.63795424 * 1e8}, + &TokenPayout{"DsjviR9j8jEmzfjQhXpHGSzVMwyTaU9CvRc", 282.63795424 * 1e8}, + &TokenPayout{"DsjvMGFPNkqUNP4VfigzdktE9vKEkdeL53x", 282.63795424 * 1e8}, + &TokenPayout{"DsjW69FoZ7PN3fZD34qbTxvfyr9zF2q2axr", 282.63795424 * 1e8}, + &TokenPayout{"Dsjw9PgFiVtCjgJJmow6aMfPH7h6T1yYWht", 282.63795424 * 1e8}, + &TokenPayout{"DsjwaTrL4GqLzVUnDKfnUhJNxfVPvza6j5b", 282.63795424 * 1e8}, + &TokenPayout{"DsjWkLnWWMMkQA4WPHTabu6r3RjaQuTrRMh", 282.63795424 * 1e8}, + &TokenPayout{"DsjwRomRtGQNjffxCGD4962ncDaSEktvmKc", 282.63795424 * 1e8}, + &TokenPayout{"DsjwRtQ9FtzF2tqRTGhSMnbH3soNn8KAK5p", 282.63795424 * 1e8}, + &TokenPayout{"DsjWz7XytiPdu2UvGs6GnUK8DCvJ2Yk132r", 282.63795424 * 1e8}, + &TokenPayout{"DsjxRv96JSGfSv7Xeg19CdsRULdK6ruLKQD", 282.63795424 * 1e8}, + &TokenPayout{"DsjxxeLeuQB1xDNWyZrKyhtCzGH7n9e8o3N", 282.63795424 * 1e8}, + &TokenPayout{"Dsjy1bQt1WhZnpdYJ2e2vNtHdFG8qb2YY45", 282.63795424 * 1e8}, + 
&TokenPayout{"DsjYkKbCcCESmgr3Y2UQUmXNw12GQCpSStf", 282.63795424 * 1e8}, + &TokenPayout{"DsjYUU2gmsGZaRmJghT6KQFJzVfJFor19ag", 282.63795424 * 1e8}, + &TokenPayout{"DsjyVLvAYsgAJd78QQyoYTQ7NPj21GYVbqH", 282.63795424 * 1e8}, + &TokenPayout{"Dsjz4WUVJBJrzp85xxEjEnuRDJriRLDYFpu", 282.63795424 * 1e8}, + &TokenPayout{"DsjZcHM1myXSnoav5wCqLRKZqhWJ8qiJMsp", 282.63795424 * 1e8}, + &TokenPayout{"DsjZe4oABSqihzUsstejTWdhbGaUg822eoK", 282.63795424 * 1e8}, + &TokenPayout{"DsjzELyq4aMRHteFmKuJLQNrzVWsdbGvUVg", 282.63795424 * 1e8}, + &TokenPayout{"Dsjzh2NrdA7JCaEtpem2vVh1kKYKvFVENxf", 282.63795424 * 1e8}, + &TokenPayout{"DsjzvbBqxrjdu4CNjRevzeaNXL2iokbMLMx", 282.63795424 * 1e8}, + &TokenPayout{"DsjZYbYbzqvCerff8kuDJAiAvcNdoi68dY5", 282.63795424 * 1e8}, + &TokenPayout{"DsjZyDdALXa3ZAYx36QaybAFSYQBVTm2bSk", 282.63795424 * 1e8}, + &TokenPayout{"DsjzzyDUXbJHxR5guyEFrAxCDnaT61s1eCA", 282.63795424 * 1e8}, + &TokenPayout{"Dsk183orPX7KV65zFy3ZEUSTZYUqTeXw85Y", 282.63795424 * 1e8}, + &TokenPayout{"Dsk1DWs7bTyznmAGtyQhZSGJe44xuEbKC8L", 282.63795424 * 1e8}, + &TokenPayout{"Dsk3iDkm8oCrQa82cgynfjntxYQjtDE7HD9", 282.63795424 * 1e8}, + &TokenPayout{"Dsk4dzRjut9g3Ckvi8Un94MvmCc7edLM67b", 282.63795424 * 1e8}, + &TokenPayout{"Dsk4FvLe4KGMeVYiQ3CAQ1iiMZCamiaigM6", 282.63795424 * 1e8}, + &TokenPayout{"Dsk5AvimNgioaq8k8TAXxEVpDFy3NbHxqVh", 282.63795424 * 1e8}, + &TokenPayout{"Dsk5V4L9Aayp9rT3WTQkW6b1SeuH7bg1kr3", 282.63795424 * 1e8}, + &TokenPayout{"Dsk5vYv8KbrcWVv1S3fcfukhdhxA3Lvdemh", 282.63795424 * 1e8}, + &TokenPayout{"Dsk6SniPG1DFw6R1BtVmHHuaUSUMHUkJ8Nv", 282.63795424 * 1e8}, + &TokenPayout{"Dsk6VsMZbyVovvKKUuFP4s8Yr39hELd7Z6k", 282.63795424 * 1e8}, + &TokenPayout{"Dsk72kUpzrEwPbBJLT2QbkDuNBduaA1UQ45", 282.63795424 * 1e8}, + &TokenPayout{"Dsk7e3pj78NQqnUMiaZFt2fRXbgKyLfd4UG", 282.63795424 * 1e8}, + &TokenPayout{"Dsk7GnpgFj1Zk41AtRQWYUVCuTCqmVDiH9e", 282.63795424 * 1e8}, + &TokenPayout{"Dsk7W3UjbCgaFSCZhT1Ke53QsFn1dsRjy1X", 282.63795424 * 1e8}, + &TokenPayout{"Dsk9HRZCjRzrfYxbpUyc8nBCUo6cVEDa1Za", 282.63795424 * 1e8}, + &TokenPayout{"Dsk9yzEy2LTWKavALaR4oxiAJhLJ2u625UV", 282.63795424 * 1e8}, + &TokenPayout{"Dsk9ZN1qw3WJnPDa8qheTC4JighFZM2N9Du", 282.63795424 * 1e8}, + &TokenPayout{"DskAuBPJuwyQ3YYm2T5tJE9vYdNecFjDYp1", 282.63795424 * 1e8}, + &TokenPayout{"DskaXdcqAP5A6LhRewukCm7cMTYAYDiCWKB", 282.63795424 * 1e8}, + &TokenPayout{"DskBDbNYqK6hkhr2vAFkUGZb7H27f4PQyUu", 282.63795424 * 1e8}, + &TokenPayout{"DskboVgi2XYcUqYiGmBdHvXnzcEkJ8TF59q", 282.63795424 * 1e8}, + &TokenPayout{"DskbuRYQp8aR8nHUQ7iSCXbpHWtjbfYaM3B", 282.63795424 * 1e8}, + &TokenPayout{"DskC2LEt8an8o2ivGhGivPziBBeoMvYkCLp", 282.63795424 * 1e8}, + &TokenPayout{"DskCnXmkX1TXvNeEzxYkrhCAJVEZkdPpLTG", 282.63795424 * 1e8}, + &TokenPayout{"DskCokQAq5AaZNc8964PUbkNS4ST2NFVZc8", 282.63795424 * 1e8}, + &TokenPayout{"DskdAu5arPrr5Sk8f2StjZBs1tHDJoiVKc9", 282.63795424 * 1e8}, + &TokenPayout{"DskDtE83qW1xvUjghRNAEB6gvwsPgpwQVsf", 282.63795424 * 1e8}, + &TokenPayout{"DskEFaY7uXDmwhDAMDyaQ8ToySz8YB4g8xo", 282.63795424 * 1e8}, + &TokenPayout{"DskeisPf8Q11ubfJYx1GWFoBEfCMSUwEeGA", 282.63795424 * 1e8}, + &TokenPayout{"DskeYxQ5uW8JXKdJkZTGjp6HpkCmTP9aGmN", 282.63795424 * 1e8}, + &TokenPayout{"Dskf1hh7iZ3EpSSqfcBmeks1QCcFhm3Hcv8", 282.63795424 * 1e8}, + &TokenPayout{"DskfqLAAr1zZHjGmbcxRC9AdkQSnTJZdBTf", 282.63795424 * 1e8}, + &TokenPayout{"DskFUgRPCEuzv6dy7sZ5oCFtYLteZFPjBhA", 282.63795424 * 1e8}, + &TokenPayout{"DskfVKckTJnzLMoKD7eVeEQXubGNc97RJoZ", 282.63795424 * 1e8}, + &TokenPayout{"DskFWD7UpsnjtwnHweBJRRHUgNeRBdLe8ca", 282.63795424 * 1e8}, + 
&TokenPayout{"DskgZqWh5s8UAN4FZfYb6FnV18eFphAcA7U", 282.63795424 * 1e8}, + &TokenPayout{"Dskh9d1v6KpiAbh1v6w6XZdGRN1Hv1x5U1B", 282.63795424 * 1e8}, + &TokenPayout{"DskHbuzfB61kijVn6xs1JPBAg7a6miWrSAm", 282.63795424 * 1e8}, + &TokenPayout{"DskHdGeY3twncLbADKxaKop981KPQxVhgGA", 282.63795424 * 1e8}, + &TokenPayout{"DskHKPvVmmdwGps8QLKsV3NgG1W5CLwjuQM", 282.63795424 * 1e8}, + &TokenPayout{"DskHPWo1aFi8FyaKsJMAWCKqfy4v5iGwU95", 282.63795424 * 1e8}, + &TokenPayout{"DskiifLATx3ibni5uU3avq5ytfwipdvbD12", 282.63795424 * 1e8}, + &TokenPayout{"DskirtDoQxHFFiUN6mHpD8XBBwutA14oDoh", 282.63795424 * 1e8}, + &TokenPayout{"DskJ9BguiRPZqXJxoEGgzCxvGCPF342Pcqs", 282.63795424 * 1e8}, + &TokenPayout{"DskKDTzN4LpCDsrK7tvDFDaFphumKFJqwvR", 282.63795424 * 1e8}, + &TokenPayout{"DskKhwC2LYpBuEMHr21ZNdagtd5FY845dRW", 282.63795424 * 1e8}, + &TokenPayout{"DskkjWMKmMdnxHcrSWBWic9JHUWC74Zoyv6", 282.63795424 * 1e8}, + &TokenPayout{"DskkLidQCxByQ3TQhtofA5vCaPqLBnmbwnR", 282.63795424 * 1e8}, + &TokenPayout{"DskKv2hyfJ1UhZX764YRLzCKkx2xGWnsbm4", 282.63795424 * 1e8}, + &TokenPayout{"DskkW7NKQL8ERFDnb3QwE8B5k5zhuCbnB2D", 282.63795424 * 1e8}, + &TokenPayout{"DskL5g4vGa4BfUADvnV8gfhfRgMTciQ5YcR", 282.63795424 * 1e8}, + &TokenPayout{"DskM1Lt52snV26jChRmZw9TJpuZhHeLtr3y", 282.63795424 * 1e8}, + &TokenPayout{"DskMdUmRtRjbrrsu1RXrJTsF7Y3eXoi1Mdb", 282.63795424 * 1e8}, + &TokenPayout{"DskMf45J3iys9isgdPzxPvhaUamgaUzbjHi", 282.63795424 * 1e8}, + &TokenPayout{"DskMJU92T8yPnPDk89s17BpxqeJTTvUAPpW", 282.63795424 * 1e8}, + &TokenPayout{"DskMKG9TVm7uPuXtPNoFm5PWUd17yQ8D8C2", 282.63795424 * 1e8}, + &TokenPayout{"DskMQWUTbFQ81bmShvMF8MNAAtiUhD1XVar", 282.63795424 * 1e8}, + &TokenPayout{"DskmTBiVL17nV2HMz9WjEPdqDA1mMBto56u", 282.63795424 * 1e8}, + &TokenPayout{"DskN2X7Ma1JMD5PgWwjjZWRXLFh3SnyYXwf", 282.63795424 * 1e8}, + &TokenPayout{"DsknfdXfSNFxb1WGHY4cMgUtPp9a5CWe3tK", 282.63795424 * 1e8}, + &TokenPayout{"DsknpAF8uMmEv3cNuVLHtSGV8Mq3hG3Xa9Z", 282.63795424 * 1e8}, + &TokenPayout{"DsknQybWQPsw9wQge3fwYkpQr6T8D8R8UV4", 282.63795424 * 1e8}, + &TokenPayout{"DsknxrpkdCzgsWdJBGo4ZgYHZcXPZczUFCg", 282.63795424 * 1e8}, + &TokenPayout{"Dsko7CZaYDPyaBFcvoeUzaPyrNM75FrPP4e", 282.63795424 * 1e8}, + &TokenPayout{"Dsko9YiL8YdMNAwZ4KexLiRJYoyFHsZVywS", 282.63795424 * 1e8}, + &TokenPayout{"Dskoc5pJkMSRcnngrkJRrn8UryiLDy3XcmA", 282.63795424 * 1e8}, + &TokenPayout{"DskoJYacivZcnbPvAJkwa9pU7k1q1U27Hdf", 282.63795424 * 1e8}, + &TokenPayout{"DskP3WAseRHJFLM9SxrEEQbr6YchaU7aK1V", 282.63795424 * 1e8}, + &TokenPayout{"DskPaA1yzGU3iuQXH8mtcLKMTByn5oKs1gz", 282.63795424 * 1e8}, + &TokenPayout{"DskpAMSkVhatiS1CJDaVaWKSwwjzzvNuo9q", 282.63795424 * 1e8}, + &TokenPayout{"DskpMjKh9PhorCc5f8qaud8Kt4o63aHQRnm", 282.63795424 * 1e8}, + &TokenPayout{"DskpuAFh4prtMmHybpwZXMMDKkLmnCsP2Y8", 282.63795424 * 1e8}, + &TokenPayout{"DskpVwgWDahLZMm5GfipEEjP1DvpGihDWbR", 282.63795424 * 1e8}, + &TokenPayout{"Dskq36Ayj7dMXM8YziMhqLe63HXDgTkLcxH", 282.63795424 * 1e8}, + &TokenPayout{"DskQ4MPwcrHKXmMNuDJAobs2NA3UJud7Zxt", 282.63795424 * 1e8}, + &TokenPayout{"Dskq5MHnGp7pSLcnqKa4cXMnZ9kR1XLWieZ", 282.63795424 * 1e8}, + &TokenPayout{"DskqwVdhLFp5Z8vbg6tkMprEbe1J9UMvbZw", 282.63795424 * 1e8}, + &TokenPayout{"Dskr2WWpn96w468y4TzqBrosDnsmbqhohnr", 282.63795424 * 1e8}, + &TokenPayout{"Dskr3gUtZgbkUgXUGnPJq8qmvGR8UKfRViP", 282.63795424 * 1e8}, + &TokenPayout{"Dskr4mjXh1RvqxesVbRxU7nzXKcAx6nJvGw", 282.63795424 * 1e8}, + &TokenPayout{"DskR9AYnTNtd4FKiWtxVEu4Z72sx3TyGQSX", 282.63795424 * 1e8}, + &TokenPayout{"DskRPKbC1C6b1XzVKgEXAKyGN9PG9Cigq3e", 282.63795424 * 1e8}, + 
&TokenPayout{"DskRTF3fJRPdxivjQSPBJR8K2T8ELzwbRnw", 282.63795424 * 1e8}, + &TokenPayout{"DskSbNWCobBFpLVMz5vqzn9iCf8thwa7bjD", 282.63795424 * 1e8}, + &TokenPayout{"DskSFeWrJ4ZDvYD3vhhVNhAum9DkCdwwJse", 282.63795424 * 1e8}, + &TokenPayout{"DsksGKQRXsTsPLYNv28BQ2LkNvB87bN1eex", 282.63795424 * 1e8}, + &TokenPayout{"DskSKBCVHoHnXQviRcDC3F21q4GyGLWzLba", 282.63795424 * 1e8}, + &TokenPayout{"DskSUDGyxwhKHdEXjKEpEYiTtR8HRshoWfu", 282.63795424 * 1e8}, + &TokenPayout{"DskT64J6qjtHEVUqpLqckaBXERuascnBVU9", 282.63795424 * 1e8}, + &TokenPayout{"DsktEw6wberwi9wu7uSULRfdGDdEb1qBgbo", 282.63795424 * 1e8}, + &TokenPayout{"DsktpoMd7YQ9NuLY7Ln3n5BLwhdxvo9HAdz", 282.63795424 * 1e8}, + &TokenPayout{"DsktUji8kqxJcsFFgMy2dq9PbMifMg56q49", 282.63795424 * 1e8}, + &TokenPayout{"DskUzkffhxGfHAcr2vzuhkBF1uJdPrQirKy", 282.63795424 * 1e8}, + &TokenPayout{"DskUZLJtfxhFDCxpCHBA41TTVeDBmzctYGb", 282.63795424 * 1e8}, + &TokenPayout{"DskvakTXRspevyuoCHgtovi5MvFe9jCNxM6", 282.63795424 * 1e8}, + &TokenPayout{"DskvC8QWXJk1iuZW8WuCbWd2KHPBygan8DH", 282.63795424 * 1e8}, + &TokenPayout{"DskVGrB8D9fuJrwqRYJBM9Dq3sAtsTC7vev", 282.63795424 * 1e8}, + &TokenPayout{"DskVJN6w5ZJp21b1axcsuQrgTyiaH3X6ySL", 282.63795424 * 1e8}, + &TokenPayout{"DskvPGH7VgdahAHYt6cawwMFfyLKBh5t8XT", 282.63795424 * 1e8}, + &TokenPayout{"Dskvq4ecTbEf9NNsiWXkaEXdm19RwEV9KZ1", 282.63795424 * 1e8}, + &TokenPayout{"Dskvuur8XtkiwkurmfPdTCx2m2sfks6LYyt", 282.63795424 * 1e8}, + &TokenPayout{"DskW3fAHFX7DtWRoLqykLfD1S8vsUpA6rb8", 282.63795424 * 1e8}, + &TokenPayout{"DskWAnriu6N2vrH27Ej5QyYK26S1xwHtt2r", 282.63795424 * 1e8}, + &TokenPayout{"DskwQbeMVSu45g1dZHhGjGY1UQHKSkzYF7Z", 282.63795424 * 1e8}, + &TokenPayout{"DskwtExgzsMQiBE2aSVBvRyFA3cWkJQemce", 282.63795424 * 1e8}, + &TokenPayout{"Dskwtm9RqgW1jXhB3AdwD5Fd3poybrguhrD", 282.63795424 * 1e8}, + &TokenPayout{"DskWz2Tsfb5Q1UcBL2ozCZWgB5TxjVokQJp", 282.63795424 * 1e8}, + &TokenPayout{"DskX6N1k9JCEZ5VnJM5mWBYFGFc3j58g9X9", 282.63795424 * 1e8}, + &TokenPayout{"DskxiKas2vGMyDNBvSTteasvszRmMiLy4Mf", 282.63795424 * 1e8}, + &TokenPayout{"Dskxo3mvUSzjttAHrpjgbNJJXd8ffrdCeA5", 282.63795424 * 1e8}, + &TokenPayout{"DskxSk9UGnf9kzEikNCJQbP1YRsdLv76j5r", 282.63795424 * 1e8}, + &TokenPayout{"DskxVrYoadW8oZKGpxTQJuZf4SwH4JR3dWc", 282.63795424 * 1e8}, + &TokenPayout{"Dskyat8zFBZGFjQDAirZd9NUodegBACyoXA", 282.63795424 * 1e8}, + &TokenPayout{"DskYFPtNiBgkC7U6rKWy4p8vTd51Pte8uSQ", 282.63795424 * 1e8}, + &TokenPayout{"DskyStjj8sBEnVR69sR2AHqZwGdpMypRJZj", 282.63795424 * 1e8}, + &TokenPayout{"DskYxg3wuiCq79KBrbDKbFRD17fkqytr4ej", 282.63795424 * 1e8}, + &TokenPayout{"DskZE5shN65EjVmJBeruyW95Vud73ANLTxP", 282.63795424 * 1e8}, + &TokenPayout{"DskzFWS2XTuWZRdz1S4x28EMSus2Hqgw4dH", 282.63795424 * 1e8}, + &TokenPayout{"DskZiG1zJen4UyWtTNGYzRtc3ES8mHAb5c7", 282.63795424 * 1e8}, + &TokenPayout{"DskZitj49RYtUtmQqXDjmVetCFfhqbFavM6", 282.63795424 * 1e8}, + &TokenPayout{"DskzMgVUeDtmoy3784LCy2gBsUCddJqNira", 282.63795424 * 1e8}, + &TokenPayout{"DskzNAtP6wvBxG9zAv3D78emG4pue5i6VLJ", 282.63795424 * 1e8}, + &TokenPayout{"DskzqXAQ5J79EZHsPfmZFH1pRirgdVxxdAL", 282.63795424 * 1e8}, + &TokenPayout{"DskzRpcYYgd8Drrv422iZwUhPF6rZ5Xwyf2", 282.63795424 * 1e8}, + &TokenPayout{"DskzrXMzwuNdGUciguF8CzjDrRmZ27VBTvZ", 282.63795424 * 1e8}, + &TokenPayout{"DskZsjbKdMJL1FNxEPdg4YMo12oo3zEz1n7", 282.63795424 * 1e8}, + &TokenPayout{"DskzUi3BNVpbR2RFRcmkikdZsXCRpD5wDK2", 282.63795424 * 1e8}, + &TokenPayout{"DskZYKpgwarMG9j95M3diyNoq9D9Xs9Jr7R", 282.63795424 * 1e8}, + &TokenPayout{"Dsm16EsvthWGGzXtzy4EYgih2tkLYDeEnLE", 282.63795424 * 1e8}, + 
&TokenPayout{"Dsm1Xj9zyKZ9WhcwPZYPjLMn5HD7MyHUTQD", 282.63795424 * 1e8}, + &TokenPayout{"Dsm2GBqBmFsguPf4C1TEf1uGTosAVu5SEZC", 282.63795424 * 1e8}, + &TokenPayout{"Dsm2U89EVpQB5XnnUFXFUx7WKYP3v8gkxjH", 282.63795424 * 1e8}, + &TokenPayout{"Dsm2WrPLj2P3xVB92B6zEAVqvxDmdE7mpxF", 282.63795424 * 1e8}, + &TokenPayout{"Dsm2xw2UGBhKMkzTrVDueoUGcBd35B3XeDi", 282.63795424 * 1e8}, + &TokenPayout{"Dsm36unPjQvgfZwsXzU9adBzdftXJDgYnRF", 282.63795424 * 1e8}, + &TokenPayout{"Dsm3Laqfz81DqKZJNxyWEwTJc4FGNGTkK5M", 282.63795424 * 1e8}, + &TokenPayout{"Dsm3T4N7EQ7J2BWZ1uen6jWUPnN1QvmC9HB", 282.63795424 * 1e8}, + &TokenPayout{"Dsm4777U53dAA3YZdefanTMLh78YAZ4PWe3", 282.63795424 * 1e8}, + &TokenPayout{"Dsm4wBvCxwa5zJn1RkpD1jdk2zEzGZyWYth", 282.63795424 * 1e8}, + &TokenPayout{"Dsm57W8cvbtgUCuXf7UAFFSehuUmRzNRba6", 282.63795424 * 1e8}, + &TokenPayout{"Dsm5rPz6GLp6VUQ6RHE3czXezoef2YWR1ts", 282.63795424 * 1e8}, + &TokenPayout{"Dsm7dYb8kcTjSYe5CvMPp7tL37LSuZi2K6J", 282.63795424 * 1e8}, + &TokenPayout{"Dsm7FkL7oGbn4BwqTLknpwVvcD4iRUYzDQh", 282.63795424 * 1e8}, + &TokenPayout{"Dsm7o4CnzcZ4Y2G6FWZdUnV1mQS4CwiAGwR", 282.63795424 * 1e8}, + &TokenPayout{"Dsm8B8bEu619DqMc53n6JsY9c3YSyPMWmfd", 282.63795424 * 1e8}, + &TokenPayout{"Dsm9eEFhvuPAL9nfia4jnEGUDNy7f1pduh8", 282.63795424 * 1e8}, + &TokenPayout{"DsmAeeQLmupSBDuDHEnvrL4i5c2MhJTXifw", 282.63795424 * 1e8}, + &TokenPayout{"DsmAzJqSrK2jQvf6nRaJCUMYvdvZPtD17PZ", 282.63795424 * 1e8}, + &TokenPayout{"DsmBFraSVpmAti7vz22etBbhK4qacnaeGUz", 282.63795424 * 1e8}, + &TokenPayout{"DsmBMc86B9BpKSMJRMcPwu9heQGAGvtPKFv", 282.63795424 * 1e8}, + &TokenPayout{"DsmbNfNPxttHnFnJEN2iZi7RcqZshXkiZ8N", 282.63795424 * 1e8}, + &TokenPayout{"DsmbsFnsUVimv4SB5X5SEv9ZnoutKhKC9UX", 282.63795424 * 1e8}, + &TokenPayout{"DsmBWfCaawQhRqtnoJGHPgHiRVjTqvkJjG6", 282.63795424 * 1e8}, + &TokenPayout{"DsmcE7aGLMrtWAbh6nnN4VBnxaqLFkJf3K4", 282.63795424 * 1e8}, + &TokenPayout{"DsmCvRDJT194tdFMXjNMpME6mjNVQeJkqq8", 282.63795424 * 1e8}, + &TokenPayout{"DsmCZeuKy8yp9TWhBULzCcaL22V1izrotxK", 282.63795424 * 1e8}, + &TokenPayout{"DsmD7j6x7CWzwjEd3KwvPY93XQHGDKhwSwn", 282.63795424 * 1e8}, + &TokenPayout{"DsmDC8d7m7GpZDSe3TGZiKb9M73ET65rd29", 282.63795424 * 1e8}, + &TokenPayout{"DsmdEQNqEtmMmzXcvU7a18xwH85t9Fis4Su", 282.63795424 * 1e8}, + &TokenPayout{"DsmDJ3eBm3TswokgMFAXjY3fokDuYUvhaJh", 282.63795424 * 1e8}, + &TokenPayout{"DsmdNPonEjUPDdWrv1fSqRfJNS8SRtZ9gry", 282.63795424 * 1e8}, + &TokenPayout{"DsmdpjBcfnAzftY9Hw9gVxjkeuyjNQ18Ak7", 282.63795424 * 1e8}, + &TokenPayout{"DsmDV3CvXFhaoh4fHQMusLDiW4MSMdsHY89", 282.63795424 * 1e8}, + &TokenPayout{"DsmdYRSERCPpeTwuFwV9uzqwoBac8git7o4", 282.63795424 * 1e8}, + &TokenPayout{"DsmegKrBADxtjgjGQmMSN5eDKJAfXptAXvN", 282.63795424 * 1e8}, + &TokenPayout{"DsmEM4d7TXhrwWdiWZDXj4iXHwqEYmCHQNE", 282.63795424 * 1e8}, + &TokenPayout{"DsmevHzQqcTNigsCyLKzJ3HA4MKByM776eU", 282.63795424 * 1e8}, + &TokenPayout{"DsmeVnpdbtMj4ADLagYx5H55SMC9V94wk1P", 282.63795424 * 1e8}, + &TokenPayout{"DsmeWhRyenxG8rFQx9Tma562K839DCLMPHe", 282.63795424 * 1e8}, + &TokenPayout{"DsmFDnyxSiNVdPSkSi8QE4vcDkPsirTZhUz", 282.63795424 * 1e8}, + &TokenPayout{"DsmfeG7vr7qeHjjsYs2bdW83XGbqGJ9icNJ", 282.63795424 * 1e8}, + &TokenPayout{"Dsmfgb6wQiVTTnYKWBhw4vj2cvh5PEigNos", 282.63795424 * 1e8}, + &TokenPayout{"DsmG593fMtVrXyfwrDhGphtbLJHnSMQxUh4", 282.63795424 * 1e8}, + &TokenPayout{"DsmGCnZkkH5yDCCNz4XhULGLULVUCBDide6", 282.63795424 * 1e8}, + &TokenPayout{"DsmGE3QCN7HPtxo4pDUHDN2vCb1Yhhi9TiL", 282.63795424 * 1e8}, + &TokenPayout{"DsmggpWVSWxakBBcJLPkCMLq1vohtQT9aSi", 282.63795424 * 1e8}, + 
&TokenPayout{"DsmgHhbnudDG3AXCD1FLC6tKhwcr9UNQpxu", 282.63795424 * 1e8}, + &TokenPayout{"DsmGJ1DHHBquLSx1yjTYELfrg2AY7zLrsWG", 282.63795424 * 1e8}, + &TokenPayout{"DsmGopZKR38WEbDYxP4JEJ6Bxa97q84KmU2", 282.63795424 * 1e8}, + &TokenPayout{"DsmGSMFmSqRX81TMZCSeWG8DkmWgwoLdR93", 282.63795424 * 1e8}, + &TokenPayout{"DsmgzoVk8He9rAeLnrHY8rY5afWhi1sYmTV", 282.63795424 * 1e8}, + &TokenPayout{"DsmH4EsfRmBf1TxyT9hvJNHkmxSoWPBb9Az", 282.63795424 * 1e8}, + &TokenPayout{"DsmHC2UTX7N9oogdx5kmZWbBA7NXLQ9zPJJ", 282.63795424 * 1e8}, + &TokenPayout{"DsmHxziLXNKowLzS3SzhW7Et44Q8KveRy74", 282.63795424 * 1e8}, + &TokenPayout{"DsmHzHJVkVo2Q6Zy2nywRQxWTAPKCZX6wVK", 282.63795424 * 1e8}, + &TokenPayout{"Dsmih8eKMz4TpvsVWxRSgrmD1xgrk8ckjMt", 282.63795424 * 1e8}, + &TokenPayout{"DsmiMvUwS7cgQUWsqW2TtoWGrz2iQALZvf7", 282.63795424 * 1e8}, + &TokenPayout{"DsmizFFM3WuzTxNWkKP5m6e53F4P8Jts8Ar", 282.63795424 * 1e8}, + &TokenPayout{"DsmJ2D6TMjtTcNQjS8rBRpn654zYXwo6WuL", 282.63795424 * 1e8}, + &TokenPayout{"DsmjdV1S28khQSrFxbUtgFnYrsJLJ2i9tgW", 282.63795424 * 1e8}, + &TokenPayout{"DsmjE65SP8B1DmrvxgxPxFQLiVqrqZ48TcL", 282.63795424 * 1e8}, + &TokenPayout{"Dsmjn6WMQ2aZ72G9rHfAbVzv1KED3ThWXwQ", 282.63795424 * 1e8}, + &TokenPayout{"DsmjWBo5DKg3YSweNrpRXrejo2jJ8a1XSDw", 282.63795424 * 1e8}, + &TokenPayout{"DsmjZKw5HDre75pevWJiPNJzzDwaigSDC4D", 282.63795424 * 1e8}, + &TokenPayout{"Dsmk7714uZiy8idoP6gVS8mGEEx4sf7SuFT", 282.63795424 * 1e8}, + &TokenPayout{"Dsmk9M4fcq7JU33jyMSidU14Yb3QVrPY5RV", 282.63795424 * 1e8}, + &TokenPayout{"DsmL3dTRySFwnEmYBzWPP3V6c1N3tu8QaVn", 282.63795424 * 1e8}, + &TokenPayout{"DsmLiLA8cKUQWPk9egpFhVz16kNmAV8aUP3", 282.63795424 * 1e8}, + &TokenPayout{"Dsmm34TGrt2pSwELaBGHV8SeoVVZ2uvPcRC", 282.63795424 * 1e8}, + &TokenPayout{"Dsmm38EJMaBGMnYcGmG7KzjGAr9LGMLVE9B", 282.63795424 * 1e8}, + &TokenPayout{"Dsmma53ukRTbr6A2uj2xE8Nhr96hqo6tKUq", 282.63795424 * 1e8}, + &TokenPayout{"DsmMafWQGunC3ZEEF3CY8jFAt8PpPC2aMy8", 282.63795424 * 1e8}, + &TokenPayout{"DsmmTdhsZzkyiPXuiLGmRwPCVzJ6tf7b2Ss", 282.63795424 * 1e8}, + &TokenPayout{"DsmMwDyKKhJjdjU3VpvbDnJBCR86bv75i8A", 282.63795424 * 1e8}, + &TokenPayout{"DsmMWPipYNhpFc3seJo3YBWFndrBCgkWqaN", 282.63795424 * 1e8}, + &TokenPayout{"DsmmZLCeT4wkPjVXM17K3XWwzKgtb85s8m6", 282.63795424 * 1e8}, + &TokenPayout{"DsmN1mhy52hdfuS5PdpUAobYFdiZe9b3rjB", 282.63795424 * 1e8}, + &TokenPayout{"Dsmn1qVqLYGK1K9k1LuWDXdrM7LL9NxYUbD", 282.63795424 * 1e8}, + &TokenPayout{"DsmNF4Smi35PACr5UFM7t2QqZVvM4qzeaL8", 282.63795424 * 1e8}, + &TokenPayout{"DsmoxaSFsFoLzoPxpZfGZPMkf4vQqYwk7sr", 282.63795424 * 1e8}, + &TokenPayout{"DsmPczzT4jkvyi66WuDzHPwux7JFdpujRG9", 282.63795424 * 1e8}, + &TokenPayout{"DsmpfoVoU6Dvz7XPiG82KjmGroK5Kqujp13", 282.63795424 * 1e8}, + &TokenPayout{"DsmpfzwcHfMvyfapogix9dajLBoEjZVM3T4", 282.63795424 * 1e8}, + &TokenPayout{"DsmpJuATBGsEiBGnS3SAo5JMxZmQJSwZWrw", 282.63795424 * 1e8}, + &TokenPayout{"DsmpoyXSQFLoDBGyq2RhCTdUHEj2UoSmizS", 282.63795424 * 1e8}, + &TokenPayout{"DsmqL3b98cFE7snqvZ5g3fbXvJwNEnDbRfT", 282.63795424 * 1e8}, + &TokenPayout{"DsmqLFheFSuKK6S1FmpnECELc5whi5bFvKF", 282.63795424 * 1e8}, + &TokenPayout{"DsmquhdD8U378MssgskBHiWjY8brNNAVe67", 282.63795424 * 1e8}, + &TokenPayout{"DsmQVdWCcgXJp3E9mQ98ua58665kkgAGH9J", 282.63795424 * 1e8}, + &TokenPayout{"DsmRMnwLZ5k4BFpqJFbjfMRsodkTQmVwyHD", 282.63795424 * 1e8}, + &TokenPayout{"DsmrnWvSsLmv4zmv8cR44eRhEhWJThFaAiv", 282.63795424 * 1e8}, + &TokenPayout{"DsmRsHzLjk7xpHocYaypvSVQ9tbHQ4keiaC", 282.63795424 * 1e8}, + &TokenPayout{"DsmRSK68HF5xN8qAs4nr2fJvw4jy8YJJ2QR", 282.63795424 * 1e8}, + 
&TokenPayout{"DsmRVWSwrDbKNDof6WWvwuNkRF3nJ6nfCs1", 282.63795424 * 1e8}, + &TokenPayout{"DsmRXF7Yj99gtrLPEypPCdHndenX5oY2B6x", 282.63795424 * 1e8}, + &TokenPayout{"Dsms47Fmnk4Qn5zNvn7KqqwBJGimu9RKpPt", 282.63795424 * 1e8}, + &TokenPayout{"DsmsjpAfVeibHegQNS9eBiQq2W4EQmMAA9A", 282.63795424 * 1e8}, + &TokenPayout{"DsmSm8TsYi8VWVeaTX5kFhwKC1CNw8hAoWn", 282.63795424 * 1e8}, + &TokenPayout{"DsmsUJ9NyxRskFboD8xf685ShnHRVa4Jm2c", 282.63795424 * 1e8}, + &TokenPayout{"DsmSuwnCJmofyst1Kuhxpec3WMAC3uTe73w", 282.63795424 * 1e8}, + &TokenPayout{"DsmtMskZVzvDgtsdGgATuN9fDKT5KDgfLZU", 282.63795424 * 1e8}, + &TokenPayout{"DsmTpbjoSTd8Sjg55m7bE8uVAX1BYnohp35", 282.63795424 * 1e8}, + &TokenPayout{"DsmttD2mMcpSDZw35p9Zdeva178xtjjDXkr", 282.63795424 * 1e8}, + &TokenPayout{"DsmtwPzmA7PnM6tRdLm4ncHSbx9KT5FiqhG", 282.63795424 * 1e8}, + &TokenPayout{"DsmTypupZPLGu5SH6YayFJ9sWYpB1n3ojyU", 282.63795424 * 1e8}, + &TokenPayout{"DsmU4jqM8DUY759tcoMiLpiyrEsixiHLKkR", 282.63795424 * 1e8}, + &TokenPayout{"DsmU6a4JRYX9H37wQy3SDaNKSjqumFJpeaQ", 282.63795424 * 1e8}, + &TokenPayout{"DsmUAcMt2yLs8281Mhmi76qFMGUYPPo1jYe", 282.63795424 * 1e8}, + &TokenPayout{"DsmUPc7tYgMKzbNQf72JP2oXdoJ8Nn8mm5g", 282.63795424 * 1e8}, + &TokenPayout{"DsmusgeQdX6k5PJ998KjjJbyVGyxE7xeSmt", 282.63795424 * 1e8}, + &TokenPayout{"DsmuW4fELxAKr6jCAmkKPETBS7jWxraS7ry", 282.63795424 * 1e8}, + &TokenPayout{"DsmuyWMx8EEH8z7mp7JjXqMfyVgCvLdenz3", 282.63795424 * 1e8}, + &TokenPayout{"DsmUzNG7FVRbzqDVDfNd3nYiXNPWvMA8H2Q", 282.63795424 * 1e8}, + &TokenPayout{"Dsmv1iy4aVQJRNp4uQ1ZYSB18E1fq7g8BMb", 282.63795424 * 1e8}, + &TokenPayout{"DsmV5Bg227DHz8RFP1PMPJSsgA3X5RjeUZy", 282.63795424 * 1e8}, + &TokenPayout{"DsmVAk2xohYEucQXyxjMgvJobV1j5K5LudC", 282.63795424 * 1e8}, + &TokenPayout{"DsmvdM694rBgQZraGw1drRnHQoqWKhCv2yM", 282.63795424 * 1e8}, + &TokenPayout{"DsmVzMMdKScctQEMogmEMWs1dJVhmfTmuND", 282.63795424 * 1e8}, + &TokenPayout{"DsmW54NsBeydv3spbp3rYuTxrsvXcb7fArT", 282.63795424 * 1e8}, + &TokenPayout{"DsmWgaKCsW4EnjVkBY2asKTLuMdbBN1u5ph", 282.63795424 * 1e8}, + &TokenPayout{"Dsmx1RDtNZb8qTpJTDG6oF6VytM8XYQjTyo", 282.63795424 * 1e8}, + &TokenPayout{"Dsmx7wG5czsDQqo717hM2P9N2g6maHSgWFE", 282.63795424 * 1e8}, + &TokenPayout{"DsmXe2v2X8oJmqe4oz2Ky1pdZMmUYTd3oDF", 282.63795424 * 1e8}, + &TokenPayout{"DsmXNdciZLMM7id844tST2T6MVnW1GfAPLb", 282.63795424 * 1e8}, + &TokenPayout{"DsmXsq7HSwbna5h8Fq1u7DfVcFihVigefxt", 282.63795424 * 1e8}, + &TokenPayout{"DsmxTnuu8uwEjK7doJVEL4grXhU56xsaAzn", 282.63795424 * 1e8}, + &TokenPayout{"DsmXuENHUiGPCFUZ2HQYj9MRGRTTKCQwv84", 282.63795424 * 1e8}, + &TokenPayout{"DsmxY662Hvm5d7SsQYEvYvgcbFMpFagqFo7", 282.63795424 * 1e8}, + &TokenPayout{"DsmyARg18TG232becMWr95oJZ2MhvKYkxXX", 282.63795424 * 1e8}, + &TokenPayout{"DsmYuZJP1gDBrvJSin6rYcvoBdxZi7KyVFC", 282.63795424 * 1e8}, + &TokenPayout{"DsmZFovHNt2uCne8EcNEs37WmV77Y4cmx7x", 282.63795424 * 1e8}, + &TokenPayout{"DsmZNzuRhK6RCwjeKUd25PrKu5Mw5J19aYm", 282.63795424 * 1e8}, + &TokenPayout{"Dsn15Sca1FxWEJQT1uW3fWmfNpyg7nVo37o", 282.63795424 * 1e8}, + &TokenPayout{"Dsn1hDtSyTs4NGSH7Rw4UCnbjRs3kBBSzR1", 282.63795424 * 1e8}, + &TokenPayout{"Dsn1uXBxcK2FTbg46Nwmzq86T1GUfyFDWPM", 282.63795424 * 1e8}, + &TokenPayout{"Dsn2bd3Dc6VXSvf5JdeF3sddRkcWaGqPbAy", 282.63795424 * 1e8}, + &TokenPayout{"Dsn2rGCXAZMEW81xR8C7cgJLUb88FGzvWsk", 282.63795424 * 1e8}, + &TokenPayout{"Dsn3Bh8d1fwB3WZFkkEg447NkyWdgAoM7Lc", 282.63795424 * 1e8}, + &TokenPayout{"Dsn3h83b9TqcA52C5knHMQNctBgJuQqY35X", 282.63795424 * 1e8}, + &TokenPayout{"Dsn3NoKxZXUboesNJ7MpnA9oNcAr9axhuJ7", 282.63795424 * 1e8}, + 
&TokenPayout{"Dsn41ujE9hx4X3XvsCmzt3vWXSKT9Ea8owP", 282.63795424 * 1e8}, + &TokenPayout{"Dsn4pQ2py4nVD9Mq3MJSwkXmaYNRHBHQWaw", 282.63795424 * 1e8}, + &TokenPayout{"Dsn4VaFfJvzHDThqFW1fLcj32NQLm79xVGG", 282.63795424 * 1e8}, + &TokenPayout{"Dsn521mXVmcUDPcPRUqZGVzRYH37RSFKmjE", 282.63795424 * 1e8}, + &TokenPayout{"Dsn58utZBWhHGhwqPMd7EWLdQwxVJN9BNLZ", 282.63795424 * 1e8}, + &TokenPayout{"Dsn6GfnYUnZ5AXgC7SSBt8Rbfeg2jGsNhBJ", 282.63795424 * 1e8}, + &TokenPayout{"Dsn6gUo7mPmwc7bz5f1138s8aBJCZz7A6gm", 282.63795424 * 1e8}, + &TokenPayout{"Dsn6J8vtEGUvPds1ZpWRDR14SYqDnJQRt1v", 282.63795424 * 1e8}, + &TokenPayout{"Dsn6wwPiVt5QwDp1VbrWwsBiaEzUF8jpoog", 282.63795424 * 1e8}, + &TokenPayout{"Dsn7bNTBuGrUvAMLu2FqmVhtYXsFVhqmoWD", 282.63795424 * 1e8}, + &TokenPayout{"Dsn7ttuPuKBHKCzunvoMNFzsf42YHTHNLF9", 282.63795424 * 1e8}, + &TokenPayout{"Dsn8FhaqnCa211UfP2GXZdkyepKhMo7YQur", 282.63795424 * 1e8}, + &TokenPayout{"Dsn8RSzoGBH4bkFetHqMjeK3xz1FwCmKZij", 282.63795424 * 1e8}, + &TokenPayout{"Dsn99SUXETia5NBwqEiHyu9njK61522vwFj", 282.63795424 * 1e8}, + &TokenPayout{"Dsn9nKLdKZpBRjqMmXx6rUm5o9HrVi7Vc5o", 282.63795424 * 1e8}, + &TokenPayout{"Dsn9Wf5tV8s8EaSKMfUPFzmg9PPNuf5b4Tq", 282.63795424 * 1e8}, + &TokenPayout{"DsnA7UNJNVecMBnPcsuaQxSwMKxtdTV9KY7", 282.63795424 * 1e8}, + &TokenPayout{"DsnaAq4hTjBvbim5TNSuGXwbdec7phsttg9", 282.63795424 * 1e8}, + &TokenPayout{"DsnaCD1KowEqVdtDvqLCDKiehjgs16Hzige", 282.63795424 * 1e8}, + &TokenPayout{"DsnaQMUPtGDFrRvPLrGXx14cma1aqteSgfu", 282.63795424 * 1e8}, + &TokenPayout{"DsnB9tTfRSHnhtbM9B65GcDgstryTUvAdrx", 282.63795424 * 1e8}, + &TokenPayout{"DsnBgBEmQQgKXNfPfkEA27fT3TaT7EdeVdt", 282.63795424 * 1e8}, + &TokenPayout{"DsnBgnvLcPgKAiAqVj5XcRwmdLk1h8mN3tW", 282.63795424 * 1e8}, + &TokenPayout{"DsnbJy2z4r9oWaFnYiAXrSZ1LG8MD1vteej", 282.63795424 * 1e8}, + &TokenPayout{"DsnBkwTE9CJbdbtpbJPB5ePN1mkFEPirqDS", 282.63795424 * 1e8}, + &TokenPayout{"DsnbPkfD266XQwYWpyhy2oUyk3uk3HkQgkr", 282.63795424 * 1e8}, + &TokenPayout{"DsnbXYAXyz2Gj7QreFjnRUoGHmvQ6ccXQuM", 282.63795424 * 1e8}, + &TokenPayout{"DsnCdHLnwnMyHQinVc2vwqB6W8QPTtJf6Je", 282.63795424 * 1e8}, + &TokenPayout{"DsnD7eTL76kcy6yY7P8dpXZULKHbwdtKb6r", 282.63795424 * 1e8}, + &TokenPayout{"DsnD8W7jEGVUuzUaGQvSPNnuy2djon6a1ZT", 282.63795424 * 1e8}, + &TokenPayout{"DsnDbsAkmyyYXurMhExp6WCcdCBRNvn1ZcY", 282.63795424 * 1e8}, + &TokenPayout{"DsnDTAGATDqQnPBBV1uCw8EHvWME3tWhU5z", 282.63795424 * 1e8}, + &TokenPayout{"Dsne7DzcYwdLEC8D9h9kuhjwDbh7e8bCCZU", 282.63795424 * 1e8}, + &TokenPayout{"DsnEBNgCrFpvP6yfTrozNRvbGB9F4tgRf7d", 282.63795424 * 1e8}, + &TokenPayout{"DsneeQsSnozzTXzvbYTZRaqawwVv7QTCKDt", 282.63795424 * 1e8}, + &TokenPayout{"DsnEFEpub6FaU8prRntGBVT7mrgfrk2ViFd", 282.63795424 * 1e8}, + &TokenPayout{"DsnfJiBfKwfsYQvfDctAEhou1wnEbQFWR3x", 282.63795424 * 1e8}, + &TokenPayout{"DsnfTbDZRC6E9YHTFKX54AedvPyWSUaoide", 282.63795424 * 1e8}, + &TokenPayout{"Dsnfy71pGD1rDuHVkgubNYXjZ94uZL4Zw82", 282.63795424 * 1e8}, + &TokenPayout{"DsnfzEvqXiCCuKzV6VeYm6x6c2jTMesrcVM", 282.63795424 * 1e8}, + &TokenPayout{"DsnG1phyK84fVTpQKu23jFnY2ntmgoea2zJ", 282.63795424 * 1e8}, + &TokenPayout{"Dsng2cqW9znDL6z7UP3iZVDbgnvPk4f2XYb", 282.63795424 * 1e8}, + &TokenPayout{"DsnG85zkuEJgppqQEkRNktfry3BWCumFWTP", 282.63795424 * 1e8}, + &TokenPayout{"Dsngb94ULsUqJ9Y6a21RFP3PuHGfW1NJHod", 282.63795424 * 1e8}, + &TokenPayout{"DsngDeXvgkaY2KF2uWk2EjTAU84G2du7F2h", 282.63795424 * 1e8}, + &TokenPayout{"DsnGhvtvCYA8nSbajMQK5xZbASpQfa4YFX2", 282.63795424 * 1e8}, + &TokenPayout{"DsnGJuFSH3bJsuqtL4snGuQqRyYYF3vYEvX", 282.63795424 * 1e8}, + 
&TokenPayout{"DsnGVSXsYuF8gEU2PANmnhpbPn2C4DERQYL", 282.63795424 * 1e8}, + &TokenPayout{"DsnhmLRZ7F2VuibDd83mv2k2KtNja3iigmm", 282.63795424 * 1e8}, + &TokenPayout{"DsnHo1nNruwf2MyGwpRfBWcta5vD8dR5KTa", 282.63795424 * 1e8}, + &TokenPayout{"DsnHWq3bNpqVyjmVVd41pGzcE7gur78bkuQ", 282.63795424 * 1e8}, + &TokenPayout{"Dsniq5ZZ1NMjZZ8SBc8zdKfbF1tFdP2C5ob", 282.63795424 * 1e8}, + &TokenPayout{"DsnJ55Y4uKcvjtgkcvzPxjn74oncqAJ3JCh", 282.63795424 * 1e8}, + &TokenPayout{"DsnJa2SScy8oFWEfJHcDdU5jK2vjZ5BN4qu", 282.63795424 * 1e8}, + &TokenPayout{"DsnJKRbNLmdUy5cPfWJHUexP6TQxqtJhUpr", 282.63795424 * 1e8}, + &TokenPayout{"DsnKpfPqwWC5F5VoK3UqPYdtQiA7cFcLryR", 282.63795424 * 1e8}, + &TokenPayout{"Dsnks5jHLYk2Wveb5YgutLvXtoLoN6B9n81", 282.63795424 * 1e8}, + &TokenPayout{"DsnKsZ7ZaZzgS16uJrVA8yMACtiABRvGSnQ", 282.63795424 * 1e8}, + &TokenPayout{"DsnkXwCh89dDoY4LmdbcctdHAK5xvoX4fSU", 282.63795424 * 1e8}, + &TokenPayout{"DsnLwVYHyzGp5Ps6waY4WMA5A62HttSuC84", 282.63795424 * 1e8}, + &TokenPayout{"Dsnmmw3LZzQAHdvGekHEKQyKCK7VQKjwEf3", 282.63795424 * 1e8}, + &TokenPayout{"DsnMoUwpetwa16E7efPiCM9Q1gusvPhaFWg", 282.63795424 * 1e8}, + &TokenPayout{"DsnMP18TVkJMFRxZCKDSXoEoknKDPE1Zcb9", 282.63795424 * 1e8}, + &TokenPayout{"DsnN6urC24o4s66YFYQu6Qrq1PyuBgTA2ja", 282.63795424 * 1e8}, + &TokenPayout{"DsnN7jhveS9BJbak2PGK2kcqgSUXuAaceMF", 282.63795424 * 1e8}, + &TokenPayout{"DsnnzB3m2N2kYVvR3ci3137FiXYbbP4GEZu", 282.63795424 * 1e8}, + &TokenPayout{"DsnnzBePQLTqq891H7LNMg6CdgZLDnckTjS", 282.63795424 * 1e8}, + &TokenPayout{"Dsno6bgeFmw5SnQ1oHMbcFamDQUtmaJHpvV", 282.63795424 * 1e8}, + &TokenPayout{"DsnpmUwpznBQUFwY1LU45ZB2otjPpuq5yRm", 282.63795424 * 1e8}, + &TokenPayout{"DsnppFXZjwj2sYreHcMvJcoBosyP817PXzH", 282.63795424 * 1e8}, + &TokenPayout{"DsnPxoyW3ziXuWWyz15sqvSkeWV6jbn33aZ", 282.63795424 * 1e8}, + &TokenPayout{"Dsnqd2Fh9e2VfRs8mKrPndjsvota9dcdgQp", 282.63795424 * 1e8}, + &TokenPayout{"DsnqD8B2MZJLh7JXHALxGcZ2hnH9T7yPDPc", 282.63795424 * 1e8}, + &TokenPayout{"DsnQHw9Tv15QFqgEYSDoaruxMgcxCmHbhLC", 282.63795424 * 1e8}, + &TokenPayout{"Dsnqjf5bfw1YYw1FUYN5RZoDz3p7oH6LS9G", 282.63795424 * 1e8}, + &TokenPayout{"Dsnqp6LPFMSADn2cR5FFYgFuV3Q7L4jN9GX", 282.63795424 * 1e8}, + &TokenPayout{"DsnQTHyTNXkwFmGR7NuMNTUdRcWQmFtJbZY", 282.63795424 * 1e8}, + &TokenPayout{"DsnQVJYcZZkTE75kXTmNzpxEFRT56eE4GHu", 282.63795424 * 1e8}, + &TokenPayout{"DsnR4nT8gSnYLaqmomsJjoEnCijQ6z1qxQh", 282.63795424 * 1e8}, + &TokenPayout{"DsnR7KkJc5onfKqZDB7PV7AWoMk1MjgRewD", 282.63795424 * 1e8}, + &TokenPayout{"Dsnr8Xpk1E2mPVpw2UCmsSGhm3KBizKMohs", 282.63795424 * 1e8}, + &TokenPayout{"DsnRbhq8vCbYJJyAs8YqD3AMTPqPnpCyw6z", 282.63795424 * 1e8}, + &TokenPayout{"DsnrFE6nR6XLtYPD2js6xhx2tBbTAsJvheW", 282.63795424 * 1e8}, + &TokenPayout{"Dsnrh8iscZRTrjR1XggY5MV5gcEnGdjyg3i", 282.63795424 * 1e8}, + &TokenPayout{"DsnrM1535q223uiaWBrfXkoHfR6gFQteSTx", 282.63795424 * 1e8}, + &TokenPayout{"DsnRrPgJkzaLacC2utVfpAE9xoDHfW2jWuR", 282.63795424 * 1e8}, + &TokenPayout{"DsnsK7g4Vemdwwm4YPXf9ERpzob1FSMf14G", 282.63795424 * 1e8}, + &TokenPayout{"DsnSo6dkBFz4etLLefX6JGti8r9TMtSXHwW", 282.63795424 * 1e8}, + &TokenPayout{"DsnsrAgfBrTaFd6d6hD3KaAD7r2Te4Dv9TN", 282.63795424 * 1e8}, + &TokenPayout{"DsnsrxFjbM1t7e8tkvbXrB5B9nU3Z1WQ51u", 282.63795424 * 1e8}, + &TokenPayout{"DsnsteqJecunYstyFHSLewCq9TWrGVSgb8C", 282.63795424 * 1e8}, + &TokenPayout{"Dsnt5BxoWw7CwzVbCeUd3352FXzfk2EjnPb", 282.63795424 * 1e8}, + &TokenPayout{"DsnT8uaSBB9jW6f7YBfTTrBVKBK53eVyH4K", 282.63795424 * 1e8}, + &TokenPayout{"DsntHX7Tw3EiSiME4GDjm6zCUqM3wt8u7Mt", 282.63795424 * 1e8}, + 
&TokenPayout{"DsnTmqvtrsMURGSswS7wwTj13mHqiPrB9Pp", 282.63795424 * 1e8}, + &TokenPayout{"DsnTZ78DBoeABxLncs8ZsaCPRxmQT7wuatX", 282.63795424 * 1e8}, + &TokenPayout{"Dsnu1o2UhGa972LRYu2pTWu4nrp4f4NorCX", 282.63795424 * 1e8}, + &TokenPayout{"DsnuE4YyWUi3VUcQ7EczyJ4RGh6wVZgyJ9U", 282.63795424 * 1e8}, + &TokenPayout{"DsnUitpoi8DZDeXsusMjLQKrBFSxoviEhwJ", 282.63795424 * 1e8}, + &TokenPayout{"DsnurN7M4YFqWfFzYwGfkmZQTrRA3pLfPbg", 282.63795424 * 1e8}, + &TokenPayout{"DsnUzUboR6NtdkGZYqnzcvP22ihFGG4mMyF", 282.63795424 * 1e8}, + &TokenPayout{"DsnVbfZJcFTQhArGxdGBgq6kB2LzRf7Pvo9", 282.63795424 * 1e8}, + &TokenPayout{"DsnvfbUu6XCTopcDvjvmi8fjijQYEbaEm3i", 282.63795424 * 1e8}, + &TokenPayout{"DsnVHW1BKJoGmL26d5AgncyA2Muhscxe6Wo", 282.63795424 * 1e8}, + &TokenPayout{"Dsnvruk3oML97oR4BD1wDXWgx3KSF8u6cGe", 282.63795424 * 1e8}, + &TokenPayout{"DsnvuPxnM1xzXoup8hZwBrzq9caFMuT1JLp", 282.63795424 * 1e8}, + &TokenPayout{"DsnwCiAuN4wsceQLZ9MHGNsg8yENBz6axbP", 282.63795424 * 1e8}, + &TokenPayout{"DsnWdz5SBBtmm7E6MmArfdUBhnwThq5g6eG", 282.63795424 * 1e8}, + &TokenPayout{"DsnWt1XenwzCgw1viYMhh8M2vfnFCokZSki", 282.63795424 * 1e8}, + &TokenPayout{"DsnWt7abiAaymxHH9kSYYQt8iz3SMRuXWcf", 282.63795424 * 1e8}, + &TokenPayout{"DsnWY7eDydsjd2CeDa3x5YDQcF8L6pCTpRD", 282.63795424 * 1e8}, + &TokenPayout{"DsnX6P5NHSDuwAmLNTrWJfa6CRzy979xoMY", 282.63795424 * 1e8}, + &TokenPayout{"DsnXGShxDuePaX4nhbEk5X7jNsU5bLx8rLt", 282.63795424 * 1e8}, + &TokenPayout{"DsnXu4vNHttWxexPtDZb7rd5aHLnbY7W1yB", 282.63795424 * 1e8}, + &TokenPayout{"DsnxufGNDm2mMTiNrCjduRufZ2UvwVRFonP", 282.63795424 * 1e8}, + &TokenPayout{"DsnxupKyDfdZnbUnNTdnXUecQJgHJjG4juN", 282.63795424 * 1e8}, + &TokenPayout{"Dsnxy9sSme9A1yEU6bSDGaYfxHDse36NPJC", 282.63795424 * 1e8}, + &TokenPayout{"DsnY2SKv8RAB3mjXeUxVPqqdYXJZDMHjABL", 282.63795424 * 1e8}, + &TokenPayout{"DsnybLYXWUwjvPAP6Gmnm8fNPpNhUt1AhB7", 282.63795424 * 1e8}, + &TokenPayout{"DsnYda77rVbyFqc5tV43cohFpWwDpkaUirL", 282.63795424 * 1e8}, + &TokenPayout{"DsnydY8LmdGJk6VgrtA1Voi4BAGVW5bWfVy", 282.63795424 * 1e8}, + &TokenPayout{"DsnyoysNeYNSkGxKZ3V7B9gC8NrPeThiBdS", 282.63795424 * 1e8}, + &TokenPayout{"DsnYQfmgwCdsh2FmGzk8VjcuRqy2QThrsPP", 282.63795424 * 1e8}, + &TokenPayout{"DsnYR2TFAPpLGamWrV5aavX4XWJy39aKc8r", 282.63795424 * 1e8}, + &TokenPayout{"DsnZoGvXYTDRUQBLb9mWsDbJkJvTFDaVDYo", 282.63795424 * 1e8}, + &TokenPayout{"DsnzXQ7Kfh7F1Bx4BTY8CofyUdLeY4ibecb", 282.63795424 * 1e8}, + &TokenPayout{"Dso1xX7jTg8qoeqy67rkyz5m8vixKz7oYD9", 282.63795424 * 1e8}, + &TokenPayout{"Dso3WQbomt59Avq8zz96pLELjzFh7kgGfed", 282.63795424 * 1e8}, + &TokenPayout{"Dso4DBxjX3x4c77xjxsXKX63VEMh6jo5cqK", 282.63795424 * 1e8}, + &TokenPayout{"Dso4RLYbAqqMPzN7Y81rmHgv3yREYZrh2yR", 282.63795424 * 1e8}, + &TokenPayout{"Dso5ij1qQdqoCJejyiTGG599TpTZ7KPncwz", 282.63795424 * 1e8}, + &TokenPayout{"Dso5XnLnMmgomfAF9T5H83Kfu4ENf77tqyS", 282.63795424 * 1e8}, + &TokenPayout{"Dso62zdsYYLBjcANK7HwtVHqnPUPYgXZx9B", 282.63795424 * 1e8}, + &TokenPayout{"Dso67piN7K9v8UAPBQxN616jD5rXuMU1mfi", 282.63795424 * 1e8}, + &TokenPayout{"Dso6aMT4o8Xxq6SUbxARjkEhqhGUtkaVLUG", 282.63795424 * 1e8}, + &TokenPayout{"Dso6oJzv4WRxUnU8z2xGfPBtRd6e6YoVCfM", 282.63795424 * 1e8}, + &TokenPayout{"Dso7ZBC3yEoQwQ4uT9iHNeEZgYevmYqVzLV", 282.63795424 * 1e8}, + &TokenPayout{"Dso89MmTMtmhFNhDS69X2g2ScTULvKioNX4", 282.63795424 * 1e8}, + &TokenPayout{"Dso9BRPKj9MUzB19oGwTJ7EUnNRH7CoYoMj", 282.63795424 * 1e8}, + &TokenPayout{"Dso9odXCDtN3hGsQvgwG7fzC1wY5mS8bnHn", 282.63795424 * 1e8}, + &TokenPayout{"Dso9Rw1pgEY5qxvx2cPswUJBUdNHk9H3rQU", 282.63795424 * 1e8}, + 
&TokenPayout{"Dso9zn1inoJAmujPnxSjaZ3ExAPVv5ia9M7", 282.63795424 * 1e8}, + &TokenPayout{"DsoA8qjkcLseLjnAtyWi6gSah6F3EnYGDs5", 282.63795424 * 1e8}, + &TokenPayout{"DsoAChoh3b9K4tkopcE9vTais9qvMKb8X3j", 282.63795424 * 1e8}, + &TokenPayout{"DsoaHsqCMeFeQYzXbXgqPTWk6deGTmqVhqw", 282.63795424 * 1e8}, + &TokenPayout{"DsoanwikdCNRzn49VmcY9UydCXb7vVh18KG", 282.63795424 * 1e8}, + &TokenPayout{"DsoaTM4Zp33Aey4Wmcn53Zq263djoRu3qw5", 282.63795424 * 1e8}, + &TokenPayout{"DsobBjeL9rutYtaBxqyA52DtzQUBUDQ3Bv2", 282.63795424 * 1e8}, + &TokenPayout{"DsoBkaUuvjjhYGVrw2kWptN3xpJeELra3oU", 282.63795424 * 1e8}, + &TokenPayout{"DsoBPekcCwyynPVL3inmGMaPvtVp1sw3UCk", 282.63795424 * 1e8}, + &TokenPayout{"DsobsfBCeiH6XEFM7qvLVPqvmuqxvV2tEY7", 282.63795424 * 1e8}, + &TokenPayout{"DsoBV6JpjGqsri5hP3etciesjUYkyGSfqYH", 282.63795424 * 1e8}, + &TokenPayout{"DsoC1nfDG48iqV7fPxpbFKwXihMMLq35X2B", 282.63795424 * 1e8}, + &TokenPayout{"DsoCkbvKuGCPtwCTx2rgDQJy7AMuhrohGit", 282.63795424 * 1e8}, + &TokenPayout{"DsodfQ8tihQqBnB8c5P376nrurnFxiaBS9U", 282.63795424 * 1e8}, + &TokenPayout{"DsodJFEmss5tfm2gsVKS9LikTUJpJW7zdsv", 282.63795424 * 1e8}, + &TokenPayout{"DsoDKWr9wji2tiPJLPvHUbHdEpqbSAjHpPL", 282.63795424 * 1e8}, + &TokenPayout{"DsodLp3cZkJ7MMwg8yo2qZ8LgD9asf29Sag", 282.63795424 * 1e8}, + &TokenPayout{"DsoDw9u9XN5qPRys7SBKJnTQifkfRvSxS66", 282.63795424 * 1e8}, + &TokenPayout{"Dsoe6kSbdXAy2iWJenuUgrvJSJE9onVBGHr", 282.63795424 * 1e8}, + &TokenPayout{"DsoECZbbtzkuwU1sFCS2zjqkUv6c3h3VaZ5", 282.63795424 * 1e8}, + &TokenPayout{"Dsoeq4B46DPJVZiFV5VXEszD6GCkNddEcTy", 282.63795424 * 1e8}, + &TokenPayout{"DsoEYEvJ4pCyorfiXZmogVN3jquDtaE5FMz", 282.63795424 * 1e8}, + &TokenPayout{"Dsof8AWya23km8MUrKovSUiiWDU9YNmYW2p", 282.63795424 * 1e8}, + &TokenPayout{"Dsof917ByNa3J4hvJkbpPPRcdGcWA4aWouy", 282.63795424 * 1e8}, + &TokenPayout{"DsoFiGz2i3z3GJJtNBpfTgPg21ddJmf5Aj8", 282.63795424 * 1e8}, + &TokenPayout{"DsofNvavJzHbDGE9rxvhD83m8wjdGNA8o4n", 282.63795424 * 1e8}, + &TokenPayout{"DsofrMizU8CQ2ADASeow4NhNKFRnztGNdBs", 282.63795424 * 1e8}, + &TokenPayout{"DsoFwBV6g1Bt3FDPXyGck84MNYxcsWcjG7r", 282.63795424 * 1e8}, + &TokenPayout{"Dsog24nftLb1rR6EpYfSsMfbSwc5YYvbuEu", 282.63795424 * 1e8}, + &TokenPayout{"DsoG9zSRgdX4pdq8BP1p84L7gqABxFCDkie", 282.63795424 * 1e8}, + &TokenPayout{"DsogBQxvh6CFk2Mot1P5AhhggBNHUY9483V", 282.63795424 * 1e8}, + &TokenPayout{"DsogECLEtzuAiamWPP3AcCzYBkJm1Qih3eQ", 282.63795424 * 1e8}, + &TokenPayout{"DsoGh59NkijCy17k2m5fceo6sJFNLJHem4r", 282.63795424 * 1e8}, + &TokenPayout{"DsohcFDcGrfbJ9R5gSqLhKGR6Syinhy1mrg", 282.63795424 * 1e8}, + &TokenPayout{"DsoHdpCi8QA2dRuDv3Scjsy1h8Amo6eaFkX", 282.63795424 * 1e8}, + &TokenPayout{"DsohGmUC8f3SH8yWGGbrZFXRFCPS424yBy7", 282.63795424 * 1e8}, + &TokenPayout{"DsoHmNbAWeVeCD3WAhUb955yvbkgYWvrvcC", 282.63795424 * 1e8}, + &TokenPayout{"DsohpmHxBvkpp8XqsDMxouDbNoFRrxx6WUG", 282.63795424 * 1e8}, + &TokenPayout{"Dsoi4xV6poxBptkioHd7tMEF8Qvg3hunPPX", 282.63795424 * 1e8}, + &TokenPayout{"DsojKyxMYNivigkp1q4wPesub7Td3poJnNM", 282.63795424 * 1e8}, + &TokenPayout{"DsokeAHqq2RHRSSoeiBugE8MvYnp2Ds38Fs", 282.63795424 * 1e8}, + &TokenPayout{"DsoKHmwdL9a6ychPsTC5FHUuLN7W5bU9dXE", 282.63795424 * 1e8}, + &TokenPayout{"DsoKVexwaQZnS8bV4jJed3jmaqnS1FpSfL4", 282.63795424 * 1e8}, + &TokenPayout{"Dsokw2L3wzvmdLwEZ1Mr1adQ1txLyUg7qyG", 282.63795424 * 1e8}, + &TokenPayout{"DsoL4oWL6FcDju6JAGVS1f4oye1gZtNrvzb", 282.63795424 * 1e8}, + &TokenPayout{"DsoLm12CNT6UEfcasPjo9GwgQ1SYGyAdLLp", 282.63795424 * 1e8}, + &TokenPayout{"DsoLTtQb6benUDY3PD5dVeGefThGjQanwN1", 282.63795424 * 1e8}, + 
&TokenPayout{"DsoLzB3g1TKZUtBf9iRP7Kf9LQt6H25Eu3s", 282.63795424 * 1e8}, + &TokenPayout{"DsoM9qdaXQv4D9NJwiCCp4fZ73qQ9wNCvkg", 282.63795424 * 1e8}, + &TokenPayout{"Dsomdx6gdEXVVHmwJmEmdFyRZbpgfe45jte", 282.63795424 * 1e8}, + &TokenPayout{"DsoMFU6DH2b85VeSMpdTGC5gECqKbJ8qq4x", 282.63795424 * 1e8}, + &TokenPayout{"DsomY5WDPBpNwQA2smVbWFSFER9e5SnswLc", 282.63795424 * 1e8}, + &TokenPayout{"DsoNKGYWsGUqFKqHSDZpUzxhgAoWNoZmGh7", 282.63795424 * 1e8}, + &TokenPayout{"DsonLFrFc1C9QW9TVZ2fKfLYKH8uNWRa1R6", 282.63795424 * 1e8}, + &TokenPayout{"DsonqzNyK6Xy7HyRPiDcYAnaYhGsXLXC6wg", 282.63795424 * 1e8}, + &TokenPayout{"DsooDwaehaDjNDrEiUynkxa9nYZh6SGLaTY", 282.63795424 * 1e8}, + &TokenPayout{"DsoodxtdX1ZwrPbwWRgqBBMsiGVMYFFoG6m", 282.63795424 * 1e8}, + &TokenPayout{"DsooMUusJTStV4XSf9D2JdnKPY2WLycYdxR", 282.63795424 * 1e8}, + &TokenPayout{"DsooPH3NyoidFs53973F72HiiVrU8myTgCg", 282.63795424 * 1e8}, + &TokenPayout{"Dsooz3VsLvrZ6B6rPLnS7vSwstyFRb9GLRf", 282.63795424 * 1e8}, + &TokenPayout{"DsoPC3323uvw7Jx7UMdUpqtpyTTarTHFBy7", 282.63795424 * 1e8}, + &TokenPayout{"DsoPettWzTtFEVr3XbyCoghrmX4Q34PkPHe", 282.63795424 * 1e8}, + &TokenPayout{"Dsopf1h3fwi6eZyVWKdN7NtJASrPS8Jxmwi", 282.63795424 * 1e8}, + &TokenPayout{"DsoPHwVX3MiHjpYYoorb6YNQgnQ47PVJr85", 282.63795424 * 1e8}, + &TokenPayout{"DsoPrVNtcA7cP9ikkaDRxVA73NHxbHcGkNh", 282.63795424 * 1e8}, + &TokenPayout{"DsoQ5TcPxryTMLA8GZDsxpC2gB6rUtfsjPh", 282.63795424 * 1e8}, + &TokenPayout{"DsoqkE1jBJw5DkxyThnp9Rc1fUq5BuYhWxu", 282.63795424 * 1e8}, + &TokenPayout{"DsoQQe5gzYRtE1Y3nvsrNSDnXALqCoX83Hn", 282.63795424 * 1e8}, + &TokenPayout{"DsoqxUVPp7s3zv87VoNsPeKhJadZbwZcrdA", 282.63795424 * 1e8}, + &TokenPayout{"DsoqyAm2maQBp51t4Eu8EjQX7B2AoocZY4D", 282.63795424 * 1e8}, + &TokenPayout{"DsoQz1B87YGF34nnHgCHFNhgxJTJxCgAytR", 282.63795424 * 1e8}, + &TokenPayout{"Dsor8kbZX9CyzEAXsSngzFGKfyyVq6nznZq", 282.63795424 * 1e8}, + &TokenPayout{"DsoRC9ie6uAaTWRr6k5t6rXbzDNdaR1Ctj8", 282.63795424 * 1e8}, + &TokenPayout{"DsorUKZPKk2wFou34crr6wqzGyAzhKn2d33", 282.63795424 * 1e8}, + &TokenPayout{"DsoRWSi2ZkqximX9oVrLcGUteaYVeXPa31T", 282.63795424 * 1e8}, + &TokenPayout{"Dsos9whvU8D1mmgUCNLaZAN6tP8gmZtZaJH", 282.63795424 * 1e8}, + &TokenPayout{"DsosuCRZ9UDj2SToJgCek1921d8BAf5B1WE", 282.63795424 * 1e8}, + &TokenPayout{"DsosVxMiaBCq8qdaFAMDPFPoYkeXMAYSXME", 282.63795424 * 1e8}, + &TokenPayout{"DsosXVPoYdSM6vGzvMMTLKJDncA75oZGwpg", 282.63795424 * 1e8}, + &TokenPayout{"Dsot5t6rYA75qRzi388cuevUGCtwjKZZVAM", 282.63795424 * 1e8}, + &TokenPayout{"Dsot7dTZpGLJ2BAXKvfxZV3G4XDWLYmHzxF", 282.63795424 * 1e8}, + &TokenPayout{"Dsot8CZoNmtpBEttMHqsypYci6W67xpEBEo", 282.63795424 * 1e8}, + &TokenPayout{"DsoteNFqiESeRbc9t99WK2pxeAWEhof6d9S", 282.63795424 * 1e8}, + &TokenPayout{"DsoTJ55q76CT6FjrtjwZP1f4EGZjvJwiQEs", 282.63795424 * 1e8}, + &TokenPayout{"DsoTnH5dup2TXt11brws1sfzhPgg9UFVinc", 282.63795424 * 1e8}, + &TokenPayout{"DsoTNyXUcJGmXHapzeAB9RYTcJs2der5ky4", 282.63795424 * 1e8}, + &TokenPayout{"DsotSiKkrDePox1mo68Yrieo3FbeqN73kkF", 282.63795424 * 1e8}, + &TokenPayout{"DsoTwMXFE6j8H8k8VfHzLWqsKcugmZEJcgS", 282.63795424 * 1e8}, + &TokenPayout{"Dsou1sZEmdaPPxTWUz9d5YzW1pTuaQCKp5Z", 282.63795424 * 1e8}, + &TokenPayout{"DsougPbKU9GNfR6FcfWgtevqSCgnQHzFaR6", 282.63795424 * 1e8}, + &TokenPayout{"DsoUohcQuDptjdZLhoRGTQbDFBRMLuEJ5Dt", 282.63795424 * 1e8}, + &TokenPayout{"DsouREpFACQpjQhvs2PVdT8e9mQCHRPFY8m", 282.63795424 * 1e8}, + &TokenPayout{"DsoUrqhneFCMVAFLLeWFP6st4JgAgvDjjpS", 282.63795424 * 1e8}, + &TokenPayout{"DsoUYoLojVKfBjzfFDCDDEVtAAdjnU7dAC3", 282.63795424 * 1e8}, + 
&TokenPayout{"Dsov3kkWizDViE6zfyHrxqBVVsX1RZpunSR", 282.63795424 * 1e8}, + &TokenPayout{"DsoW1ibcHynxmbxwHhFDWFk5FHfUeJYbq3u", 282.63795424 * 1e8}, + &TokenPayout{"DsoWK1kHvnJf7esGKTg5fZMZbZs5ThoT6qx", 282.63795424 * 1e8}, + &TokenPayout{"DsoWQSsg915K4NDLrgbvUFyzg63jaQhtZTp", 282.63795424 * 1e8}, + &TokenPayout{"DsowxL3qd3XhuUzrq7cDDxSVV7TT3EXyDT2", 282.63795424 * 1e8}, + &TokenPayout{"DsoxDyizj1fWQLGSSCpSjwyE9kmQ9YjGc9g", 282.63795424 * 1e8}, + &TokenPayout{"DsoXTsXVkRYQVZhTJdFwieS95JAmt7Ff2kz", 282.63795424 * 1e8}, + &TokenPayout{"DsoY3j2LVsB5Ax9P9gZD5HdHNdvGvTPYNzu", 282.63795424 * 1e8}, + &TokenPayout{"Dsoy4G1eGs2miGRvi9DVzhGVee3DrEbrDn5", 282.63795424 * 1e8}, + &TokenPayout{"DsoyeiPtoYBbZvhQ43m1uPwqQzK4qxa5FxX", 282.63795424 * 1e8}, + &TokenPayout{"DsoyKAvtxcsNqFh5Y9o3fAkgJnmTM7KKsg3", 282.63795424 * 1e8}, + &TokenPayout{"DsoyYbYZxJQzRiy3SiePzVWimbpVMUKdcbR", 282.63795424 * 1e8}, + &TokenPayout{"DsoYzTTtgnQWcLb134m3En2jkDuM1oE6vQt", 282.63795424 * 1e8}, + &TokenPayout{"Dsoz1ZVz5iEeTyu2f7Dwj9PYfbDmveGDDAP", 282.63795424 * 1e8}, + &TokenPayout{"DsozitGzug9q39AHCUzvovwBSdKskLAcGyW", 282.63795424 * 1e8}, + &TokenPayout{"DsozJrLWyWiBCrVcBRPQqr3Z96nJf2eWD6Z", 282.63795424 * 1e8}, + &TokenPayout{"Dsp1LDtDNmiz7ueszhd3CUmgiWbXcMB1kt6", 282.63795424 * 1e8}, + &TokenPayout{"Dsp1W1AfWkV9NXg6CGWQNdP8Z4et2uTTKbt", 282.63795424 * 1e8}, + &TokenPayout{"Dsp1XFhfEjea3s8JLwaGeanyJijS2ZEE7ae", 282.63795424 * 1e8}, + &TokenPayout{"Dsp1zTmqWtfh3jYqvQqoZvhh5ZB4EuMpbcQ", 282.63795424 * 1e8}, + &TokenPayout{"Dsp21gMzo4vD1JZcsWRn59z4rMwo11h7yRW", 282.63795424 * 1e8}, + &TokenPayout{"Dsp2H57kJc3BBd1byNEp8ssg9Mu4wmZZfen", 282.63795424 * 1e8}, + &TokenPayout{"Dsp4aWakitFocjQnHgRQ3kRTMGc2UwAKFce", 282.63795424 * 1e8}, + &TokenPayout{"Dsp4H6LK6QapgfiGMHkS6MAVhE8EFoGT6eE", 282.63795424 * 1e8}, + &TokenPayout{"Dsp4r4qunWUo82zVWnmSYUKGYpxeHtfuSAG", 282.63795424 * 1e8}, + &TokenPayout{"Dsp5kdPQSMqqz8fdDaaZLYZPfWFu1p7ZyuJ", 282.63795424 * 1e8}, + &TokenPayout{"Dsp5tHSSm1TG5QEVyq7E7P8M2Cnk5dVz1HD", 282.63795424 * 1e8}, + &TokenPayout{"Dsp6Br2hbuVBCYcG9j5cEKP17r8KuGgrcoh", 282.63795424 * 1e8}, + &TokenPayout{"Dsp7hmqbhCp53ftVJdygCNvsw5ozZ3APHBD", 282.63795424 * 1e8}, + &TokenPayout{"Dsp8UvYYXujMLfShVNyiz9dyGcQEuhH4kPp", 282.63795424 * 1e8}, + &TokenPayout{"Dsp8VmAJb2RvF1bLTVK8XAgHp88tdsfJSjD", 282.63795424 * 1e8}, + &TokenPayout{"Dsp8Wq9zhusoqeaSrc4RnQVZDqHeRFFLeZe", 282.63795424 * 1e8}, + &TokenPayout{"Dsp9CSE5Tfr5399hHef6oxnXmD1XzMgzqE9", 282.63795424 * 1e8}, + &TokenPayout{"Dsp9TF29dzNwv4fkvPDmpRAMQxeW9orNNpb", 282.63795424 * 1e8}, + &TokenPayout{"Dsp9tRxzcEpfK8Ze74BEjTyoJRwSrtUAMcc", 282.63795424 * 1e8}, + &TokenPayout{"DspA7WzSpWREQfBkF9JiYbBuidih6YkKdo2", 282.63795424 * 1e8}, + &TokenPayout{"DspAFqmLpyq6ohKNa5Y7kaB4MoRsHVGDwYA", 282.63795424 * 1e8}, + &TokenPayout{"DspAPmuiFNWozE6AMaUw8D2HrVrxQH4emxd", 282.63795424 * 1e8}, + &TokenPayout{"DspAUCoizQohT8kSdfhC4c9eYAwHsDc3shj", 282.63795424 * 1e8}, + &TokenPayout{"DspB48wLpssZj1gDqHAKARxbhqe6zH5aJX6", 282.63795424 * 1e8}, + &TokenPayout{"DspBLwYoYS5YH7KaA1vTVRDjug5CXMMZXo2", 282.63795424 * 1e8}, + &TokenPayout{"DspBT8kbMJmj5DoN3NZVXNETwQvvtVGqsV5", 282.63795424 * 1e8}, + &TokenPayout{"DspBtrooa4VvWg3zgUaqY5RBkN3nej4byND", 282.63795424 * 1e8}, + &TokenPayout{"DspC3S3cgzBKfhRSCUFAB5sgk6gubdRPJBb", 282.63795424 * 1e8}, + &TokenPayout{"DspC58W8TQ1oqhFLm7yHjvEkFYEWkum7yei", 282.63795424 * 1e8}, + &TokenPayout{"DspCfUSJy68JWVKSZu68xs6dWJb5ZL7Z2UZ", 282.63795424 * 1e8}, + &TokenPayout{"DspCHfXVoTQ9oc79zLyAZ44PVdVxL2THGZ5", 282.63795424 * 1e8}, + 
&TokenPayout{"DspCWdNLYsJ7t77foshovQApV7WFXYvvbCF", 282.63795424 * 1e8}, + &TokenPayout{"DspDngpK7szD4EScUuzP5VKD2G8NQB8NCii", 282.63795424 * 1e8}, + &TokenPayout{"DspDTG9wFL1XdMeKmKnAv5Kg28sEdtmZ2Th", 282.63795424 * 1e8}, + &TokenPayout{"DspF3JFVDnCKKJkiSvyg35PHaEvCLXLAraE", 282.63795424 * 1e8}, + &TokenPayout{"DspGZQXGoLJLmYWEKmdt3AsXQQzmMdHcB1Y", 282.63795424 * 1e8}, + &TokenPayout{"DspHEvUYfQpc3FzFaWMT75yDWuaSqpLP6Ra", 282.63795424 * 1e8}, + &TokenPayout{"DspHKnn8NvitPn2BHNfp3cXysm8EnDEvcSP", 282.63795424 * 1e8}, + &TokenPayout{"DspJ67HVCezopgLwiVmXjThN49ZyL4axU2X", 282.63795424 * 1e8}, + &TokenPayout{"DsQy5ErxwTCNeM3ec9UggySgyzKATBamJUa", 282.63795424 * 1e8}, + &TokenPayout{"DsQyKBnzcMgW9uYQe9fLtnUaWaPV3SSM1Pf", 282.63795424 * 1e8}, + &TokenPayout{"DsQzbbuqgecBDHnccLKbgFuVeZhhZUfRR9m", 282.63795424 * 1e8}, + &TokenPayout{"DsQzLuqxopddKngo9h8wDZENHQmnSDAUyyQ", 282.63795424 * 1e8}, + &TokenPayout{"DsQzqHrBdKATo16mgRxBvSGxmHdHRYWhnMd", 282.63795424 * 1e8}, + &TokenPayout{"DsR1EZGyqpbRuw86g6fjkt76nUY5j8qv5Tt", 282.63795424 * 1e8}, + &TokenPayout{"DsR1WbbJNxgCUYtEkvHAHKy1Gt1HNyeb3PC", 282.63795424 * 1e8}, + &TokenPayout{"DsR21r3NdpGzZupXGny8aoeWMWyH2uzZ2VC", 282.63795424 * 1e8}, + &TokenPayout{"DsR2akX9bbGVEnmUYUR2eBTeg7ZjGgnsG3N", 282.63795424 * 1e8}, + &TokenPayout{"DsR2NURMyRmFNkWxTECYDw3mF3YFvaMUint", 282.63795424 * 1e8}, + &TokenPayout{"DsR2SXKcnZjohnPBEScjxPPyh2KLwN96CvH", 282.63795424 * 1e8}, + &TokenPayout{"DsR3VoPX3BX1SNL8ecDCwTbMNLHFDXMMTgh", 282.63795424 * 1e8}, + &TokenPayout{"DsR3XJ8VNNPVtds8qGXqL4dEZ2yVp3SGgP7", 282.63795424 * 1e8}, + &TokenPayout{"DsR4AcBoKXwDcRUHbY4VPA4X2m3VeniHVhk", 282.63795424 * 1e8}, + &TokenPayout{"DsR4PWsusDFKGutnB5Udxo9eya5atM7NtwQ", 282.63795424 * 1e8}, + &TokenPayout{"DsR5fetZpwR4f9gxFqMKTyCXYaTaZXok7ad", 282.63795424 * 1e8}, + &TokenPayout{"DsR5VpCJyrQhzDTZsbaY3DRt8eUzuhwKaxp", 282.63795424 * 1e8}, + &TokenPayout{"DsR63JAKt1Jahna8A5vZZFwkvSPWEqhxZG3", 282.63795424 * 1e8}, + &TokenPayout{"DsR6gYqVCVnVPS3K5AwSSEz1JbXATYYy9ii", 282.63795424 * 1e8}, + &TokenPayout{"DsR6WmxQmwGPih39wTbeTsnrSZ6yEeYr2GW", 282.63795424 * 1e8}, + &TokenPayout{"DsR7Bm9sQyL8FzuCU3bHbWExvzYEGeTkvgQ", 282.63795424 * 1e8}, + &TokenPayout{"DsR7bqNMm2EVxdoph42cMjUEgJr6gFdeVkf", 282.63795424 * 1e8}, + &TokenPayout{"DsR7XBt8u4TSJzx2rqCe7c9BEs4TZA7t4xE", 282.63795424 * 1e8}, + &TokenPayout{"DsR7XdWAXcV6pjMJVTVjuUzKbUavdAXmYdy", 282.63795424 * 1e8}, + &TokenPayout{"DsR8GJcE8wWYuYqh6x1r2qqCXeAyyKrarMD", 282.63795424 * 1e8}, + &TokenPayout{"DsR99u4rrMcwqGzBMMWt3M4jdPZ8aZNVAmM", 282.63795424 * 1e8}, + &TokenPayout{"DsR9DuxzJk4yvcsb3wRoRduxgDqZzgVp8CJ", 282.63795424 * 1e8}, + &TokenPayout{"DsR9hX5nnNYUBzfEBGZcXxmkxZzhGKT4nHK", 282.63795424 * 1e8}, + &TokenPayout{"DsR9MJWCcDt3UD6ghGosGtQooLRripHRYve", 282.63795424 * 1e8}, + &TokenPayout{"DsR9MNp23Rx1St6k9BLRZN5zWhd7mofANDs", 282.63795424 * 1e8}, + &TokenPayout{"DsR9sfVMonTzLUhAR2mSepHb869y9GptB5D", 282.63795424 * 1e8}, + &TokenPayout{"DsR9zc64gyCksoQns8N4FWeowcSzzR9hZVW", 282.63795424 * 1e8}, + &TokenPayout{"DsRAapEzoYpWtiHmgFqoJzhXaZ75euayNLf", 282.63795424 * 1e8}, + &TokenPayout{"DsRAMoXbW3guJYgHfnbFhEQ43CZmyvbHc3P", 282.63795424 * 1e8}, + &TokenPayout{"DsRAx11bBtsLeuLVheKFmpZRpWZ56SQJvaB", 282.63795424 * 1e8}, + &TokenPayout{"DsRb54ikw8ZERMvMkLTzk5tS5a99FnY4YPi", 282.63795424 * 1e8}, + &TokenPayout{"DsRBGdgyyG5zFzLhD1tWetnyMkPhqRNopfc", 282.63795424 * 1e8}, + &TokenPayout{"DsRBTSGdXw9rgDNbtegZJxGkXP1s9fABmGZ", 282.63795424 * 1e8}, + &TokenPayout{"DsRBWPZYUbP2ZisV4D2CjqqzX5jADQf4u7q", 282.63795424 * 1e8}, + 
&TokenPayout{"DsRbym89wVif1D5zS3okoAKkPKwzRaxgfJ7", 282.63795424 * 1e8}, + &TokenPayout{"DsRc2vR6rg8hpoVcYCf9sAVDGTgP2DwGMXu", 282.63795424 * 1e8}, + &TokenPayout{"DsRCTEXcf7SwxTQU7QgZNeA4VtvRW1zFNgw", 282.63795424 * 1e8}, + &TokenPayout{"DsRcUyjHmRbxnNAsVNxtWfzXoi1wdNoGnhj", 282.63795424 * 1e8}, + &TokenPayout{"DsRDoNAFDSTm99636pAV2y9adkkb7EwXBJZ", 282.63795424 * 1e8}, + &TokenPayout{"DsRdowrKtXbrPR4AUyboSRbuF2T9bTrba74", 282.63795424 * 1e8}, + &TokenPayout{"DsRdvUBmf26KmofngSyvY7WDwwwpePLGRm9", 282.63795424 * 1e8}, + &TokenPayout{"DsReGwf1CbWzts9tbnnbXnN6a4Af1rvsiiP", 282.63795424 * 1e8}, + &TokenPayout{"DsRFDdvWex1YSjepms9iCChBSYDMmDWcT4H", 282.63795424 * 1e8}, + &TokenPayout{"DsRfJVvNoPYihZ96QXB2C3DHsUkJh9MUCoX", 282.63795424 * 1e8}, + &TokenPayout{"DsRfn64z4jZC7CdE332KS41to2RwBNu6YNr", 282.63795424 * 1e8}, + &TokenPayout{"DsRfQV6bhegqUyAdMkmwZCQ6S3foPjh3iGZ", 282.63795424 * 1e8}, + &TokenPayout{"DsRgjhRkUDzKsZ9kcwvHWffQJBWEFnm7GvQ", 282.63795424 * 1e8}, + &TokenPayout{"DsRhdB7q8iVhuJuoc5ZRwBsUz7xaAF1Tfud", 282.63795424 * 1e8}, + &TokenPayout{"DsRHgGQGP5NzMKT3sPwA453MLJdqQj5CzH5", 282.63795424 * 1e8}, + &TokenPayout{"DsRHM2gQpnUtS7oavqrBr5QXKRqXZCFaNCX", 282.63795424 * 1e8}, + &TokenPayout{"DsRHtwgoUHPN8C3SDJnR2dPaREZRbMMJSns", 282.63795424 * 1e8}, + &TokenPayout{"DsRhXMYhjmsPQXQnqgAM9o7uFihVkrmLGWh", 282.63795424 * 1e8}, + &TokenPayout{"DsRi2F1CLynC6sVLcK7cRswjszcLRc3MvwW", 282.63795424 * 1e8}, + &TokenPayout{"DsRi2tvT1GWjoKHSAfUCtp36sXgmUJJNRav", 282.63795424 * 1e8}, + &TokenPayout{"DsRi3DmnRGL8H6G6R7TPGZwnN1moxWseCbP", 282.63795424 * 1e8}, + &TokenPayout{"DsRiaUywPcccmCEjddkigNe4QCffPjBnPHU", 282.63795424 * 1e8}, + &TokenPayout{"DsRiCK1HE8NqwLMox9viomaNQBZS4LNDSrf", 282.63795424 * 1e8}, + &TokenPayout{"DsRjFB3Wskq9XLA32LgaBTGeE7ixFZ1pX7H", 282.63795424 * 1e8}, + &TokenPayout{"DsRjrNqB3Aon8cgNQ5D1eAVGKBsoELYcJ9X", 282.63795424 * 1e8}, + &TokenPayout{"DsRJrxaaaEngsqdnH8BLVvkCv4UB555NPMz", 282.63795424 * 1e8}, + &TokenPayout{"DsRJYmAb3x3gB2ZrEnRT16HYnMBW2JrSEBf", 282.63795424 * 1e8}, + &TokenPayout{"DsRKeW6CZeVn2WHXmwRbvo6FXBgnScxN9SF", 282.63795424 * 1e8}, + &TokenPayout{"DsRkSZm5o9zqE41hAzUBmUHPgH19w9s7R5B", 282.63795424 * 1e8}, + &TokenPayout{"DsRkX5caAdLfUAhHwtEwU3B5QheGKUa6Lxr", 282.63795424 * 1e8}, + &TokenPayout{"DsRLBAt9z4aHNQZTaXRvKVrfRwZxpmPh7GJ", 282.63795424 * 1e8}, + &TokenPayout{"DsRLCZsY3a59KpYALcQ3DvbQUQX7i4NdqgV", 282.63795424 * 1e8}, + &TokenPayout{"DsRLoAnZxZHgRr36Hrp97VxkKH7MWhbTDVM", 282.63795424 * 1e8}, + &TokenPayout{"DsRLPCxWUVupypKiDJZQRH3TRDU8jdx3z8J", 282.63795424 * 1e8}, + &TokenPayout{"DsRLQzmKdoQrBqBAWY2iX5rmLyUYdTZEDew", 282.63795424 * 1e8}, + &TokenPayout{"DsRLWJtUApozAH1WTGrEpJcNEwakPCmNjWk", 282.63795424 * 1e8}, + &TokenPayout{"DsRmkMudga8iNzPsU8FCkG8zPed6PL3RuSb", 282.63795424 * 1e8}, + &TokenPayout{"DsRmknBX9cm75sfxwiEzfFA6dw8Dr42Jgsx", 282.63795424 * 1e8}, + &TokenPayout{"DsRMTsZFUQX4VDDhdT2HngEwYnzHYW3NBoq", 282.63795424 * 1e8}, + &TokenPayout{"DsRMWEpuYkKNCmRrFhWXx5JzHqFr28pxkVX", 282.63795424 * 1e8}, + &TokenPayout{"DsRN61L3J52ocGWHchcJvJTRqHuT22VM1Po", 282.63795424 * 1e8}, + &TokenPayout{"DsRnb2Bf8kxXmJ7njgyuTwUCerWV6HnMYcN", 282.63795424 * 1e8}, + &TokenPayout{"DsRoN5Co6jjmksCPwtneXrvwbzFjiNGcsn6", 282.63795424 * 1e8}, + &TokenPayout{"DsRPCwnvWGoqxd8jAAUFU8ZRGk93JRVe2Vv", 282.63795424 * 1e8}, + &TokenPayout{"DsRpdG69kkDzHwLfBPiGHmcVQnwDRiqfEEu", 282.63795424 * 1e8}, + &TokenPayout{"DsRpPUXbjHEBMsUo4ajjdDu1hrNKCTScNqt", 282.63795424 * 1e8}, + &TokenPayout{"DsRPyvnATX9wHBfrJn7a4JQsCBHdDd6bF6Q", 282.63795424 * 1e8}, + 
&TokenPayout{"DsRQ79HJ7y9v4HsHQwT7GnMaBSpqF6vYP3p", 282.63795424 * 1e8}, + &TokenPayout{"DsRQFB88NAb8bXZScyNvm9BeQCpHiPKHYN9", 282.63795424 * 1e8}, + &TokenPayout{"DsRQNj4rGUhxguPCKAZsaGTso88RH6mxhjz", 282.63795424 * 1e8}, + &TokenPayout{"DsRqSN9oCnBp6eaxdqmEPrcfKrjgG3eU2Dc", 282.63795424 * 1e8}, + &TokenPayout{"DsRqSvRD4KJFMrTJUDw8ikEzprqLXFrnGZ8", 282.63795424 * 1e8}, + &TokenPayout{"DsRr3o3EUXpb14f25sB4R3nywAoc3eKb7Mh", 282.63795424 * 1e8}, + &TokenPayout{"DsRs89tkCwd4W9TnDpoa1aRw6UVETnPr8Wj", 282.63795424 * 1e8}, + &TokenPayout{"DsRSSdus5wnFSJFz7udg9aCyJwxkvEzrmSk", 282.63795424 * 1e8}, + &TokenPayout{"DsRThbABHHDjP7rmeN1Be43rVatF5Qf7XNH", 282.63795424 * 1e8}, + &TokenPayout{"DsRtNRbaNYtrEdCa2hQJttD1eDWkd4gbkz4", 282.63795424 * 1e8}, + &TokenPayout{"DsRTotP8zpy1QD6WvyTUB4Ut8ouxyiqP6xM", 282.63795424 * 1e8}, + &TokenPayout{"DsRtRP4ghpKmCygz1Gx7niCB6gq86rkcZbE", 282.63795424 * 1e8}, + &TokenPayout{"DsRtTEPXHkTPybkkzeRGjDp1ZjFFjtFBxBU", 282.63795424 * 1e8}, + &TokenPayout{"DsRuBAhcKbfEMKunxv3MwgTKVyWy7Z1mps7", 282.63795424 * 1e8}, + &TokenPayout{"DsRuHSpf2BLLF5a8XQKsbafYZnbtTv1rxPF", 282.63795424 * 1e8}, + &TokenPayout{"DsRvXfRP2x6LTBs6i1oqCZH3BcKv1TtBt69", 282.63795424 * 1e8}, + &TokenPayout{"DsRW8V1dJN7QGDwyrJEvCiMQsnsDxTWa9nd", 282.63795424 * 1e8}, + &TokenPayout{"DsRwgrYfY7KfBbax7rPtjamk4itvEGnXGfi", 282.63795424 * 1e8}, + &TokenPayout{"DsRWMKfBhwoTmL8tQiTmYPpwvyC3WCS7y5E", 282.63795424 * 1e8}, + &TokenPayout{"DsRXNCCS8FCJqG3qHkKVZdSRq8rpZEXAQa4", 282.63795424 * 1e8}, + &TokenPayout{"DsRXv3XtZmHpFqEf4BPyHPtqfWDdxf5kQWT", 282.63795424 * 1e8}, + &TokenPayout{"DsRxVmjawDtazMKfk3LK5KkywR3zhnGZccP", 282.63795424 * 1e8}, + &TokenPayout{"DsRyM1Rm886onRZzRdphZb4HVMacUrpXekn", 282.63795424 * 1e8}, + &TokenPayout{"DsRzgNpZjgBj2Kz3XypByAmmkMTNXREryAt", 282.63795424 * 1e8}, + &TokenPayout{"DsRzGR9hKpNjmTb5cr4N2D6ZoNx5zq42Rto", 282.63795424 * 1e8}, + &TokenPayout{"DsRzHDgvMqAKg7rHjwwXCFqTnDfQk6pur6b", 282.63795424 * 1e8}, + &TokenPayout{"DsRZk1bPjHMzbfq792xp1WamuhP9y9nBbcZ", 282.63795424 * 1e8}, + &TokenPayout{"DsS1LYkrTwcAQiEicAUZNZPqu5jA1ZTrEAB", 282.63795424 * 1e8}, + &TokenPayout{"DsS1pyyXp8Q89CqGG17mnzfr76nWgePUqxo", 282.63795424 * 1e8}, + &TokenPayout{"DsS1UFxyYnpyJKFXkKhh4P7ck6S2GGCnccu", 282.63795424 * 1e8}, + &TokenPayout{"DsS2c3f44TDAqDSZnEDZuzxTiTg9jxKgpJu", 282.63795424 * 1e8}, + &TokenPayout{"DsS2HYNA1UYtwbrzwygUcK58VUrp8h528fg", 282.63795424 * 1e8}, + &TokenPayout{"DsS32bMFraPzoftUaCtKBS4k26KwjBns1MN", 282.63795424 * 1e8}, + &TokenPayout{"DsS3iicQYcTRpHBGeP6Uvruwu5FS4D3U2bs", 282.63795424 * 1e8}, + &TokenPayout{"DsS3kD3TUoKyYfdN94FrNvy98i9NXe5VgSS", 282.63795424 * 1e8}, + &TokenPayout{"DsS3n3dTSAZFcg6MydTrPZbdYe1xpEbewuq", 282.63795424 * 1e8}, + &TokenPayout{"DsS3YCj2ZqZ7wwnLfj1eZcrHWAtghSrcvRj", 282.63795424 * 1e8}, + &TokenPayout{"DsS4XwM1p8YtoQi9ugPS3c7D2bk7w828Pqs", 282.63795424 * 1e8}, + &TokenPayout{"DsS4yvjWAyDztnHQsBfnpR7j645tQT7R1Rh", 282.63795424 * 1e8}, + &TokenPayout{"DsS5puNwsxaF9HQCSkgg5fLf1cGc1orabMP", 282.63795424 * 1e8}, + &TokenPayout{"DsS66oobmpLbf6HKTTbDLVgtHtRaSB9nDUt", 282.63795424 * 1e8}, + &TokenPayout{"DsS6ay4m3eAWiqYLf21QL8SLA7G6dAzVibt", 282.63795424 * 1e8}, + &TokenPayout{"DsS6JHu116APp6Mp4MH9bhuPV23JHRimVUd", 282.63795424 * 1e8}, + &TokenPayout{"DsS6m9LXvKGex1HJ8z6eRTfetFztMEkqxXb", 282.63795424 * 1e8}, + &TokenPayout{"DsS6sAKPGczZ2HUTJSqbu9ucu9ASndwG5N5", 282.63795424 * 1e8}, + &TokenPayout{"DsS6vZjLjxQFYyvgeDjkRNahN654e8YcVKF", 282.63795424 * 1e8}, + &TokenPayout{"DsS74nrbbK5NreWdhuVfWyMqR97HSjcZo47", 282.63795424 * 1e8}, + 
&TokenPayout{"DsS8cZdbxuDo2Jc68Ho37bqWvF6XWQDnJUu", 282.63795424 * 1e8}, + &TokenPayout{"DsS8NbEwtGDGf8T9ZUam9i6hvLgpweYn9bP", 282.63795424 * 1e8}, + &TokenPayout{"DsS8VWLSs99PkLRstBZ4xQ6eJa8y9xp9wsw", 282.63795424 * 1e8}, + &TokenPayout{"DsS9HbNQ3VPZeS2FAHSoV3Hea9G6zDGgMyp", 282.63795424 * 1e8}, + &TokenPayout{"DsS9kyEWJzzd7Qn5V5xpdTez8ncBKzudzpC", 282.63795424 * 1e8}, + &TokenPayout{"DsSa7PVJUdjQ35Sqt1zEJmeapbH5iFbxESU", 282.63795424 * 1e8}, + &TokenPayout{"DsSAfrhD2T5KjfwXkHEQ5CdRMmEwB4zp1Bd", 282.63795424 * 1e8}, + &TokenPayout{"DsSAhR42Nbjf6nZr4Ax7AnpKqtvWjB1wmAm", 282.63795424 * 1e8}, + &TokenPayout{"DsSaHVawmecdbQsoSvkK7zWxJMsJ4gbaXin", 282.63795424 * 1e8}, + &TokenPayout{"DsSaY1vSLZKsjLWoz38q4YRmhzJpVbRbQp6", 282.63795424 * 1e8}, + &TokenPayout{"DsSbDzTq5j1cWZqWop31rxuWb5e2riiJkxu", 282.63795424 * 1e8}, + &TokenPayout{"DsSbKawKnMCqL3TMKhaL9bWhG8D2WwkjDcq", 282.63795424 * 1e8}, + &TokenPayout{"DsSBLn2fU2KTh83v7VStLkmcC4ocPEdLNBA", 282.63795424 * 1e8}, + &TokenPayout{"DsSBSjxQhGyzQ37VSvzDjXTmrRs1ScnAg1i", 282.63795424 * 1e8}, + &TokenPayout{"DsSc867mBpKZvaCyfdHCNdi5RA8YCLz3f1H", 282.63795424 * 1e8}, + &TokenPayout{"DsScERhZwmBWQtoiS7TjkJkXNB3urhsB76H", 282.63795424 * 1e8}, + &TokenPayout{"DsSCGSmE3deQA4bMpBcu9KWkYMTiFaK3wFM", 282.63795424 * 1e8}, + &TokenPayout{"DsSCqp9eg4rkPnPpCd4Athbg3bLK5BZNJbn", 282.63795424 * 1e8}, + &TokenPayout{"DsSCSFvjsfrkFAKRKa4ibvAh7KM3yfDRBWX", 282.63795424 * 1e8}, + &TokenPayout{"DsSCu87yhyfcPpGGWCjHFJtJJKak4YTQimN", 282.63795424 * 1e8}, + &TokenPayout{"DsSd9KwnMfDbjVmBa8qEPJ2ryJnRbY8yrUz", 282.63795424 * 1e8}, + &TokenPayout{"DsSDGHWJPUqsvW3qqvD1mgyfbFxXbEMxsdD", 282.63795424 * 1e8}, + &TokenPayout{"DsSDKoG1JSPPkiiUGzbqxd8yyPzwGju3HJe", 282.63795424 * 1e8}, + &TokenPayout{"DsSENwC7TNgYHzyUeZuvaLfNs4bKRVfduuF", 282.63795424 * 1e8}, + &TokenPayout{"DsSey7msXhE6mYLpCrrzornhyNeFf2AwYku", 282.63795424 * 1e8}, + &TokenPayout{"DsSf1BtauepMNmoajEmvBp1J6H8zdwfnYUq", 282.63795424 * 1e8}, + &TokenPayout{"DsSfd5v4zPmVK8ygDSu5jF92SWezR7bUsVY", 282.63795424 * 1e8}, + &TokenPayout{"DsSgCcLtyU5X2t55MUuHFeebPfSNnp8CekD", 282.63795424 * 1e8}, + &TokenPayout{"DsSGgC8QxfwyFvuX3aywFDJw58vvFEFpnC5", 282.63795424 * 1e8}, + &TokenPayout{"DsSghdZLZtBd4JmGwVLJv5ZcpGyNXYTGcPQ", 282.63795424 * 1e8}, + &TokenPayout{"DsSGSfwSKYvkb7No3EmRPYRVyVHtA8VXNgB", 282.63795424 * 1e8}, + &TokenPayout{"DsSgSv33rSfFiApwqucCFckTgQPREqGPnun", 282.63795424 * 1e8}, + &TokenPayout{"DsSGysYiXGQDpTaAnqBmMCbjFE1GXoawZ3Q", 282.63795424 * 1e8}, + &TokenPayout{"DsSgYtUnywvuGKKfUcKMvNJpz2rT6EbksSy", 282.63795424 * 1e8}, + &TokenPayout{"DsShnbJVWddUwnrx7YoP5nmaV5mhZYw2jan", 282.63795424 * 1e8}, + &TokenPayout{"DsSho6rscRsT9xukHAboE7SgCmSqXG2sTqe", 282.63795424 * 1e8}, + &TokenPayout{"DsSHrLGr6zD3WbYyr24dJfsiiuUQ8d5Dxu2", 282.63795424 * 1e8}, + &TokenPayout{"DsShyUaepNAjXV6aKkjN9G2dyNEY8Er6wVe", 282.63795424 * 1e8}, + &TokenPayout{"DsSHZMbfQ11zt5cxTPCB7Z7c9F5hUdkNKGr", 282.63795424 * 1e8}, + &TokenPayout{"DsSi3jfpCuuZnT9QmtXCXcedx4VD5vHzQ1m", 282.63795424 * 1e8}, + &TokenPayout{"DsSiVZnVA8JM8cdijvr7DKU3fTYkPNSg2Yc", 282.63795424 * 1e8}, + &TokenPayout{"DsSiwEVRig5hQFCpkXacb6dWjXGY8GuFasc", 282.63795424 * 1e8}, + &TokenPayout{"DsSjcb7dZoPAvLztM9QfaRL8YicGwsDsGy3", 282.63795424 * 1e8}, + &TokenPayout{"DsSJebYcF1HAs15sjvpYiy3WhTC8zujg2c4", 282.63795424 * 1e8}, + &TokenPayout{"DsSJevPg21gXMrBwuAKH5pYDRX3ABJ6MReR", 282.63795424 * 1e8}, + &TokenPayout{"DsSjiDpQSNcwwJ2a9DUjJ4BvcUt9NTnUGj4", 282.63795424 * 1e8}, + &TokenPayout{"DsSjuXJx9uTZgkrbFYx2DEMLmS5LycsQVW3", 282.63795424 * 1e8}, + 
&TokenPayout{"DsSk5kAc3yJdY4zoH1gx3GSydMErV4uGHmu", 282.63795424 * 1e8}, + &TokenPayout{"DsSk8dcaAPQiHWWJHyunwDwjAaEDAUjeWap", 282.63795424 * 1e8}, + &TokenPayout{"DsSkbRqkrZNDiejXVvjybmzQZBtdiCmXPWu", 282.63795424 * 1e8}, + &TokenPayout{"DsSkCYTpo9iSUU4wU449qcaBM3tiGF4Fya2", 282.63795424 * 1e8}, + &TokenPayout{"DsSKhnh96qkjSsbRmPkUWFQzckVdYi9jnE4", 282.63795424 * 1e8}, + &TokenPayout{"DsSKjnUH5FXoXcnuHEMMbUGiFG2gF4u5tyA", 282.63795424 * 1e8}, + &TokenPayout{"DsSkvxdoZx6Qx57jPk5od8saLdnavL5eEUB", 282.63795424 * 1e8}, + &TokenPayout{"DsSLp2WAUur5wS2c4NE64pHJeB3KRenXC17", 282.63795424 * 1e8}, + &TokenPayout{"DsSm3usM5QiYcUjKPq8mciX1ShsYdkKVyBT", 282.63795424 * 1e8}, + &TokenPayout{"DsSm7vA7QmdWa1rJickV6Ms86smCvGYWx4y", 282.63795424 * 1e8}, + &TokenPayout{"DsSMwkhop3HSBc29rfELp6727p565JBdVwk", 282.63795424 * 1e8}, + &TokenPayout{"DsSmxVsKY8veMrhJT6bk1gcqWtbUM2gbiqL", 282.63795424 * 1e8}, + &TokenPayout{"DsSnDHQvynqKNCarYJHKAf76amTgKQ5jqGt", 282.63795424 * 1e8}, + &TokenPayout{"DsSndU27gFzLke68DyFvM1A1Ci29kxnAmDn", 282.63795424 * 1e8}, + &TokenPayout{"DsSnFrXPm8wnopXyFmwcZxvck9nbYuQvB5w", 282.63795424 * 1e8}, + &TokenPayout{"DsSngfAwoVkeTzQdRFNLQwkThagVPzKNshA", 282.63795424 * 1e8}, + &TokenPayout{"DsSNk2Uek9GeQKVbymh6zMG7EgsEpTGwpdd", 282.63795424 * 1e8}, + &TokenPayout{"DsSnTCsAjf1GSRmDjp5nnuJQm1bueEpqWte", 282.63795424 * 1e8}, + &TokenPayout{"DsSp31DoGcq7KnzAjCrRzSh5jj6Vu6yyXqs", 282.63795424 * 1e8}, + &TokenPayout{"DsSPprYJMKgyuLXspCv1G3tD3iS3s4nU3Qw", 282.63795424 * 1e8}, + &TokenPayout{"DsSpyfZMu8nRaRqBpibMvYTkUsHsvPob8b1", 282.63795424 * 1e8}, + &TokenPayout{"DsSPzYiMGugZ57SjFX9r16vJqeMAAH1C2zR", 282.63795424 * 1e8}, + &TokenPayout{"DsSQGymxhqxpdx2v9Hxn1uedmSLj81ipx6S", 282.63795424 * 1e8}, + &TokenPayout{"DsSRHMeD1yu43e4guPq4oLXdnftE6YrZ8BD", 282.63795424 * 1e8}, + &TokenPayout{"DsSRuGQsKEtszgnnwDaiRMrYMdCsPepiVbU", 282.63795424 * 1e8}, + &TokenPayout{"DsSSC2fPoWW62NNdFixcwVm2EgcrrYcAgBE", 282.63795424 * 1e8}, + &TokenPayout{"DsSsCGBPVxkkKqrg59A2YNz8CbWbWCNUAw7", 282.63795424 * 1e8}, + &TokenPayout{"DsSseJz26B554TXAvZs4H6QYA1kPGTxDQtj", 282.63795424 * 1e8}, + &TokenPayout{"DsSstec5devnCHeDM6LqpGvWQuris7pcw9F", 282.63795424 * 1e8}, + &TokenPayout{"DsSsWBVSYwbVvAamPbnC7PYnbii4nh3YbGE", 282.63795424 * 1e8}, + &TokenPayout{"DsSTnu9GzN8TEQ9LBLVmt4H7X7jbFTV6Tt8", 282.63795424 * 1e8}, + &TokenPayout{"DsStQkTYxVR1ob1AjmEyprP5ndhjZsX2Vbv", 282.63795424 * 1e8}, + &TokenPayout{"DsSTTW4EFcTP5Cf9SRDRkaCioMA3zX1QiV4", 282.63795424 * 1e8}, + &TokenPayout{"DsStwtooUHBxheuE6pCn11qd5qejwoMgCLa", 282.63795424 * 1e8}, + &TokenPayout{"DsSTzvu7WZN3Jqy4gDHtpsXbnVJ6CAMbtCr", 282.63795424 * 1e8}, + &TokenPayout{"DsSU5LLYnmKNjRdRJy7fBwh5awGAYH9z1vw", 282.63795424 * 1e8}, + &TokenPayout{"DsSUAhnSHNPFzHotuUDV5iL8SFBoMtmh1hy", 282.63795424 * 1e8}, + &TokenPayout{"DsSUvX4663QoRcgAvec54yvz9PF7eLStwdm", 282.63795424 * 1e8}, + &TokenPayout{"DsSvswbCYsHLkzyKuQjL7D4bahEeAtvM5uT", 282.63795424 * 1e8}, + &TokenPayout{"DsSVV4Jp2HdWsweBSLRhB6aGhawS2Ns39iV", 282.63795424 * 1e8}, + &TokenPayout{"DsSW4GVFVMecrFvsoqZkrNXFukipy9k2jCy", 282.63795424 * 1e8}, + &TokenPayout{"DsSw66xweRxqSm26DDPxrRmxAmZj85qVryX", 282.63795424 * 1e8}, + &TokenPayout{"DsSW7Fpg6SnZZbduCctbAVNgMvieUbL29kj", 282.63795424 * 1e8}, + &TokenPayout{"DsSwCM6LL5bN2A3p3MJJuAy3Qfz6PNUq5Pg", 282.63795424 * 1e8}, + &TokenPayout{"DsSWrh4qB7H2Q4zqkzcHu23or2ERysBX1xw", 282.63795424 * 1e8}, + &TokenPayout{"DsSwu7fSu2wnQgpLitgrv643Qtwfhc6qjHz", 282.63795424 * 1e8}, + &TokenPayout{"DsSx22qqP4dY1wQaBpVzqvEgJHXPbuV5gd4", 282.63795424 * 1e8}, + 
&TokenPayout{"DsSxC74oWop8QC4TcSbjdbanGAVbxF9yadd", 282.63795424 * 1e8}, + &TokenPayout{"DsSxEdLm9uVJRHqJkbxBKnDhK3H5hApJBKh", 282.63795424 * 1e8}, + &TokenPayout{"DsSxQUN6QNrTj1ytdWBPBhEnc1gZgvqQBq6", 282.63795424 * 1e8}, + &TokenPayout{"DsSXUN47YagP3RegGgNSzEFBU156BR8WNNY", 282.63795424 * 1e8}, + &TokenPayout{"DsSxv26yqsHZCmXJkyLUM2grLYxaeRKhARC", 282.63795424 * 1e8}, + &TokenPayout{"DsSXxUH7yHuFfqf6BChALrE1pv9H7pRfk9S", 282.63795424 * 1e8}, + &TokenPayout{"DsSY5k5LC9rAzUDkzj5MRqkcioQsPmdxsa7", 282.63795424 * 1e8}, + &TokenPayout{"DsSyGxEJrrnUP7N8BQi1RWQYDZJVD9bQWCm", 282.63795424 * 1e8}, + &TokenPayout{"DsSYgyNtmhpv316W8V72CVSs6tKVQWrT6UR", 282.63795424 * 1e8}, + &TokenPayout{"DsSYJDYXqWiKVbXWSoe5nHNF63DRygUqqb2", 282.63795424 * 1e8}, + &TokenPayout{"DsSYvsKrv3NfkqXkwUB7Mu7F8izE1eFxUbo", 282.63795424 * 1e8}, + &TokenPayout{"DsSz5n7Uet4C3dgc3iPS6nK6JrfHKGoiqzK", 282.63795424 * 1e8}, + &TokenPayout{"DsSz6eEmxgVY4pB6wftdvFPEF795uq6GneP", 282.63795424 * 1e8}, + &TokenPayout{"DsSZhKC36x2iRjvjqpJw5TD5iKm1sdKs3Yz", 282.63795424 * 1e8}, + &TokenPayout{"DsSZpNDLFGztdUmBUZsxfPpx71Bwm8DXa13", 282.63795424 * 1e8}, + &TokenPayout{"DsSZtLQVEvP9tseWMzcb1iQ74oRgpPjtfKx", 282.63795424 * 1e8}, + &TokenPayout{"DsT1dfBGpLjqvZcUWvDquASYQum7D8ttToW", 282.63795424 * 1e8}, + &TokenPayout{"DsT1NoQp5nCVJtC3o75u5QXZMVxYVdMmpVU", 282.63795424 * 1e8}, + &TokenPayout{"DsT1nTHE8fr9fYpuVNpMXUMMSokut49rAGi", 282.63795424 * 1e8}, + &TokenPayout{"DsT1X5QPK4EY3g3JvJkNeyYwzC8VrEurPwa", 282.63795424 * 1e8}, + &TokenPayout{"DsT28J44pCaU8rbnZVVXxTxDJAcomvppwKj", 282.63795424 * 1e8}, + &TokenPayout{"DsT2wnaSN2gN5K8CTYvosaSwu16K1RQZKyi", 282.63795424 * 1e8}, + &TokenPayout{"DsT4AqgAq1C4K3wxAgU9weSXuf2tEJoDYe5", 282.63795424 * 1e8}, + &TokenPayout{"DsT6dgvPVwyThjzmyHwAZQtn3A6Yd5vykA3", 282.63795424 * 1e8}, + &TokenPayout{"DsT6KpLERbRgXDR87zgWPxDn8nSSSNFxTUc", 282.63795424 * 1e8}, + &TokenPayout{"DsT6xakdb8PnAFT8uTCBU268aehkorZsC9Z", 282.63795424 * 1e8}, + &TokenPayout{"DsT7dDVVrBrEqvmBDp7Rw7P7YrriwG8Lp3k", 282.63795424 * 1e8}, + &TokenPayout{"DsT7S173u9bjZPDNw9PDZC2MokF1zWdRKy6", 282.63795424 * 1e8}, + &TokenPayout{"DsT7Ys1wXLVneNxYEKWzYEKUM2JoHvc2Guq", 282.63795424 * 1e8}, + &TokenPayout{"DsT89SGqn8RL4LTeX1wdtN6EW6uE8pVRcxD", 282.63795424 * 1e8}, + &TokenPayout{"DsT8GhtvknYXZBdPCJneTJP4wA6vHd9dyUV", 282.63795424 * 1e8}, + &TokenPayout{"DsT8pisG6CBexPtPywC8QZMFPi56Zbn5NSf", 282.63795424 * 1e8}, + &TokenPayout{"DsT8rrfNgVoTr59HabZQBMSd5GpLpzHP2Tw", 282.63795424 * 1e8}, + &TokenPayout{"DsT8TfLTCyPmWcHhbjjSctPKk8oKDZp7Scs", 282.63795424 * 1e8}, + &TokenPayout{"DsT9CmGxd536X9sVrxUg46kQ9nxyw3W4SCJ", 282.63795424 * 1e8}, + &TokenPayout{"DsT9D8UrmLZGqtkxLaAmnZUwbRQMSM5rfhD", 282.63795424 * 1e8}, + &TokenPayout{"DsT9Gum1AcKCc7fTysLjbnpNEAgVHQejq3y", 282.63795424 * 1e8}, + &TokenPayout{"DsT9LkNzjZSqCLHYJmdc9iGa5EZx61Ra3zH", 282.63795424 * 1e8}, + &TokenPayout{"DsT9saqa2mEwHXutLKFs1TTXTQeDQ2vWV5P", 282.63795424 * 1e8}, + &TokenPayout{"DsT9zi4vf9y5g7WbdmBGXK3woG3AocvJmTT", 282.63795424 * 1e8}, + &TokenPayout{"DsTaEgGn1AD5JViju9t85jvp2SFvvER1of7", 282.63795424 * 1e8}, + &TokenPayout{"DsTAFp72zNgtLYXksPQHnSwkCbYz876FF7d", 282.63795424 * 1e8}, + &TokenPayout{"DsTAfQuZHSyUDxHpZGhzRCeCd9BuYAHTYG2", 282.63795424 * 1e8}, + &TokenPayout{"DsTAmD2fQ7HQ9Bjn32buqDuUo23PiPLxwMp", 282.63795424 * 1e8}, + &TokenPayout{"DsTAPDywLG3W9axdbcLzx47963kdVPRijfj", 282.63795424 * 1e8}, + &TokenPayout{"DsTBFFFztZjbPPQe7GZrpgT3GLKKnAjeaXX", 282.63795424 * 1e8}, + &TokenPayout{"DsTbqPNyLSLBXhZFYn8C4AvUQ5bTZypJXK3", 282.63795424 * 1e8}, + 
&TokenPayout{"DsTbSadyTJvi3z1QBZBsZwo6Z9DGNRTF5uz", 282.63795424 * 1e8}, + &TokenPayout{"DsTBVADKF2BsdeNwp3e4kzbQNLzuEPQa7UG", 282.63795424 * 1e8}, + &TokenPayout{"DsTCD41t4WqWvcyvGv8JzARPwsewuTbP28P", 282.63795424 * 1e8}, + &TokenPayout{"DsTchaFbPnkVG7vXXSrGYcv1WkeDM14N9LT", 282.63795424 * 1e8}, + &TokenPayout{"DsTCmbJvJxwtbVxArGJMqhnAYBVkCYmcQxT", 282.63795424 * 1e8}, + &TokenPayout{"DsTCQci2RmtBUY8U99p3eDi893MCb3Quv53", 282.63795424 * 1e8}, + &TokenPayout{"DsTCs3JNgozprNwczWFB6WuVr7LY62zHerS", 282.63795424 * 1e8}, + &TokenPayout{"DsTCstZfhfrmoiz2eJEsqtGLgrAz5Teccft", 282.63795424 * 1e8}, + &TokenPayout{"DsTcVwEKcKx3AKG8VVMs2pBr6VPiP2oJ8f7", 282.63795424 * 1e8}, + &TokenPayout{"DsTDdSRf1MU7FKxdUaK6D9oC6p7cziXEEB6", 282.63795424 * 1e8}, + &TokenPayout{"DsTdmJk73PnszN31mZJX3rAq4MhYwDA71wF", 282.63795424 * 1e8}, + &TokenPayout{"DsTdNtEv8QR4HjUcuyiYeqRAUc7gx36wmPD", 282.63795424 * 1e8}, + &TokenPayout{"DsTDYUoDciP8V98hWR7XwrH5P5cpXbT1hNE", 282.63795424 * 1e8}, + &TokenPayout{"DsTE1VeGAgWKhjPass7RPybb13LhKi3F9qU", 282.63795424 * 1e8}, + &TokenPayout{"DsTER6wUQeQzZv7ScgbCG6hRyEd7NXJZ5m6", 282.63795424 * 1e8}, + &TokenPayout{"DsTevGU9YkG3TEwjVjhgHdy3SWyVYD5rhaR", 282.63795424 * 1e8}, + &TokenPayout{"DsTF5fmcNssMB6qZjLihkQDBNHksPuwuaHT", 282.63795424 * 1e8}, + &TokenPayout{"DsTfidDYwaSuZeY1d75ZASurXfBvjTGAzhV", 282.63795424 * 1e8}, + &TokenPayout{"DsTFPhFigHRtGzgTMxct6mYxEhVnwVXXVyf", 282.63795424 * 1e8}, + &TokenPayout{"DsTfPLuXtuFCo9qaKqPT3t8QtfnsMia4ex3", 282.63795424 * 1e8}, + &TokenPayout{"DsTgdzHEeAaSenjvqipDyii2xJvnkMHh1EV", 282.63795424 * 1e8}, + &TokenPayout{"DsTgjkXn3yGB4z4PqEMRNYCtQuzZEDyQfZC", 282.63795424 * 1e8}, + &TokenPayout{"DsTgw27adGbJ5SZGkLcK2TPaRpcBEzqdWWR", 282.63795424 * 1e8}, + &TokenPayout{"DsTGwEEswQUomVMxSejQQbS3opiHFuGwgK4", 282.63795424 * 1e8}, + &TokenPayout{"DsTGxEs2HVnhWCDzEoT9rnv5do9BfFgoBpq", 282.63795424 * 1e8}, + &TokenPayout{"DsTGXyU6xKU5TyM2aQR5m1PTbBhVyP3T54e", 282.63795424 * 1e8}, + &TokenPayout{"DsThCxXTLC6qfWHSSN1CQKyRYeWCgph6MrX", 282.63795424 * 1e8}, + &TokenPayout{"DsTHGJUZGEW9MuqHG5EbCa59hGBNWzkemmW", 282.63795424 * 1e8}, + &TokenPayout{"DsThJ9iQbsSJMR6TyXFJFuah2FAyx8SnF4n", 282.63795424 * 1e8}, + &TokenPayout{"DsTi5ATYAmmseGbiv6XjrTtj8qoSs7CRdyp", 282.63795424 * 1e8}, + &TokenPayout{"DsTi5EQFPJ7tNCH3muXapNYVVu3Vw27jo89", 282.63795424 * 1e8}, + &TokenPayout{"DsTiiPNyXwbDQs138vJiXeBYyRpeHjpz6SP", 282.63795424 * 1e8}, + &TokenPayout{"DsTiMXbk7LifeFDQqEeiTdkCf29Aj5aSiHV", 282.63795424 * 1e8}, + &TokenPayout{"DsTiPav5JEvy9usZP3VFXatT2wDx9pY7rgE", 282.63795424 * 1e8}, + &TokenPayout{"DsTitraNMaRMu8Eo5EX7YxvJ5ZHMceAdK5L", 282.63795424 * 1e8}, + &TokenPayout{"DsTiuLNsxbzZiobRAbfWuyr9SxExbibuPxa", 282.63795424 * 1e8}, + &TokenPayout{"DsTJE9MVcaj6YPTm4dKvy1APbrX6FjEoz5N", 282.63795424 * 1e8}, + &TokenPayout{"DsTJFsX2pkTkN2yvsHDc9AanLAdvYWYwtn7", 282.63795424 * 1e8}, + &TokenPayout{"DsTjgZeS3jXkMe6eFD7SJMPbhqNxaPeVddy", 282.63795424 * 1e8}, + &TokenPayout{"DsTJPf39jPhbVwHB52oBkEse2Mb53ARc63z", 282.63795424 * 1e8}, + &TokenPayout{"DsTJxHD73qabVnZbsBoQ4dortDUKPmFjUDo", 282.63795424 * 1e8}, + &TokenPayout{"DsTJydrSY59G27EEpH6rDCkKA9AdLCLZ9Vo", 282.63795424 * 1e8}, + &TokenPayout{"DsTKJEgxkKZKmZhkpKLtKyYjgxh4GURDGc7", 282.63795424 * 1e8}, + &TokenPayout{"DsTkL2ppC9M6XUqVmBx1j13mAFHCMNp71SD", 282.63795424 * 1e8}, + &TokenPayout{"DsTKSxhpsHF25PB8tUkWxkJiAPvvmNxzntg", 282.63795424 * 1e8}, + &TokenPayout{"DsTKTNckH7YjRcCQ6aBvzqQD4Ayg8qP7R4n", 282.63795424 * 1e8}, + &TokenPayout{"DsTLbcBQ6ZtRjAcF2gPdEEuWvNrnyfeJGbd", 282.63795424 * 1e8}, + 
&TokenPayout{"DsTLFSnQV2divwpB82DWXgkkeBXQerdNM4o", 282.63795424 * 1e8}, + &TokenPayout{"DsTm36aoV2fsSNnhGX7cAChvKtTXbDgRCP3", 282.63795424 * 1e8}, + &TokenPayout{"DsTm4Q8C744P1C6pb4pLW3y2gN2cB9BEF8V", 282.63795424 * 1e8}, + &TokenPayout{"DsTM6U8ui37UdkGQc2e9RTwRgAwzTt4mtb6", 282.63795424 * 1e8}, + &TokenPayout{"DsTmDJe3eN66TgWdrGAoBqT5iqn34YP7RKL", 282.63795424 * 1e8}, + &TokenPayout{"DsTmeKCQF2W5e7TqA8kN7DWCMwLhJoHEwAS", 282.63795424 * 1e8}, + &TokenPayout{"DsTmercMiWuxvQrGUYA7ZmUEEwuNhzUK7PX", 282.63795424 * 1e8}, + &TokenPayout{"DsTmLNeVaL4wsEbM2TPNqUEjor65GXrm8os", 282.63795424 * 1e8}, + &TokenPayout{"DsTmoYuYGhmDdP33Ch2GTjSHwmgtbTTcN4Z", 282.63795424 * 1e8}, + &TokenPayout{"DsTMT5FyBurTrZ14yzCSzNqusLThvqZF3mv", 282.63795424 * 1e8}, + &TokenPayout{"DsTn9Df59pF8Dk1xVzrTmUNvdNstNHFH12d", 282.63795424 * 1e8}, + &TokenPayout{"DsTNcsH3bx4YgiqvC1Jzckehuta7z9jgVXP", 282.63795424 * 1e8}, + &TokenPayout{"DsTNeh5B8NBZkfksCkDWR4yDB2h1Tu5qgwi", 282.63795424 * 1e8}, + &TokenPayout{"DsTnSu9MxAkmfLwvhEQW5u8udkxhwqpnPge", 282.63795424 * 1e8}, + &TokenPayout{"DsTohdpGSnFbGXTyM2SFkeytx4dhjVC74rb", 282.63795424 * 1e8}, + &TokenPayout{"DsTpHwgzg3TrtvJPweuqecQmP5pb75xmg4J", 282.63795424 * 1e8}, + &TokenPayout{"DsTPj3mUXkorQp2hjw8W5aHMF3rhpVqULPm", 282.63795424 * 1e8}, + &TokenPayout{"DsTPtoDaxVvsP5iyoyML9cip46bNZDR4Rq9", 282.63795424 * 1e8}, + &TokenPayout{"DsTQAZu7on7UZzQwzoF5X3s49E9Fu8qST3o", 282.63795424 * 1e8}, + &TokenPayout{"DsTqWCij29CxVuzENc7spsFj6yxPJDrrzeK", 282.63795424 * 1e8}, + &TokenPayout{"DsTR6z7eYscQ1EWhyZK9CMyarbsG6DF9SLw", 282.63795424 * 1e8}, + &TokenPayout{"DsTrdG9qidmpPbogCdkURMVbAjj5ZUMxtdZ", 282.63795424 * 1e8}, + &TokenPayout{"DsTRruDJaebuY54KzFUGncgi5xFFSFBbBpT", 282.63795424 * 1e8}, + &TokenPayout{"DsTrtFekUf5dJNSax6GmaRPJJHwEN2rRu7a", 282.63795424 * 1e8}, + &TokenPayout{"DsTsxaPeaS7qzQQWDCjtJxF8KUb6aQxYsAc", 282.63795424 * 1e8}, + &TokenPayout{"DsTt9zuQG8E4MyCJqim6VbcJz341KLt477H", 282.63795424 * 1e8}, + &TokenPayout{"DsTU8ksMmgACaz2534HqvT2bUi3W5q4KU61", 282.63795424 * 1e8}, + &TokenPayout{"DsTuLoXhX7qNNeHkGKtkiaQMkRv8puuYryM", 282.63795424 * 1e8}, + &TokenPayout{"DsTV3dJ8MDNu8BSz1kekZbfsyKJ48SDjPMt", 282.63795424 * 1e8}, + &TokenPayout{"DsTvGF1UunrXMxVxTz1mUmjWF2SwM9PMiAn", 282.63795424 * 1e8}, + &TokenPayout{"DsTvP6TKH6j4tox1HT5YU27xLgGXpwVZiDL", 282.63795424 * 1e8}, + &TokenPayout{"DsTwSU6R5QgFt1Ag16fT7E8U3ki3hPsdN6e", 282.63795424 * 1e8}, + &TokenPayout{"DsTxAGa4UjxqSQwTGsrNj929KUH4uwV2LK3", 282.63795424 * 1e8}, + &TokenPayout{"DsTXAjwtz475enH8LjD2TgnJnGHi2TiFYnV", 282.63795424 * 1e8}, + &TokenPayout{"DsTxmtiBd9tFc4AT6qDVsTgaR1UDRESrqm5", 282.63795424 * 1e8}, + &TokenPayout{"DsTXrx6C79tEsCLk4ANFAGFntnGMCucUmxx", 282.63795424 * 1e8}, + &TokenPayout{"DsTxviRdpnpJuop22iRUm57uc6crAjJGD2E", 282.63795424 * 1e8}, + &TokenPayout{"DsTxY6VATKF5reHFu1zYbAPWKzxvVh4Xcp6", 282.63795424 * 1e8}, + &TokenPayout{"DsTyaP7chqos6RXRDHwNWansN59eoknUPXp", 282.63795424 * 1e8}, + &TokenPayout{"DsTYozzQiruEExyKLK1RP1C2W3uKSVJppz8", 282.63795424 * 1e8}, + &TokenPayout{"DsTyRcSoE2FrMohBE53eEUqgfVhChWo2JCi", 282.63795424 * 1e8}, + &TokenPayout{"DsTyVWUsNWiAzHjt5xZuhGYgcqX5beKpiiz", 282.63795424 * 1e8}, + &TokenPayout{"DsTz4kw3Xjh1Vnby1GmhYnkXQRFwm3dAQ3X", 282.63795424 * 1e8}, + &TokenPayout{"DsTzJgEtPm6VbS4DiMtmtwVAqdfWsCsBJC1", 282.63795424 * 1e8}, + &TokenPayout{"DsU1b4yjzZysENJrBPhX4F4XxyrS5JuYGKF", 282.63795424 * 1e8}, + &TokenPayout{"DsU1GPUdvjRXMuErRJnqMAUywimLfxnLwG9", 282.63795424 * 1e8}, + &TokenPayout{"DsU1mNdLJLNdarRaeJ9MVWFcQHvtjF5iPvc", 282.63795424 * 1e8}, + 
&TokenPayout{"DsU1PAfcNmZ5mgPDr27svBPLuTSqVvoP3ir", 282.63795424 * 1e8}, + &TokenPayout{"DsU2g5C9q3NNJ4xEAWZbk8U3YxW7dRHuXji", 282.63795424 * 1e8}, + &TokenPayout{"DsU2oBzSVCG2H7T6Bkm5yua5RrAhNJGyf5J", 282.63795424 * 1e8}, + &TokenPayout{"DsU2Sgt7RthMuu4z13yMM4YMw5G27hQK275", 282.63795424 * 1e8}, + &TokenPayout{"DsU3gcbxQyDHsytvqrvwXNpZBEVckx9DaFF", 282.63795424 * 1e8}, + &TokenPayout{"DsU3RgtktZFPPoT5GSSn12n1KpmdzFYJ7dC", 282.63795424 * 1e8}, + &TokenPayout{"DsU5kuB6zfqygjeFxoY1vCaJ7KR6HX4oDnb", 282.63795424 * 1e8}, + &TokenPayout{"DsU8cWCsYcpUp73S4Uc9PeQ2994qYRwczh1", 282.63795424 * 1e8}, + &TokenPayout{"DsU93HXRrPHGmafaFjQPhwcDVauxCYgLvY5", 282.63795424 * 1e8}, + &TokenPayout{"DsUA12eNrxE5BxnMR9LjnDiH8PPtkD6aiVr", 282.63795424 * 1e8}, + &TokenPayout{"DsUAfM3RT7Fwp7LnhbSX4MXLCUNqR2RL4bE", 282.63795424 * 1e8}, + &TokenPayout{"DsUamVmq15NuVbdGWhLky3gxCB62k5uJp23", 282.63795424 * 1e8}, + &TokenPayout{"DsUaNxeRr17baDGesV4PfXaA1GZ6aSyeRfe", 282.63795424 * 1e8}, + &TokenPayout{"DsUaRHg1FA391ZwQuozNHjLKsF9kE6QxAis", 282.63795424 * 1e8}, + &TokenPayout{"DsUbd3AbyWtDFmrh1auLGpV5QkrFMT8hZZh", 282.63795424 * 1e8}, + &TokenPayout{"DsUbgJGvmhDmB7zejQYZZMj6YK48a9wxHag", 282.63795424 * 1e8}, + &TokenPayout{"DsUBSmE9hvE76b4LG8a6vfyoPNuoNmcru66", 282.63795424 * 1e8}, + &TokenPayout{"DsUbWESgfdpKYCFbJoYxQTacHPHNnAPWETj", 282.63795424 * 1e8}, + &TokenPayout{"DsUBYZB4mcM83wdJ4Abc2vLzCgRoYemmHQQ", 282.63795424 * 1e8}, + &TokenPayout{"DsUC3qEaYTEHp3YXWHzXLX9YcByXLPGu1tt", 282.63795424 * 1e8}, + &TokenPayout{"DsUcF1whs2gyXFGSChVKHyi5qFenE2V8b3S", 282.63795424 * 1e8}, + &TokenPayout{"DsUcnBP5AayRaduq1ZsdKFVdJgqMaGvrRir", 282.63795424 * 1e8}, + &TokenPayout{"DsUcq11fCLUzDZLuCezMpW2NaWKsVFiZWUU", 282.63795424 * 1e8}, + &TokenPayout{"DsUdedRiUQmej8cdrygfZZ5XYLgeKGup4Eu", 282.63795424 * 1e8}, + &TokenPayout{"DsUdLn1pXEFXNSK54pmhuHF53Sdtz49Q8kt", 282.63795424 * 1e8}, + &TokenPayout{"DsUDQukHAKxp7Unk6UiX8aZBcP2HES2tKfv", 282.63795424 * 1e8}, + &TokenPayout{"DsUdz8wvHJVFBmGcmUzu6E4J3TS2Ms1njdR", 282.63795424 * 1e8}, + &TokenPayout{"DsUej65HfYGzq6Gh5a3KE7hxtVDTumKW1Nc", 282.63795424 * 1e8}, + &TokenPayout{"DsUeJJNk2YAAjYsjBBNLBkpfo2PNm4UB5g6", 282.63795424 * 1e8}, + &TokenPayout{"DsUeL8Zm2qaEUunaNQRC6dm4i2HbKoYDuC7", 282.63795424 * 1e8}, + &TokenPayout{"DsUeMa1jKvDMK6dHsjo21PdQRvXbYfbxdCu", 282.63795424 * 1e8}, + &TokenPayout{"DsUEmq8Tdgw9enV71xu5CQeorvqXPisRoMa", 282.63795424 * 1e8}, + &TokenPayout{"DsUfc5Xy2DbgSrcq4L93nUKpBieUq6G8pNm", 282.63795424 * 1e8}, + &TokenPayout{"DsUFghDuwqSYHxT5Q3UjvLC5VkqZ9JK5WYW", 282.63795424 * 1e8}, + &TokenPayout{"DsUFoP9Hvs769GSdmL8U6TpDYQyyqyL4kBS", 282.63795424 * 1e8}, + &TokenPayout{"DsUfpNTymc8H8RQuRFEq7fCDBrqLWDYuhC9", 282.63795424 * 1e8}, + &TokenPayout{"DsUFxoEou6M1PPFbcPqVzdpbL4Qc3YKvXBj", 282.63795424 * 1e8}, + &TokenPayout{"DsUGD22RV5eAkSzWuBBb7x587cmmEVS2mt2", 282.63795424 * 1e8}, + &TokenPayout{"DsUGf38qzYqxFppfq8E5ZjAPmmHo7yoPLEo", 282.63795424 * 1e8}, + &TokenPayout{"DsUgHMsYgtf6daSGcvrcomazUFALQoPSKYG", 282.63795424 * 1e8}, + &TokenPayout{"DsUGJHHPL1raCB5JKUrDvoiBYLDkKgAV2Nf", 282.63795424 * 1e8}, + &TokenPayout{"DsUguyyAxh79VsRnjZU7pdXGXqoKwtTD7dw", 282.63795424 * 1e8}, + &TokenPayout{"DsUgZzmeSpuCJ6hEpLTwNRM4FbVwVy4oJdw", 282.63795424 * 1e8}, + &TokenPayout{"DsUH1dxDAJ2UA5zG6HU2HcRzpGyCsJrs5zQ", 282.63795424 * 1e8}, + &TokenPayout{"DsUhERDSkHTtgx8soVZShyEKuMxuJxsZBSW", 282.63795424 * 1e8}, + &TokenPayout{"DsUhjkwWYDMeRTYWngAfwCExikz2U6Z3HPA", 282.63795424 * 1e8}, + &TokenPayout{"DsUhpt6XKJR8h92961SZhFcES5ebfKVzshL", 282.63795424 * 1e8}, + 
&TokenPayout{"DsUHYgPu3BgoDxbRwckrn1k97tCVgGHgj1N", 282.63795424 * 1e8}, + &TokenPayout{"DsUHYPqM3ffgjMfnmdUFsTKKXm5RNa5Gmr8", 282.63795424 * 1e8}, + &TokenPayout{"DsUJ7jpyUoAM5Pnfha1Z7s5EQdKgeq6kYKS", 282.63795424 * 1e8}, + &TokenPayout{"DsUjg9NrqbPq5CjsorSiWWzRW5vbT894ffW", 282.63795424 * 1e8}, + &TokenPayout{"DsUJuL2KWjgYdzLbgD2TCuRvEr8PQ9PGncu", 282.63795424 * 1e8}, + &TokenPayout{"DsUkhCnMKVcyQUU36rMNC5ZgGhEbU7Ni9vF", 282.63795424 * 1e8}, + &TokenPayout{"DsUKHsA9rextZ7BtMtaqwEpgGECLQzsZoQu", 282.63795424 * 1e8}, + &TokenPayout{"DsUKzMUs92yxCS88LNcnECTr79g96MbpRmA", 282.63795424 * 1e8}, + &TokenPayout{"DsULczGCnX4Uhwpo4nfYev9k3xcanfD995v", 282.63795424 * 1e8}, + &TokenPayout{"DsULEYKR9YMxZrHBzcTMBxc6krFFRaBrahH", 282.63795424 * 1e8}, + &TokenPayout{"DsULFBGnZVS15fzDM3DHKkVQkbxiQzQ9Xnz", 282.63795424 * 1e8}, + &TokenPayout{"DsUm52fy97rKa6MvqwtMxHvLFURmECVe5Xx", 282.63795424 * 1e8}, + &TokenPayout{"DsUMAGTkUfJczcgsPB6JAMMKP9RWsqbFwJA", 282.63795424 * 1e8}, + &TokenPayout{"DsUmGg9unMEBb4wBeXYjwUwUHtiMxApeiQe", 282.63795424 * 1e8}, + &TokenPayout{"DsUmVmD5AAmmNGNRkon2Vg2zYLXhziSrHVP", 282.63795424 * 1e8}, + &TokenPayout{"DsUMXNj27iWpxfbgaQWD54AFQYkY8J6hd4G", 282.63795424 * 1e8}, + &TokenPayout{"DsUnB62D4MKs7CuRfhJBD5DJkU9bHPZk27K", 282.63795424 * 1e8}, + &TokenPayout{"DsUngZdmJSzxheiFvs5Hj6JgkAKGHT3rV6g", 282.63795424 * 1e8}, + &TokenPayout{"DsUnPRY8kURK49VqPKBQ6ytSAkQNxC2KQW1", 282.63795424 * 1e8}, + &TokenPayout{"DsUnqbv26qToiDBMpTUoS66btFtZTwtYJX3", 282.63795424 * 1e8}, + &TokenPayout{"DsUnVFHCxpvMyS3Npahyy9EsQrwdCph1PK5", 282.63795424 * 1e8}, + &TokenPayout{"DsUNYn57Ynzy8hyjbfPsjfshEubfzjMmKEA", 282.63795424 * 1e8}, + &TokenPayout{"DsUodUbBtBKxQbxMvBGjnBCNbnUJRzTSgZy", 282.63795424 * 1e8}, + &TokenPayout{"DsUoGeAc7C1BDeE2hjtRymgaD6kUqFfbB71", 282.63795424 * 1e8}, + &TokenPayout{"DsUpdL9T249MuViUjZjjjPexLyE95CcjnHw", 282.63795424 * 1e8}, + &TokenPayout{"DsUPLP9veVvzgYTgP9v2DnranmuP7XSYJeU", 282.63795424 * 1e8}, + &TokenPayout{"DsUpoLoHkaMbzb3arcQSpNsjUDnCozSuaP5", 282.63795424 * 1e8}, + &TokenPayout{"DsUPqmuQ8KCotm2DmF49r6RB26tUYstmBT6", 282.63795424 * 1e8}, + &TokenPayout{"DsUpXotE28zrJcNR92vuLxMUE1Dvon5c68A", 282.63795424 * 1e8}, + &TokenPayout{"DsUQ4nUgjs1dqeMJZfc9GMNTH5VR2jG2oez", 282.63795424 * 1e8}, + &TokenPayout{"DsUQhszDA28wNsB6dPr76PejLtU4mtNYX6Z", 282.63795424 * 1e8}, + &TokenPayout{"DsUqjTes7ozxsRzpncRMLbvqP5Bn7yvnEvS", 282.63795424 * 1e8}, + &TokenPayout{"DsUqqXD86Qy1se8S5bHt6NpDh9Mfd8FTp71", 282.63795424 * 1e8}, + &TokenPayout{"DsUqs66qKiVvLdW3axLMqhT6Fha6tzHbT6q", 282.63795424 * 1e8}, + &TokenPayout{"DsUqUaodrZBNu5dJ665EX4S5iz7DYSw1T3C", 282.63795424 * 1e8}, + &TokenPayout{"DsUQYTQe5Jy24ic7gckaP5wBzLkgaHrak3F", 282.63795424 * 1e8}, + &TokenPayout{"DsURnU3bmSvLk7ByMZBB9KzcabXiDxeUVB5", 282.63795424 * 1e8}, + &TokenPayout{"DsUrorYNb6MTbMfq2NPhHpGyiXCW3N6s2KW", 282.63795424 * 1e8}, + &TokenPayout{"DsUsBGTKHFzeGos9YcxSYHKLaGjEH4qSsai", 282.63795424 * 1e8}, + &TokenPayout{"DsUSk1oGuCGn1Gc4DgQZzfii1efFzFLXwDg", 282.63795424 * 1e8}, + &TokenPayout{"DsUSK68LMaDicfsnAjtbWmbyDK3AcXaD7Qg", 282.63795424 * 1e8}, + &TokenPayout{"DsUsKq5TnRuQp3Ls3gHPBuSjcRKP1Vs5P9w", 282.63795424 * 1e8}, + &TokenPayout{"DsUSLjS3gwqJoA8AzuxVgvF2Mmhw2mYAgss", 282.63795424 * 1e8}, + &TokenPayout{"DsUSNhZo2xytq3Z4D4p4wPyDAkZm2fwur48", 282.63795424 * 1e8}, + &TokenPayout{"DsUsS8Tk7rqPLnyZh3At7kni4d8G4oMUPTF", 282.63795424 * 1e8}, + &TokenPayout{"DsUtBcj1QnJXhPdBN6KzdvL9t3Ugw7k9RmQ", 282.63795424 * 1e8}, + &TokenPayout{"DsUteFK516K7xddVYzCKUtwzKSLMgbjmGo6", 282.63795424 * 1e8}, + 
&TokenPayout{"DsUTJKfMYfPhrhZgp2rMEGQyQoFJtfgDK15", 282.63795424 * 1e8}, + &TokenPayout{"DsUUQ1WcWK5tE6wGYsWFPXNKqRgymbGzFfv", 282.63795424 * 1e8}, + &TokenPayout{"DsUV92fuC6UnpKF67687D2b1e45JE82rXbz", 282.63795424 * 1e8}, + &TokenPayout{"DsUVqC86hd212PYcqHZ52t4yfPYRXxnfmip", 282.63795424 * 1e8}, + &TokenPayout{"DsUvzx5kMnpti6pivUdiTLPcGW6yw62vk6o", 282.63795424 * 1e8}, + &TokenPayout{"DsUWMUQ2kLVE26eMuegMu5MvDMxffXTSpPY", 282.63795424 * 1e8}, + &TokenPayout{"DsUXBgVw2tNYoNvndwyMMW5MqFZitemieo1", 282.63795424 * 1e8}, + &TokenPayout{"DsUYA7PJT9Te5zYWe6GjzwhPbqhA1R6AY5U", 282.63795424 * 1e8}, + &TokenPayout{"DsUYaZwEigUKbMonVWS7Jb9Ez8tXF7auxBd", 282.63795424 * 1e8}, + &TokenPayout{"DsUYH9mq1CBnRLQv9uZcFWuQeTN4agevVc9", 282.63795424 * 1e8}, + &TokenPayout{"DsUYy5kp5eP5ns3A4UqGLQsxZ3SuNZby31T", 282.63795424 * 1e8}, + &TokenPayout{"DsUZM4MHzo7yLvyorptMXJ6yJfvt4BnfRzJ", 282.63795424 * 1e8}, + &TokenPayout{"DsUZzAk2vVu7Z8AF4jdg33HphdFL1k3dwXt", 282.63795424 * 1e8}, + &TokenPayout{"DsV1wzeF38QU25zYNptvfpNJ2mPTAotbaDT", 282.63795424 * 1e8}, + &TokenPayout{"DsV2k9vKn6moFKD13igWxfdzSxoFcyYBDEh", 282.63795424 * 1e8}, + &TokenPayout{"DsV4kmw2GwWqxJRo87sAM2xJwV38UpxDvVp", 282.63795424 * 1e8}, + &TokenPayout{"DsV5aLsMyn7aW5jKxXmeNBHsTUvpL3GuAe5", 282.63795424 * 1e8}, + &TokenPayout{"DsV5hyEYjepRnbGZcDMoJtwpWAvr6GuMaFx", 282.63795424 * 1e8}, + &TokenPayout{"DsV5t4iocxGdGq7KqtYgQssQE6DRmgVX4Ck", 282.63795424 * 1e8}, + &TokenPayout{"DsV6huXi3jaRCY7jPwb5Pw1pbksN2bbQfAf", 282.63795424 * 1e8}, + &TokenPayout{"DsV6SFMKeYh1NAWtf4YXFg27Zea4Xx3ao9y", 282.63795424 * 1e8}, + &TokenPayout{"DsV6zboqQoyv3PBz1w5Qv26SH8ptJF9PuCH", 282.63795424 * 1e8}, + &TokenPayout{"DsV71poxGyckViVaz9dXBubbjtjViHjHAxf", 282.63795424 * 1e8}, + &TokenPayout{"DsV7G95XaCzfmR11e52FhjdJCGDZa9VqL2k", 282.63795424 * 1e8}, + &TokenPayout{"DsV7kVEEDS4MFBWt6rvLxKskFeBjXNNZv6M", 282.63795424 * 1e8}, + &TokenPayout{"DsV7uLJar2E7hsRXECHhm9Bmuun5udmfH45", 282.63795424 * 1e8}, + &TokenPayout{"DsV7WqYFmbMZ6wk9XTrzFo9z2HwbGFiAs7T", 282.63795424 * 1e8}, + &TokenPayout{"DsV7ZDYr5mBF86ddc2aJ31o3JyDJrSM5Zkf", 282.63795424 * 1e8}, + &TokenPayout{"DsV8n3bhKvPgCQsBABHPffery9X4rEuoHva", 282.63795424 * 1e8}, + &TokenPayout{"DsV98yWcFKGtqWrphEy4DzCjhciJD7zTxPB", 282.63795424 * 1e8}, + &TokenPayout{"DsV9VG6NwyRQ3gYybZCZ8wfMxArtwtvVqVv", 282.63795424 * 1e8}, + &TokenPayout{"DsVa6BQqivbR2L6bBNnANZPdSdiKZL9cmtH", 282.63795424 * 1e8}, + &TokenPayout{"DsVa91SJo3RVNBZWJ3BT8f3F3BxEsYA7uAA", 282.63795424 * 1e8}, + &TokenPayout{"DsVAAZLAyS3635fntbyiuhqMjEdTcviXD27", 282.63795424 * 1e8}, + &TokenPayout{"DsVaBzgWVKLbqoieSq43oGEGzeDyK8DeB25", 282.63795424 * 1e8}, + &TokenPayout{"DsVaKTfW4HHVT6G7yZYb8WCC6tkZsPfPQoz", 282.63795424 * 1e8}, + &TokenPayout{"DsVaqBeBGyd2tLezdfQf3hayiZbkc1e5X9U", 282.63795424 * 1e8}, + &TokenPayout{"DsVAydzUnghEMbkFjWDemhBuicUofU2aPte", 282.63795424 * 1e8}, + &TokenPayout{"DsVb7g96YCR7RUjpwFbmkBFvSxXTn3dH8FZ", 282.63795424 * 1e8}, + &TokenPayout{"DsVbASAjYBZ2pg6CzTLX45tmMQCa8CmMoA1", 282.63795424 * 1e8}, + &TokenPayout{"DsVBE1k7UBFdJnTEGskfUCpJHfXQF4ncQKp", 282.63795424 * 1e8}, + &TokenPayout{"DsVcBh5WtBXQz5GwFFXqDfDS3ipjmWuzn8m", 282.63795424 * 1e8}, + &TokenPayout{"DsVcRAYKKy5ZXiawCTCLY1S8M6CpLaC7ZxQ", 282.63795424 * 1e8}, + &TokenPayout{"DsVD4jczZctMKczDAcAbBTz4z7tc1hDxCip", 282.63795424 * 1e8}, + &TokenPayout{"DsVD8hwjjx48Di4qXzggkjCH7i1qDtc5oB5", 282.63795424 * 1e8}, + &TokenPayout{"DsVDdkP2uKSs4UFYHJvaLcdFS6bntu4vAFU", 282.63795424 * 1e8}, + &TokenPayout{"DsVdejvf83oVuvxcEgmivz7F97bMskKXVp7", 282.63795424 * 1e8}, + 
&TokenPayout{"DsVdk9LDQKjRM29e5gfehtNP9xXbEX1a8W4", 282.63795424 * 1e8}, + &TokenPayout{"DsVDYFgrS4ZZARYHyduYoGdfVfWLSeBw54c", 282.63795424 * 1e8}, + &TokenPayout{"DsVE4iL2Yr2fyxagtXFfBMmMsknfPpx6Yxv", 282.63795424 * 1e8}, + &TokenPayout{"DsVE9sKbLyzKbfuQkTs8rngGrAp2h3JZy1n", 282.63795424 * 1e8}, + &TokenPayout{"DsVeCYTfqW7UjbTg1jjFTwAuqPY3yV8F16E", 282.63795424 * 1e8}, + &TokenPayout{"DsVEf1x5j81MGSdYBiSQbgZFxeqMnABTwPa", 282.63795424 * 1e8}, + &TokenPayout{"DsVeKSDyZgtEusch1uBgVbVR1sZqnHpTicG", 282.63795424 * 1e8}, + &TokenPayout{"DsVEwh8QmangcnmQWpm56diYEPWicRdR6kA", 282.63795424 * 1e8}, + &TokenPayout{"DsVEYXhSfei8sfYLk2Vj4jLvnZfcXymfry8", 282.63795424 * 1e8}, + &TokenPayout{"DsVf6jbtpiBbm3YovqjWG97y3QCCCvU7rUy", 282.63795424 * 1e8}, + &TokenPayout{"DsVf9q8KZ8BMDM7HNVhon7b6zwmVPn2vABG", 282.63795424 * 1e8}, + &TokenPayout{"DsVFDrnq3jCTRWfC7myeD6Bo19r1RgPPf6v", 282.63795424 * 1e8}, + &TokenPayout{"DsVfDyG4UsY5nexWtZDokSPLG43oe69iGr4", 282.63795424 * 1e8}, + &TokenPayout{"DsVfR4TS8NDy3potHp1pxtTLze6xVUQMpFF", 282.63795424 * 1e8}, + &TokenPayout{"DsVfrqjg3QCXvkdHkZUKLezMWk2gMdL6aV1", 282.63795424 * 1e8}, + &TokenPayout{"DsVFuS166j8dMuTaYwE48Ld5qNtYLQNcUvU", 282.63795424 * 1e8}, + &TokenPayout{"DsVFWkQyVArsFePThmxDkdqztLU4SptTLDL", 282.63795424 * 1e8}, + &TokenPayout{"DsVG1Rdff74V4sxykyXuwGxTBU7PojhgUFA", 282.63795424 * 1e8}, + &TokenPayout{"DsVg4XR9BpfVhpYNCZVPG4q6zgaQwdMD6AC", 282.63795424 * 1e8}, + &TokenPayout{"DsVG6xDcWEZeaBBUPPGqceEeWFvcC7znKi6", 282.63795424 * 1e8}, + &TokenPayout{"DsVg9gSaSTHvmHMJpeQEvRtrqmpwS1ZugKJ", 282.63795424 * 1e8}, + &TokenPayout{"DsVgcHR2Jk3FJawscrDEZJz18VQi4Te9gNX", 282.63795424 * 1e8}, + &TokenPayout{"DsVgHLRHMinxBVXjcAnXsGTFPCoahvNDA6U", 282.63795424 * 1e8}, + &TokenPayout{"DsVgiMwPoG7ZVtmEcy89Nfvfgr4tSJLJi3D", 282.63795424 * 1e8}, + &TokenPayout{"DsVGT7uaZxMGkBRGXh3B91wkgPkrbDJarRc", 282.63795424 * 1e8}, + &TokenPayout{"DsVgu78CrJnRSjTEHx6rzmTbe8D227o4w9c", 282.63795424 * 1e8}, + &TokenPayout{"DsVH3VZVnaahhhgQ89X1qxLSvdPgyvJFoAc", 282.63795424 * 1e8}, + &TokenPayout{"DsVhjAojAGCwdyfPBEwenCtCLeTTju1nPed", 282.63795424 * 1e8}, + &TokenPayout{"DsVHNaCFrZoHvBLYWzqjhcTviAcA3herivc", 282.63795424 * 1e8}, + &TokenPayout{"DsVHRQH3sZd1mxMjEMKj8UHvocVknP4k8iG", 282.63795424 * 1e8}, + &TokenPayout{"DsVifRU9dVN12PrdrBXVSS6mwXSMymT4vg6", 282.63795424 * 1e8}, + &TokenPayout{"DsViPAN9pm3aqWGb43cdQbfYrDtbNPwQhyK", 282.63795424 * 1e8}, + &TokenPayout{"DsVjEfiKDXVhyBRASD2rb2vfrxWyAwocxEL", 282.63795424 * 1e8}, + &TokenPayout{"DsVJixve3uURKpVNL2x8reKVTCf4y9wCgE2", 282.63795424 * 1e8}, + &TokenPayout{"DsVJxt33LfJP37zB5srQVhX8M75psFPGwax", 282.63795424 * 1e8}, + &TokenPayout{"DsVk9vZsxKFZhXercdRdPcVJgHZGvsKcDj7", 282.63795424 * 1e8}, + &TokenPayout{"DsVke2HrKVYH5zycuiFmHZJHyfMgmqwpHBD", 282.63795424 * 1e8}, + &TokenPayout{"DsVLTSwttPoG75o6vG2GN9DfykZku7r82QQ", 282.63795424 * 1e8}, + &TokenPayout{"DsVLZC7aigVf3cMZy5xqUwsP7ZB8SqFTPBt", 282.63795424 * 1e8}, + &TokenPayout{"DsVm1ktXpMiQBd1cb7fmH1jKc8ZHDzSauFy", 282.63795424 * 1e8}, + &TokenPayout{"DsVm67cREJeJ1q8LXi6BcTNnEKGERRxWcVC", 282.63795424 * 1e8}, + &TokenPayout{"DsVmMivdMobi5dEdr7VSMWUAvJQwyGTPmKo", 282.63795424 * 1e8}, + &TokenPayout{"DsVnj23Lmd28dd43VYKXGudGVyq3bCyudnG", 282.63795424 * 1e8}, + &TokenPayout{"DsVnKQG3r86azpafAGBNnVHVeQRUAA1tFa7", 282.63795424 * 1e8}, + &TokenPayout{"DsVNMuC1s43gadwKAogxqkQafpNT9RPCnGL", 282.63795424 * 1e8}, + &TokenPayout{"DsVo6oDuTo2vcUthEBSw1A4MpGCoVkhEvZK", 282.63795424 * 1e8}, + &TokenPayout{"DsVoAuwCaNaggfcDem1cDNuyL1fXMZscfkp", 282.63795424 * 1e8}, + 
&TokenPayout{"DsVoHT7z9D6XVD9TrPp4J6ynyRCL7vkJYbK", 282.63795424 * 1e8}, + &TokenPayout{"DsVP91tEqRnAK7oXLmjPyCvntDdReDhzJzi", 282.63795424 * 1e8}, + &TokenPayout{"DsVP94zfD5JxfbDonNEQ2qYSd7mQ2VaTkqG", 282.63795424 * 1e8}, + &TokenPayout{"DsVpbo8U8KisjnAjCmPfkcaQZxogNCJFNSL", 282.63795424 * 1e8}, + &TokenPayout{"DsVPComBx8n9DPABMy46mSUwup7wFdHAjRZ", 282.63795424 * 1e8}, + &TokenPayout{"DsVPsZfBurkghqygmjdUX2VcvAiLUao58SY", 282.63795424 * 1e8}, + &TokenPayout{"DsVPU3zacY8nrHQ4kFNU3Bc4M7YemJLxoaF", 282.63795424 * 1e8}, + &TokenPayout{"DsVQ27B2GCnr2UMMR8xWGZuqEB8JmHrLRSe", 282.63795424 * 1e8}, + &TokenPayout{"DsVq63V3y2r6pPU31h1pGAQnF7aikiyJeTc", 282.63795424 * 1e8}, + &TokenPayout{"DsVq8jRmPUwW2p4X26WyEiGQR33ebN7LWZD", 282.63795424 * 1e8}, + &TokenPayout{"DsVqKnbLM7xDtFEjANozbsAS2tURj5kedZY", 282.63795424 * 1e8}, + &TokenPayout{"DsVQZDyKaBy8NxvNbpXh7yCaA6jawAGhJyw", 282.63795424 * 1e8}, + &TokenPayout{"DsVrdeFSCPmpMkKviEyzzorGhQ7yyhzY2xm", 282.63795424 * 1e8}, + &TokenPayout{"DsVRDTYnXzWCfv7MUsE6RYZ37yzxv9whUnW", 282.63795424 * 1e8}, + &TokenPayout{"DsVrrvo68NsXpvPpfn2HcmxeYYQLvKg7HL3", 282.63795424 * 1e8}, + &TokenPayout{"DsVrtSkLA74gu1EdDmSgWacRNT1bEsQyWQF", 282.63795424 * 1e8}, + &TokenPayout{"DsVrz7JY5U3LEXMpPz6zmZ1oGqZHTaMPTFW", 282.63795424 * 1e8}, + &TokenPayout{"DsVSh1mD4iBWHcAWWu6F1CEwihBfogiHngr", 282.63795424 * 1e8}, + &TokenPayout{"DsVt6wVh7DGjGAegGwybMAXNPR1XTU3SnYw", 282.63795424 * 1e8}, + &TokenPayout{"DsVTaB4buKAuez599qpffMSwUPrtDRRhDJQ", 282.63795424 * 1e8}, + &TokenPayout{"DsVtrCugL7G5j1oobgyLUgwmFCxTqVrQgQG", 282.63795424 * 1e8}, + &TokenPayout{"DsVTVhyKq8yvwq7CiqFX6m7NpSHJquXx6Uc", 282.63795424 * 1e8}, + &TokenPayout{"DsVtZxupmrkE98zUsaSfe24Ebxy6N6cRjdf", 282.63795424 * 1e8}, + &TokenPayout{"DsVua6bAfJQBdE7mjX1sC5d3PBTwQvsndnS", 282.63795424 * 1e8}, + &TokenPayout{"DsVUbZaK5i1jWjPkLqYApHPWcKghoK6RNxY", 282.63795424 * 1e8}, + &TokenPayout{"DsVuD44kiGxftyigCMjy1icA7THswVv6Gmt", 282.63795424 * 1e8}, + &TokenPayout{"DsVueG46bKvQ7ooDDTrTdePPQveDcHR4StY", 282.63795424 * 1e8}, + &TokenPayout{"DsVuLAE6AbiWKRYiqfDBZJg6A3BqTQ7Komj", 282.63795424 * 1e8}, + &TokenPayout{"DsVuP63mEDmZSWPashA1hu5MNzHdyxZLqP6", 282.63795424 * 1e8}, + &TokenPayout{"DsVUQe9prY1KkqAFSTPXfETifAbGGyv3xV2", 282.63795424 * 1e8}, + &TokenPayout{"DsVUWec6opM9s9F6infVz2WheTjccVhhShw", 282.63795424 * 1e8}, + &TokenPayout{"DsVvcWQYNSMZr4gGgxLkg4CccZkv5XNQGMU", 282.63795424 * 1e8}, + &TokenPayout{"DsVvtstUUJUi8W2C9CfFP1rMude53pmq8ZZ", 282.63795424 * 1e8}, + &TokenPayout{"DsVWpkMiaPd87omip1KK2nhAfX4Jnvekanv", 282.63795424 * 1e8}, + &TokenPayout{"DsVWti5Trwpnu9axDBND4STyQnzBTTos4gS", 282.63795424 * 1e8}, + &TokenPayout{"DsVwwdjdbzkGqUGsxNNYxuvnYKEv8Cim25X", 282.63795424 * 1e8}, + &TokenPayout{"DsVX1KvrAJnGJvdKaxofeBAKkUAGEdKKtwo", 282.63795424 * 1e8}, + &TokenPayout{"DsVXhYQzjSPyMThLbLWhiqGg9fgWCCHVkPr", 282.63795424 * 1e8}, + &TokenPayout{"DsVxxMYucAuMDexG6wB7YUdSLEFyuzBoWa3", 282.63795424 * 1e8}, + &TokenPayout{"DsVy2guPZk94RJrBPXfgPRtkwT3PcrukkGi", 282.63795424 * 1e8}, + &TokenPayout{"DsVY3mNwNJUJYpmw9BWJZvR94CnX6zMp5cm", 282.63795424 * 1e8}, + &TokenPayout{"DsVYDPhkWruo4JLLaEp8jMGW1MN4bugwxNt", 282.63795424 * 1e8}, + &TokenPayout{"DsVyinCqizVgyKSjTcabmTQvms9he4nDdfH", 282.63795424 * 1e8}, + &TokenPayout{"DsVyQAXZw7amJEBa9FNSuiQo6JmZThdfcFQ", 282.63795424 * 1e8}, + &TokenPayout{"DsVywr11D79HoViBZDpF8vgTWbV6spnTJMP", 282.63795424 * 1e8}, + &TokenPayout{"DsVyXqEmctyM2Wi2gSULTosCXgmvSYx9xSj", 282.63795424 * 1e8}, + &TokenPayout{"DsVyzbPP3CG6spcva4AGwMucmKsxSKxTvzE", 282.63795424 * 1e8}, + 
&TokenPayout{"DsVzAB4RU3wNgtmAFsgRb575J4kYchgjxvW", 282.63795424 * 1e8}, + &TokenPayout{"DsVZe8u9RhL71Xk1siKe3chbvRy4fzrqzcN", 282.63795424 * 1e8}, + &TokenPayout{"DsVZHqSPhdwbjrDEuuQmp8ESMdNuPzNhiZQ", 282.63795424 * 1e8}, + &TokenPayout{"DsVZKe2g44QAGiuYBEw6JFowKNoSrPHFNnN", 282.63795424 * 1e8}, + &TokenPayout{"DsVZNQHKTRb9argqfbN8KDQr4Qy2nhjYs2k", 282.63795424 * 1e8}, + &TokenPayout{"DsVzvwmrBFRSvqjLczd8ursB71s13HFZxUC", 282.63795424 * 1e8}, + &TokenPayout{"DsW12nTWBRQEWqpbhWWbXSYEEuENz2cmGRz", 282.63795424 * 1e8}, + &TokenPayout{"DsW1AsxvVG1Z6xH2pUTHKD4DFn2wHAgCZ7c", 282.63795424 * 1e8}, + &TokenPayout{"DsW1CL5CTjmnHHn2ePy6jB6J7jJgRbJxe72", 282.63795424 * 1e8}, + &TokenPayout{"DsW2chWwvvmbFCSk3iYoRGJidcSNxDi72jx", 282.63795424 * 1e8}, + &TokenPayout{"DsW2xpVofjkgZWdL5SwXacpwodrgLDmkiGY", 282.63795424 * 1e8}, + &TokenPayout{"DsW3DUtfN3KZAMYbmDJK7tsRWmxGhupXE9R", 282.63795424 * 1e8}, + &TokenPayout{"DsW3e4UwDP4JE4FvaQTWPWoE45naQmoEaYQ", 282.63795424 * 1e8}, + &TokenPayout{"DsW3Un6dJyLtdKPrdZEYCDPNFR8dHxp5nbt", 282.63795424 * 1e8}, + &TokenPayout{"DsW4d9Pn6Su5EiAgoZ9eqBpdPHM7mCMaThH", 282.63795424 * 1e8}, + &TokenPayout{"DsW4zHi9FiosLHMk9UZPgT2SSmLT8kMepnr", 282.63795424 * 1e8}, + &TokenPayout{"DsW57Kaw7Tn67DZmqC8wgUgKN6xwk5SD8qf", 282.63795424 * 1e8}, + &TokenPayout{"DsW5NkBy2SvzAnEgKFsfqt9yxmUPG2y9BrF", 282.63795424 * 1e8}, + &TokenPayout{"DsW5rivbVkPsJE4cCWPEmMPkGjSiZ3ME2Zt", 282.63795424 * 1e8}, + &TokenPayout{"DsW5tUzZyV7M69tUS7tG1vz3AU4Lg2q6opw", 282.63795424 * 1e8}, + &TokenPayout{"DsW63341rvADrxhU1WotbBC6E8eQax7EtFZ", 282.63795424 * 1e8}, + &TokenPayout{"DsW6NNCC3ViYyUEc1PU3wN8iAFL4vbN7CmC", 282.63795424 * 1e8}, + &TokenPayout{"DsW6tAm4WL2z3GPuSQfzSQBo9xjRVF7PUyc", 282.63795424 * 1e8}, + &TokenPayout{"DsW7hWMmH8eyHLvinkscds47GjMbKrHJiJF", 282.63795424 * 1e8}, + &TokenPayout{"DsW7VHugdBid4HJDQiHdZu8wiZPE6MTz6jV", 282.63795424 * 1e8}, + &TokenPayout{"DsW88mffRJeaBsVG3NMjfDVNYyc7zmDEfHZ", 282.63795424 * 1e8}, + &TokenPayout{"DsW8ndztW8wPPjSyBz5r8jL9BqZpsaffYAV", 282.63795424 * 1e8}, + &TokenPayout{"DsW8tWBP4zqie6tgz2TnEBQXVpdq7gKcdin", 282.63795424 * 1e8}, + &TokenPayout{"DsW9jyRicBxuDSyTxYn2vRX918vfafVWCKR", 282.63795424 * 1e8}, + &TokenPayout{"DsWAea86ijUBRPPHvnLUxzXhRzzeRaXawxk", 282.63795424 * 1e8}, + &TokenPayout{"DsWaqqCD8W3uYha3ovoUSCNq1NDubAxS9jN", 282.63795424 * 1e8}, + &TokenPayout{"DsWB6pqDWd1oe1JKuFkBdvUGCneVFoS2EK3", 282.63795424 * 1e8}, + &TokenPayout{"DsWBBrbVXpc1fZamteNh9AZQ8Qh1LEMEGXk", 282.63795424 * 1e8}, + &TokenPayout{"DsWbJLmJhB5CqkHKhjM5iMSLXoAJP8x5Dwg", 282.63795424 * 1e8}, + &TokenPayout{"DsWbPGikYsZXpZBK96fY2UpDT5EdfAT821r", 282.63795424 * 1e8}, + &TokenPayout{"DsWbWf3QqnmrEq3FbdvpyTBJT3bxJeCatki", 282.63795424 * 1e8}, + &TokenPayout{"DsWc6k1TR8Pe6fWvjohPiJwhQSLGatzTC2j", 282.63795424 * 1e8}, + &TokenPayout{"DsWCdji7syM1TNCu2H9pCzz9Zeus7n1948c", 282.63795424 * 1e8}, + &TokenPayout{"DsWCNkGya7F9S3jwc9HvPJh4cKynEtMhrBe", 282.63795424 * 1e8}, + &TokenPayout{"DsWCRyZQj4s4CiAg1mG1SZ2HCVFeyp2Ke1W", 282.63795424 * 1e8}, + &TokenPayout{"DsWcUUwxivsXCqarJ1vSYYT6EVVY1riNDnq", 282.63795424 * 1e8}, + &TokenPayout{"DsWD98srNpGsn4GiWjGEBrLHR7eA2G62Ccx", 282.63795424 * 1e8}, + &TokenPayout{"DsWDA1NF3BvtXqcCCjior3J9copK56FUmJD", 282.63795424 * 1e8}, + &TokenPayout{"DsWDAHvZzuvBjFoUosrshB2Pu6sqHfnfUeh", 282.63795424 * 1e8}, + &TokenPayout{"DsWdFrgfAQXQmCgsg4Gsth3dyfSxeTWbc4f", 282.63795424 * 1e8}, + &TokenPayout{"DsWdJ1NuHumqJXLmt4fQMR2sM6MNf3VfFfy", 282.63795424 * 1e8}, + &TokenPayout{"DsWE7yBzmLt5irNbevXqXybz9GyRXxs1kf4", 282.63795424 * 1e8}, + 
&TokenPayout{"DsWeeczBRvVFuSfhE91MYu3JLSxJfccSkSU", 282.63795424 * 1e8}, + &TokenPayout{"DsWF34cVNUBJnRqgWWJD19EMwLxkwRJjb9N", 282.63795424 * 1e8}, + &TokenPayout{"DsWfbQjFW7dXBQ149XnUsnXPP4VzVxTu3Mc", 282.63795424 * 1e8}, + &TokenPayout{"DsWFmWNHtMsTgvdPr23EGLBboRWnyGyUo4u", 282.63795424 * 1e8}, + &TokenPayout{"DsWfQqzkJrXKboo94YbC9RoZTfDTXNQ1BMw", 282.63795424 * 1e8}, + &TokenPayout{"DsWfWvCscm3HSjUxPrLQtmn5GnyLEt6fYsU", 282.63795424 * 1e8}, + &TokenPayout{"DsWFyFzurfYznSDFqzkt4K4QdQ2aQam56pL", 282.63795424 * 1e8}, + &TokenPayout{"DsWg6irfoNZrXccwtmn82fZn3CZkYaHot2Y", 282.63795424 * 1e8}, + &TokenPayout{"DsWGA4xGh7FgWcWbFRZpPZqMP3XBa9kzqAx", 282.63795424 * 1e8}, + &TokenPayout{"DsWGjM5V6ujYKeZbXEpi4WDWLMpkNDjFg3f", 282.63795424 * 1e8}, + &TokenPayout{"DsWGLCLDHkqtRfdiZhbrYLDb9wH4n8MQLtk", 282.63795424 * 1e8}, + &TokenPayout{"DsWGvHua3iLDsogTr9TikVAj3DdszuJiCyc", 282.63795424 * 1e8}, + &TokenPayout{"DsWgVX9HYrQdk2tcyWczpPfvfBzbEeV68HG", 282.63795424 * 1e8}, + &TokenPayout{"DsWh6wX6uJugXaHZGHtAYSCyVnf5qb9jPdZ", 282.63795424 * 1e8}, + &TokenPayout{"DsWHAJcewEGQVTHpGyZE9ZZA7FLbpQjW6gG", 282.63795424 * 1e8}, + &TokenPayout{"DsWhcHXXQMi2YtuPCSLmRMGcyHGDQLmdmAa", 282.63795424 * 1e8}, + &TokenPayout{"DsWhcxabdmZo5PVnSjNXTowKJCYFNHkGd5w", 282.63795424 * 1e8}, + &TokenPayout{"DsWHD3TTrG71PczgTmbRdCHrjhS9Msqc8V6", 282.63795424 * 1e8}, + &TokenPayout{"DsWhf5m1MhY88uFSgCMQnt2MPy3Nn3Vrse1", 282.63795424 * 1e8}, + &TokenPayout{"DsWhh17rqs89kbW8gwaeovnwtkmUKuqWSw3", 282.63795424 * 1e8}, + &TokenPayout{"DsWhK53x2zjzm4e9FPZWLhupTo89HkZC4FW", 282.63795424 * 1e8}, + &TokenPayout{"DsWhvrg63KSiZZ7ZM2JS2T71FBoR9R6X5SN", 282.63795424 * 1e8}, + &TokenPayout{"DsWiNj17EPKnNWgYX9sqVwzQieBWweQ2ieY", 282.63795424 * 1e8}, + &TokenPayout{"DsWioJzJpxZLeb3XA2r3xLYRbxinqPDonBG", 282.63795424 * 1e8}, + &TokenPayout{"DsWirYV6BN9qviiLWbZcGTJYrJgqYmdGU5r", 282.63795424 * 1e8}, + &TokenPayout{"DsWj23breDyYopYT7s84eKGTKihq8z3Mk3m", 282.63795424 * 1e8}, + &TokenPayout{"DsWJahN3vzJ4x67GEaPmh3Zfq5Y4wRc4p6v", 282.63795424 * 1e8}, + &TokenPayout{"DsWJAZUNLpWwfVab1jfYi9cHx3BwBkQmJfG", 282.63795424 * 1e8}, + &TokenPayout{"DsWJEqnB9onuNU5VsCXTLd1dcXkTQ7uVzB4", 282.63795424 * 1e8}, + &TokenPayout{"DsWJmLjNETFk1exdDQTwLPjqxNmMEpvLbZc", 282.63795424 * 1e8}, + &TokenPayout{"DsWJrg9tKiYyEuaRzzdMcASgBMZGqLWbAGc", 282.63795424 * 1e8}, + &TokenPayout{"DsWjYm943BqEpjM1JsMVkLY8X7GcqB2dD1H", 282.63795424 * 1e8}, + &TokenPayout{"DsWKea8AbdCnyVBvwdwbPrttrZTY2nbhL1N", 282.63795424 * 1e8}, + &TokenPayout{"DsWKhdeUL69w5G6PKr5KqEGBg4YcxKtEvim", 282.63795424 * 1e8}, + &TokenPayout{"DsWkJjFX68ecDik1XnufpLZ4czHfG3ztzN1", 282.63795424 * 1e8}, + &TokenPayout{"DsWKMu4scAGXFnMox39B3rvXwwVuXu1RKFz", 282.63795424 * 1e8}, + &TokenPayout{"DsWLeqfUgfM4ybWogUDJpgGudGhL6oXksrH", 282.63795424 * 1e8}, + &TokenPayout{"DsWLh8g4Zddbj3qUZwA1YgQBa4vzHd28Fqw", 282.63795424 * 1e8}, + &TokenPayout{"DsWLM2RvXjZAM1jExvYm9S7VsX5NqB1CZge", 282.63795424 * 1e8}, + &TokenPayout{"DsWLyuPaYYqtJNkf2bmkNzuBjeGGFzrMmt7", 282.63795424 * 1e8}, + &TokenPayout{"DsWm8KYR6LAXXu8tmcpVVZQhhevmSKpnDRB", 282.63795424 * 1e8}, + &TokenPayout{"DsWN914o1wgC3aazBErRstEVCagyXkVXnxp", 282.63795424 * 1e8}, + &TokenPayout{"DsWNBHYjq2jkH1eqPX5XQ3J8wmb9mp4e8hs", 282.63795424 * 1e8}, + &TokenPayout{"DsWnf6hd379qYeFrsqVZ1hdtEfuFgVejzJu", 282.63795424 * 1e8}, + &TokenPayout{"DsWnK3WyMvkpPs5zvmjjtHgKGhXwVqPZKCW", 282.63795424 * 1e8}, + &TokenPayout{"DsWNknhnPUikjUN5didKvacT3tsxUYEJLRR", 282.63795424 * 1e8}, + &TokenPayout{"DsWnstUsLb4m2jDKCeYwfgHjwCxykaqZsDA", 282.63795424 * 1e8}, + 
&TokenPayout{"DsWo4yEAyfLyvcmrsuCQUtqaJaZ5wiJyZgj", 282.63795424 * 1e8}, + &TokenPayout{"DsWoiNTRS3bE1PjAU1VsMSFZtZASnPKUN6s", 282.63795424 * 1e8}, + &TokenPayout{"DsWpDkN2Lb6bDHpNcaxFkfv2bEqsM74cEFZ", 282.63795424 * 1e8}, + &TokenPayout{"DsWqBEHgT4yjepBZR6As1gsHY6WgNueKLFt", 282.63795424 * 1e8}, + &TokenPayout{"DsWQncuP19PY7doG2H1TaR8JPhYeXnvappN", 282.63795424 * 1e8}, + &TokenPayout{"DsWqtuj2bpuXCPubomRSY7A9Q8yE5Tdt3gD", 282.63795424 * 1e8}, + &TokenPayout{"DsWqVfebzLkJmjadBhgTemiK6wrDWaPPGZL", 282.63795424 * 1e8}, + &TokenPayout{"DsWRCjwFuE7WkGZ6j98UzZchC3H6SkkENjH", 282.63795424 * 1e8}, + &TokenPayout{"DsWRdymevNWjpZtdHhACZ8spWB4cbPc9zLH", 282.63795424 * 1e8}, + &TokenPayout{"DsWrjo39PLhfroyTF96p4tq7KRgRkbA77Hb", 282.63795424 * 1e8}, + &TokenPayout{"DsWs2gH1ATeptkVLYe7XPYStNRygtK7kGVG", 282.63795424 * 1e8}, + &TokenPayout{"DsWS2qLvvdqRMqzcAHeoMg8tcaoxdTp6K8D", 282.63795424 * 1e8}, + &TokenPayout{"DsWSDqYdkjwdMEpFZFQQS8bvfvc3xCXTUby", 282.63795424 * 1e8}, + &TokenPayout{"DsWSRvJtn7mqmh92umTMvx5WKk5gThr6UfQ", 282.63795424 * 1e8}, + &TokenPayout{"DsWSvd6BhtYhXBcF1ejRJ7WK9UgMpXJ6DNu", 282.63795424 * 1e8}, + &TokenPayout{"DsWSwqznrSuqYeM8NWs5YZ9T2mh3R8u7CLT", 282.63795424 * 1e8}, + &TokenPayout{"DsWtk3cFb1ya3MKNfdrC99vGTxVZuAqscvD", 282.63795424 * 1e8}, + &TokenPayout{"DsWtnDry7cpLeMmxFsyszUmgbLjxhMHLXay", 282.63795424 * 1e8}, + &TokenPayout{"DsWtwWGyf1jxEXrsJhY7X369CEdNi6z84id", 282.63795424 * 1e8}, + &TokenPayout{"DsWTzpdxqM8qFHiEWhG5viUd18o5QDNDgqb", 282.63795424 * 1e8}, + &TokenPayout{"DsWuPZUnsxkGtKrbGxsD4Ed4Wb7VpsbW2RJ", 282.63795424 * 1e8}, + &TokenPayout{"DsWuRpwknF7Ca29kLbKm1Ff7yjD9MACR6x7", 282.63795424 * 1e8}, + &TokenPayout{"DsWuZEdc5pJVPqpXU1G3HthtCnmFPeHnYdn", 282.63795424 * 1e8}, + &TokenPayout{"DsWVCQcfgAdRporWYcmSE3UnbM5ftJa7o1t", 282.63795424 * 1e8}, + &TokenPayout{"DsWVpNYnvD2rT8VcAJpHPxWP4dhDk8E1h2x", 282.63795424 * 1e8}, + &TokenPayout{"DsWVQV1S1yj21MScLbAumAwYbSVzgBoxZ2W", 282.63795424 * 1e8}, + &TokenPayout{"DsWwHLrXcaTM86Ms24J7QTUufeidxpwicYF", 282.63795424 * 1e8}, + &TokenPayout{"DsWx8ga6sfYYG7s4yv9XBDQU77GobRLT8wd", 282.63795424 * 1e8}, + &TokenPayout{"DsWxCCZZqJbk3NS59Q1pDZ8Dfk8VeHpL2z2", 282.63795424 * 1e8}, + &TokenPayout{"DsWXGaZNQfCU69q6LzYBKhgAqp1dhMSLSCP", 282.63795424 * 1e8}, + &TokenPayout{"DsWXpshVYxbhbdScBWmr9V523yBU9wviu6M", 282.63795424 * 1e8}, + &TokenPayout{"DsWxqe1fowrcpizf3PhFDpGJavfUwE3LXNx", 282.63795424 * 1e8}, + &TokenPayout{"DsWY2gEKE8eFKgRabeaG7YzhijDmMukWCis", 282.63795424 * 1e8}, + &TokenPayout{"DsWYbhKQw6tZradDFp5eSN9zgBXCBWZdSQs", 282.63795424 * 1e8}, + &TokenPayout{"DsWYxeHeoJ88p5ExgHYb6vZEMXZWKzTRpHR", 282.63795424 * 1e8}, + &TokenPayout{"DsWyzecFFDQzYQrW4zWL45c2xrymBAFaG9x", 282.63795424 * 1e8}, + &TokenPayout{"DsWZSTi9b3A7ybFMCiixwAaDMoMFYFYaQZ6", 282.63795424 * 1e8}, + &TokenPayout{"DsX1BTMxTVjBNDmjCs3qz97W6oZGuHBrcEE", 282.63795424 * 1e8}, + &TokenPayout{"DsX1DiZqxfr7nsjzZ68pBU1ER1Y4bEzdCN2", 282.63795424 * 1e8}, + &TokenPayout{"DsX3homu6NsTN1hgN3Hm561KZXtQXfZJ1xB", 282.63795424 * 1e8}, + &TokenPayout{"DsX3ogL8MWn6GNLfba6rZfVR2TEiin2PvSE", 282.63795424 * 1e8}, + &TokenPayout{"DsX3u7K626VenKhJ1pR2Rkg1sXMoB3Y5kJo", 282.63795424 * 1e8}, + &TokenPayout{"DsX4aqVaE6f31sDRw5aPKEVMXs72BBFSnuu", 282.63795424 * 1e8}, + &TokenPayout{"DsX4Cywxb5SMfQrwrB7NXMgfLBU8gofZeGN", 282.63795424 * 1e8}, + &TokenPayout{"DsX4o5R5trkjZ9k5HnTNRciNHpjpicu5PBn", 282.63795424 * 1e8}, + &TokenPayout{"DsX4QE4Cz6GXmaKo3i7MLsLhLj64b2mzhDT", 282.63795424 * 1e8}, + &TokenPayout{"DsX5EV73SDRMEqavP7cP6pp1Yn3G9Pcjxtc", 282.63795424 * 1e8}, + 
&TokenPayout{"DsX5t4DPKmCxHNJ1F2kRtHQrbMvpBzkqggH", 282.63795424 * 1e8}, + &TokenPayout{"DsX7ixbnaKWk4tibPNtHvEHdS6B9ppJ5j4W", 282.63795424 * 1e8}, + &TokenPayout{"DsX8Ambdi7a7xuieJT8gM62DyHvDFkHiSQS", 282.63795424 * 1e8}, + &TokenPayout{"DsX8V6SzR4wRSUCb5D43e6vyy4KvJTvpWiG", 282.63795424 * 1e8}, + &TokenPayout{"DsX9BWqZeRrsgdp66nJzyBDibgRj5CqAcCh", 282.63795424 * 1e8}, + &TokenPayout{"DsX9eapWisqgcK45BrvaRxAwbq32sXiBLah", 282.63795424 * 1e8}, + &TokenPayout{"DsX9sxaLyt3T7ZpAap9B4zYQrHto2vLZzA8", 282.63795424 * 1e8}, + &TokenPayout{"DsX9uvoV1wKeiQZgxWzm3HjgLbsZPNvQSuJ", 282.63795424 * 1e8}, + &TokenPayout{"DsXaCfuh4TUs5AsNzDXXy76x6odBzr76TR7", 282.63795424 * 1e8}, + &TokenPayout{"DsXAgNYVUY6xxhJ58ypkjnhbsbCSYAKfq7k", 282.63795424 * 1e8}, + &TokenPayout{"DsXaMnwgNpZWmbRyhzC2eLY8Qen5DujtZ9h", 282.63795424 * 1e8}, + &TokenPayout{"DsXAntwEWTqC7hPwEuVm16xK6wT8f5QAWsL", 282.63795424 * 1e8}, + &TokenPayout{"DsXapg5YsqzgaU6jWXxNaHbkdFRnsozYAz6", 282.63795424 * 1e8}, + &TokenPayout{"DsXAsLt4kGaA4rNV8e9Tu54nRgEjtae3vNd", 282.63795424 * 1e8}, + &TokenPayout{"DsXATQk9QsGjbJqfEu1PGGxNpodX7SEcPsk", 282.63795424 * 1e8}, + &TokenPayout{"DsXauT6h7VZdcAntcwXMH3EjfvANTwRuhgh", 282.63795424 * 1e8}, + &TokenPayout{"DsXbMGk7PtgMqYDKsnebF2PFbBQdCJvBzGy", 282.63795424 * 1e8}, + &TokenPayout{"DsXBN5fpzabn6RN6mDveehofgFQaubacwND", 282.63795424 * 1e8}, + &TokenPayout{"DsXbvfWRBpLNUqjvfB7jmKMyX6K3w3P2dtX", 282.63795424 * 1e8}, + &TokenPayout{"DsXbwUYVaVB6cwZBMCZKd229z4RW2N3Webq", 282.63795424 * 1e8}, + &TokenPayout{"DsXcd8FLUgKHbTRL9Bed5NDfUFfG6XHkNvn", 282.63795424 * 1e8}, + &TokenPayout{"DsXCRp7w3xLjeVDun1WZ6sF9J6AxPoi5poV", 282.63795424 * 1e8}, + &TokenPayout{"DsXcu2CjHCUhxKVuoZqTWNkp7tABYfTVrdf", 282.63795424 * 1e8}, + &TokenPayout{"DsXCW6uyLd2YNVJLpVxyi1YZKJbFWCWu3BR", 282.63795424 * 1e8}, + &TokenPayout{"DsXcWnanAF1XrwEiU2EpWEbvtxrkUWr2v7E", 282.63795424 * 1e8}, + &TokenPayout{"DsXd9XfhaJhqtoFnGrqMGE3Z3KEw8pgsb6J", 282.63795424 * 1e8}, + &TokenPayout{"DsXdZLScxGENLpnR11C8teRPDzr4QM9Q4Tf", 282.63795424 * 1e8}, + &TokenPayout{"DsXEMoiP9z1X2Tay4PofHffQFh5aCXGRyNJ", 282.63795424 * 1e8}, + &TokenPayout{"DsXEzJtnUSpZdtmRfZdxYw7jjAkcyADDkTg", 282.63795424 * 1e8}, + &TokenPayout{"DsXf3iMh3mKDTn2hGgwoJukCcKWqDq1AfuG", 282.63795424 * 1e8}, + &TokenPayout{"DsXFaRgA6CXGMHkYF3mSBnCMAJTAxFa7gc1", 282.63795424 * 1e8}, + &TokenPayout{"DsXfn1m7cb1RFuZPReS2gMeygNL7nzmDkej", 282.63795424 * 1e8}, + &TokenPayout{"DsXfpypg1Ex9LdsL55WHNJwP7Wfyfpr5Z5a", 282.63795424 * 1e8}, + &TokenPayout{"DsXFWDXwSNEJGYcBHwiATM8kjX1frAVuzmo", 282.63795424 * 1e8}, + &TokenPayout{"DsXGw5Noc1zxxyeU5hUioT99n155vCJEjDQ", 282.63795424 * 1e8}, + &TokenPayout{"DsXgWrPtnpAwUNJo7FK7UKYFVv12UAajH9S", 282.63795424 * 1e8}, + &TokenPayout{"DsXhEwc8qFRyPv8LmQTrkPkENVahFnFhJPi", 282.63795424 * 1e8}, + &TokenPayout{"DsXhfKYn8GxyEG6w3XUhk9TdLPFWewUs39K", 282.63795424 * 1e8}, + &TokenPayout{"DsXhivDP7Nf7R6j25w9b1UCpbHMw76mKxZz", 282.63795424 * 1e8}, + &TokenPayout{"DsXhScjdUau65WoyxRih3VpkYZME72JqQG6", 282.63795424 * 1e8}, + &TokenPayout{"DsXi9ap8eWtyYBcEW5EdREciC957SS4myHu", 282.63795424 * 1e8}, + &TokenPayout{"DsXix1N3YMV6kN2ML4aRN21PVbQGiudE8fz", 282.63795424 * 1e8}, + &TokenPayout{"DsXJ9hi4aXEnTvK93mv9F9a51Qx9o7iFqMQ", 282.63795424 * 1e8}, + &TokenPayout{"DsXJLjvSWUyqNSgxgoxjwXR5zT7VE4M4Vq5", 282.63795424 * 1e8}, + &TokenPayout{"DsXk4AvXvHC1nZvi2wEwsP3mmvLhGvKn5kv", 282.63795424 * 1e8}, + &TokenPayout{"DsXk7jDrcdr3nYns4xS66cgX5e2RmJsKxQ1", 282.63795424 * 1e8}, + &TokenPayout{"DsXK7wrWaAN2Xr2anmuNH1hFWKYHByQYATH", 282.63795424 * 1e8}, + 
&TokenPayout{"DsXL7Eyca2T1YfDyZ3ssjhJwpYoxtjF9FU9", 282.63795424 * 1e8}, + &TokenPayout{"DsXLCkxJyzh3jqPU5KdcX7LtevXVhYSnh2u", 282.63795424 * 1e8}, + &TokenPayout{"DsXLHoYXvrFYuaLkLemk3iHmSAHJHaqBXHq", 282.63795424 * 1e8}, + &TokenPayout{"DsXLNmuFktn53J5RvNxR4SqvEtW3p4eMFzY", 282.63795424 * 1e8}, + &TokenPayout{"DsXLQnXPJsAzhiubScaCta2zCzf3hx258qV", 282.63795424 * 1e8}, + &TokenPayout{"DsXLQZgwgkL31HuHrB2L7hYbKWR2VJ7MSm6", 282.63795424 * 1e8}, + &TokenPayout{"DsXm2VqSSS86EAxdB9yd7fRveSpDPbZYz53", 282.63795424 * 1e8}, + &TokenPayout{"DsXMR1bJ5ZtCtJZ4agtyyextgr1vG96mota", 282.63795424 * 1e8}, + &TokenPayout{"DsXMvc4R1iEGvqbrdSfTXRKt2kW3s4iPw21", 282.63795424 * 1e8}, + &TokenPayout{"DsXn25f33DyyvbYiiSbTtgmTDmEyKMxBmpF", 282.63795424 * 1e8}, + &TokenPayout{"DsXnrGNeZGrSZBN2PSDdBvfYzz9irkK3UaU", 282.63795424 * 1e8}, + &TokenPayout{"DsXnSq37FDYj47GeRGpKZauVkpEhpBkVQHM", 282.63795424 * 1e8}, + &TokenPayout{"DsXNWPk1GhYsu5wLdWFrAH53pzBzAFTtwnX", 282.63795424 * 1e8}, + &TokenPayout{"DsXnzf2H7H2aWRPgcrwJA26FE57uwB76DKh", 282.63795424 * 1e8}, + &TokenPayout{"DsXoN1MVBTZoC3adDy1orqQmy4QBwt6eTaH", 282.63795424 * 1e8}, + &TokenPayout{"DsXppE36V93uF3uWJq6bQWD6b5QWr7L93Mr", 282.63795424 * 1e8}, + &TokenPayout{"DsXQ5r6BScWRwoD3wJXnpxeua6QxhCALxkF", 282.63795424 * 1e8}, + &TokenPayout{"DsXq9LyJrNLzTodrEDxEzxsL9ajUjsjZ9H2", 282.63795424 * 1e8}, + &TokenPayout{"DsXQjk2LhmANwvh4KypAE61gFDRQAweGiw3", 282.63795424 * 1e8}, + &TokenPayout{"DsXqk71MRSSQpTxN6rKZjQYpkFiBC5YqUwn", 282.63795424 * 1e8}, + &TokenPayout{"DsXQWhfqdifxZYfYejmsBUSkVMQ6o7arV8a", 282.63795424 * 1e8}, + &TokenPayout{"DsXqWLp2396qMgEUc15X1vvd81wkB8W1d4Z", 282.63795424 * 1e8}, + &TokenPayout{"DsXqxUfpv9thhFqxm2hDd2WjSPskCFMNM3G", 282.63795424 * 1e8}, + &TokenPayout{"DsXr4sfyhA91sTHrxaKUucB71feqbSevDsn", 282.63795424 * 1e8}, + &TokenPayout{"DsXRdfKnBtVd25vBXPsnPTY1z8JVUXXtYFf", 282.63795424 * 1e8}, + &TokenPayout{"DsXrjnzCt5sQhWxDGo31XP6FcA9kkR9np4J", 282.63795424 * 1e8}, + &TokenPayout{"DsXRt3mxsgiA5uYSLxk65xgWCsnu6Xwp7ue", 282.63795424 * 1e8}, + &TokenPayout{"DsXRtoN9hka5wdVSs1FKdFXwf4QUNeYLbr9", 282.63795424 * 1e8}, + &TokenPayout{"DsXSFctNKv1dSz6SxszSnf1JdJ5LvuzphvL", 282.63795424 * 1e8}, + &TokenPayout{"DsXSJT3GcNXySYZpTxpjZnfQU2jGPsgkDrb", 282.63795424 * 1e8}, + &TokenPayout{"DsXSK4Ld5FdZmDtRHygyNZL251t95GASMWC", 282.63795424 * 1e8}, + &TokenPayout{"DsXSRYChBazfcR8LXwy1tYNNjDBK34sVXpE", 282.63795424 * 1e8}, + &TokenPayout{"DsXtcgZRgTNoURDUW6KV4QgihgJD9R9ua3Y", 282.63795424 * 1e8}, + &TokenPayout{"DsXtCxNKnG2D3ANSekLb1z5HcYrne8UPgEL", 282.63795424 * 1e8}, + &TokenPayout{"DsXThCsT3pMLf53PPSD6Y2xBo9mBwLYQEKP", 282.63795424 * 1e8}, + &TokenPayout{"DsXTK91EQ9pNzFStFKmdps2D6ruLQTsyk3W", 282.63795424 * 1e8}, + &TokenPayout{"DsXtkwWvXVdMzuJ9tpMCwL94nDAXoEa9Weq", 282.63795424 * 1e8}, + &TokenPayout{"DsXtR1r5mEmd5toHH5z7a33MpcznWBPWQVo", 282.63795424 * 1e8}, + &TokenPayout{"DsXU22Ns86JeLPW5QD3t4z2LwKjfBbPDJoN", 282.63795424 * 1e8}, + &TokenPayout{"DsXU7Kg8dozutr1uMxZ56KMVo7tRZ94itdJ", 282.63795424 * 1e8}, + &TokenPayout{"DsXuAVcrsetMAAQ4Hf1zXukjefvBJfNpkmU", 282.63795424 * 1e8}, + &TokenPayout{"DsXubdNJoTnBAd6KkzT1uTw7r8AgrBEHBLC", 282.63795424 * 1e8}, + &TokenPayout{"DsXVFCBWrTmF8GYVSgRocz4mzaFDcHgqG9K", 282.63795424 * 1e8}, + &TokenPayout{"DsXvJ9q9PNpJq3WMfoNyYkUkijbuz4dRztt", 282.63795424 * 1e8}, + &TokenPayout{"DsXVrdasXd11pBcQDg5xUWo6AysvtCTrVos", 282.63795424 * 1e8}, + &TokenPayout{"DsXwbSrxkrvdJmE4Sn9ULpAukVAvL5vnAUX", 282.63795424 * 1e8}, + &TokenPayout{"DsXwGuGTCMQNMjsy7xY7WNFtgELWmobVATs", 282.63795424 * 1e8}, + 
&TokenPayout{"DsXWokejzUe1fmnH2iMtyi971rg3gHEf6Qh", 282.63795424 * 1e8}, + &TokenPayout{"DsXwrAnawMgo3vgMdSJmYDVo33XZSu7bsZN", 282.63795424 * 1e8}, + &TokenPayout{"DsXWuxcPg2mvjV1oDbKWjm57iKLY1YjMggW", 282.63795424 * 1e8}, + &TokenPayout{"DsXxP6si4bFJBB2FuwRAgiJR5m9MK7sUDUw", 282.63795424 * 1e8}, + &TokenPayout{"DsXYEbqgzcvKJNfwsAX6GYDDhr2ytJ67UiW", 282.63795424 * 1e8}, + &TokenPayout{"DsXyh8jsKiQ3Smok5xdtsHpgWxmxoCGTkeY", 282.63795424 * 1e8}, + &TokenPayout{"DsXyMz534cerYhUcEHjCa42UajhGdw4EGA2", 282.63795424 * 1e8}, + &TokenPayout{"DsXZasysVoi5F8eLaygWLtfKTU4M2sLPjq3", 282.63795424 * 1e8}, + &TokenPayout{"DsXzLAkxazRVoAAyyNenNSat7K2eQ6R9V5H", 282.63795424 * 1e8}, + &TokenPayout{"DsXZxNb6V2JQwkjE646jtRo5LFzfrsVSFiu", 282.63795424 * 1e8}, + &TokenPayout{"DsXzxThZ8WqbvGAynBR7w9VsAmJdw3MVFUc", 282.63795424 * 1e8}, + &TokenPayout{"DsY14jSWMZn2UkzpZd3J8prqtmfakBG3FaN", 282.63795424 * 1e8}, + &TokenPayout{"DsY1iKv5Y3Tq48GNar4cFNCGq94KpEvF18S", 282.63795424 * 1e8}, + &TokenPayout{"DsY1jCATERbbBd9PhMDLrQuf6qk3vBW3JRE", 282.63795424 * 1e8}, + &TokenPayout{"DsY1KUqqjzAXwnes27tSW1y1WrJdPJxwpf2", 282.63795424 * 1e8}, + &TokenPayout{"DsY1Qusfh11H59jot6SZ2thNGAVt5AkPVL4", 282.63795424 * 1e8}, + &TokenPayout{"DsY1uBcK1YUimKPFFv6WvQKgiv4mXJdUy1p", 282.63795424 * 1e8}, + &TokenPayout{"DsY2hHcsJ1sAMNKN7B8zcCDGVLhnSw9FRY9", 282.63795424 * 1e8}, + &TokenPayout{"DsY2LVqP3q9xzSA1XQCjSY6GZ4T8xxt6E1L", 282.63795424 * 1e8}, + &TokenPayout{"DsY2P1Y3fhfgFjhhZf57fAnvXrzTccMykYL", 282.63795424 * 1e8}, + &TokenPayout{"DsY2tEKpR9h2QtkrAgQrhW8qTYiKoKcGeHD", 282.63795424 * 1e8}, + &TokenPayout{"DsY2xp4F5QKxuwroWHjxpjZGWpmMAP1pPGu", 282.63795424 * 1e8}, + &TokenPayout{"DsY35prGQgYD2dMaN13md7jMRFVEZmZEAA1", 282.63795424 * 1e8}, + &TokenPayout{"DsY3FNqUvRPX64cnQA2sPvP3UuYB42Usc53", 282.63795424 * 1e8}, + &TokenPayout{"DsY4tHjtBx2ycsGJx95ybwhwYHhwdVzeKJH", 282.63795424 * 1e8}, + &TokenPayout{"DsY5JmCLTrEMc73uq225cE1vAs3n2tGkwuD", 282.63795424 * 1e8}, + &TokenPayout{"DsY61LtttuDiedTPxmtxLM7YKeWR669qG6o", 282.63795424 * 1e8}, + &TokenPayout{"DsY6i5LGc9wAF2fCk8Eei5rks3Ux4WRUxmv", 282.63795424 * 1e8}, + &TokenPayout{"DsY875a5GrUiEBomWEAAvtpaiWjjn5LMHTQ", 282.63795424 * 1e8}, + &TokenPayout{"DsY8QgUsHZhN8hT4gZhw27thmLWutdYPhnw", 282.63795424 * 1e8}, + &TokenPayout{"DsY8uV6yZHRokzxgGM5yxWQWBmZGDwQEvw8", 282.63795424 * 1e8}, + &TokenPayout{"DsY8WuZQ7WkFuQ2UgW95kNgrswVCDrWogUX", 282.63795424 * 1e8}, + &TokenPayout{"DsY9og28XJoZCEvA6LcY1S8Zr4L6xSVMuk2", 282.63795424 * 1e8}, + &TokenPayout{"DsYA1Aoj6JydEJ35xzxvdFLZxPWTnqeFtvv", 282.63795424 * 1e8}, + &TokenPayout{"DsYafLMsGc91xmdcBFUWCdxMUsNE4woaS6W", 282.63795424 * 1e8}, + &TokenPayout{"DsYAFyC2vprPxBBjxzrmjNHnzttyHicpLa6", 282.63795424 * 1e8}, + &TokenPayout{"DsYaMX4MmJ8buuSEWcijNSykGmRYmF42DXD", 282.63795424 * 1e8}, + &TokenPayout{"DsYANJEWNVFGkokTyMubQ11myAhS1Qys58d", 282.63795424 * 1e8}, + &TokenPayout{"DsYavFJw9kT4KfPb1ugJfSXsA6xkeVSuXat", 282.63795424 * 1e8}, + &TokenPayout{"DsYAwbNYGPhiCMrFUbQsbcDy9cAQ4T6KRTA", 282.63795424 * 1e8}, + &TokenPayout{"DsYAY2JA9q8rNfE2GM7rQd5oeu8FgDdxGtc", 282.63795424 * 1e8}, + &TokenPayout{"DsYb36hke5SQ7ADGREFqoasReLHgo3sheyd", 282.63795424 * 1e8}, + &TokenPayout{"DsYB44UMv4f9dqFxxC84UVftWGyTPSM6bUq", 282.63795424 * 1e8}, + &TokenPayout{"DsYB6DgkvX21rr9KzckTRsormdmd4JmNgQo", 282.63795424 * 1e8}, + &TokenPayout{"DsYboMNFiu3W98x6n6B9zAzHzqMCh6XpEQ7", 282.63795424 * 1e8}, + &TokenPayout{"DsYCDGUvBkg29BT1WrauHb4n1maB1tdXZyZ", 282.63795424 * 1e8}, + &TokenPayout{"DsYcnszYyZwubDF3La8t17CUU277BiDhasX", 282.63795424 * 1e8}, + 
&TokenPayout{"DsYcPe67y1mcox8Rgny9PfrP6L1LGGVhEpA", 282.63795424 * 1e8}, + &TokenPayout{"DsYcrtChWtcx2TteaM6rS4ZXiqTNm21ZZuY", 282.63795424 * 1e8}, + &TokenPayout{"DsYCty5fxJUwi8tyazURyGB9oCvbNS6gtYR", 282.63795424 * 1e8}, + &TokenPayout{"DsYd1jxypqukDvtCCYteXHupMo9NHEWqYnn", 282.63795424 * 1e8}, + &TokenPayout{"DsYdJJdoxRbmwx6TekUSqyey5dhaRmxD5Jx", 282.63795424 * 1e8}, + &TokenPayout{"DsYEg42dCeWZQey4R4XV6i5f7wEMGQfevrv", 282.63795424 * 1e8}, + &TokenPayout{"DsYeVmNUuXxRgTDqbAUDePoSgoWT4i3ouYP", 282.63795424 * 1e8}, + &TokenPayout{"DsYfDDjXXU875JQgpQ9RYqfVCrMeBB1ub57", 282.63795424 * 1e8}, + &TokenPayout{"DsYgDD6Fq8sv5uTQpKsbtF2vaMXrzL2ewgs", 282.63795424 * 1e8}, + &TokenPayout{"DsYGJieMfzgcY47Px2sbezwUgAitg2GTQDn", 282.63795424 * 1e8}, + &TokenPayout{"DsYGqLDSWzMa8Vksrf8TB82aMxE7Cc2moUX", 282.63795424 * 1e8}, + &TokenPayout{"DsYgYrzf632pKNRnQxMpQqVkvVkd7gRxuAo", 282.63795424 * 1e8}, + &TokenPayout{"DsYh34WP73buauwndXMkJhATB1Q1E8Zf7Vz", 282.63795424 * 1e8}, + &TokenPayout{"DsYhF38pAuGgMVBHQ9MwRvFFQ3BFkG2ttkp", 282.63795424 * 1e8}, + &TokenPayout{"DsYhgyV7S3mYUTT13Ugkt2hEydfJPqWLgrN", 282.63795424 * 1e8}, + &TokenPayout{"DsYhy4rqp9VkJkSyTgGLYkvXAUULfbDRT5o", 282.63795424 * 1e8}, + &TokenPayout{"DsYhzeANgB6FBm4i9ZQcmgk4sa441uwnyZC", 282.63795424 * 1e8}, + &TokenPayout{"DsYiiWqBb9aevwwSTMLhfpSxCu7KwzVye9e", 282.63795424 * 1e8}, + &TokenPayout{"DsYiXfCfXru8GYtjvN6Baz1RprZBZX81nHC", 282.63795424 * 1e8}, + &TokenPayout{"DsYixiEVXjGzwMdvgRdZZWdmbvjkNazobkn", 282.63795424 * 1e8}, + &TokenPayout{"DsYjaqnpy7Ur94quEfi9gHQraoVmUGZsevC", 282.63795424 * 1e8}, + &TokenPayout{"DsYjAXez5mBePohCRnMWFjuuQpMp1kCqF5V", 282.63795424 * 1e8}, + &TokenPayout{"DsYjJf7cgQWyeynXk5QcjnuzAeBGU7KdXxg", 282.63795424 * 1e8}, + &TokenPayout{"DsYjmfLCjtzCHZC4p5b7ArbBsn5YeFfraQm", 282.63795424 * 1e8}, + &TokenPayout{"DsYK1drWfG3xUPDDKgHWfSn7rqChSqZZPue", 282.63795424 * 1e8}, + &TokenPayout{"DsYkesQ7Mw4GujzjprMJ6KWLzqKoEJo3K9v", 282.63795424 * 1e8}, + &TokenPayout{"DsYKmNQR6R7jjYp6KWsgNKrnT8wczuRuhVT", 282.63795424 * 1e8}, + &TokenPayout{"DsYKvBKAxWGocpte7hrVx8ewMpTaG7e2X9v", 282.63795424 * 1e8}, + &TokenPayout{"DsYL5diHjA3G673w12sD12esDz6fSS6VMae", 282.63795424 * 1e8}, + &TokenPayout{"DsYLLubvzZUazyEakfLhHRmRteJkw8tSw5N", 282.63795424 * 1e8}, + &TokenPayout{"DsYLWDgE3u2utmu7SLfLXCU3yd4rzQMiQKL", 282.63795424 * 1e8}, + &TokenPayout{"DsYMHY6uKGt7dHUjB57rxrfoFH28arFZfBT", 282.63795424 * 1e8}, + &TokenPayout{"DsYMosssLVStQxG3Yztw2sxXqRWzvqcX2N9", 282.63795424 * 1e8}, + &TokenPayout{"DsYmvzhyb6MZihQScE7rxMPpS4FTd27zMxs", 282.63795424 * 1e8}, + &TokenPayout{"DsYMzLKxoSCV6y8Re4YGFR5X51jpTC97iXt", 282.63795424 * 1e8}, + &TokenPayout{"DsYNMhMm9uhxRfGQxxJMqFnX52WXdcZeKoD", 282.63795424 * 1e8}, + &TokenPayout{"DsYnWFe3hRB21rWwMx9bFabsnNAmPebyfcE", 282.63795424 * 1e8}, + &TokenPayout{"DsYo4t7ChB1PYx1qznL6SX5tvt8PnsgNxkb", 282.63795424 * 1e8}, + &TokenPayout{"DsYoCXrFz6xSTXqQyg5E1hsFFA34fTwZQC8", 282.63795424 * 1e8}, + &TokenPayout{"DsYpBe9VWqiAxAodwb1grNcMQ3c9dMGU7C5", 282.63795424 * 1e8}, + &TokenPayout{"DsYPCqioKYnFcM8WU7r1jS3MbFCGUU81Pe5", 282.63795424 * 1e8}, + &TokenPayout{"DsYPjHiqn6etHVXcBqJPqJ1fUT732pFxvFf", 282.63795424 * 1e8}, + &TokenPayout{"DsYpLxRbKzndYkeBreftxr1GDi4EdHX5Q2o", 282.63795424 * 1e8}, + &TokenPayout{"DsYpm1jCWpxDGoRfoYpUWpjrACLS6BEtRvJ", 282.63795424 * 1e8}, + &TokenPayout{"DsYpvAECEjJiixFh4qBDryX3XNqZHkMJDTE", 282.63795424 * 1e8}, + &TokenPayout{"DsYQAJEayp8K3WPfvXEQDTmg1qCisnpWJ4E", 282.63795424 * 1e8}, + &TokenPayout{"DsYqLhep5UnRhwk6Vh4xCvpQ4BFf4rcDFRK", 282.63795424 * 1e8}, + 
&TokenPayout{"DsYR3BZwZkq7CApJYgTnJAQtR3GPDzmZGPx", 282.63795424 * 1e8}, + &TokenPayout{"DsYRCGj33GTbWsAWZu1ov6grWQshCSFwMYS", 282.63795424 * 1e8}, + &TokenPayout{"DsYRe78GkvPvtW7rcbMeHbnw5VACQVk9moW", 282.63795424 * 1e8}, + &TokenPayout{"DsYrEZrTVMPJiwjzKuhSKsxkQiBzP2EAa6u", 282.63795424 * 1e8}, + &TokenPayout{"DsYRFb25toeQmhCCaaESPhLPcpXeAjrxdbA", 282.63795424 * 1e8}, + &TokenPayout{"DsYrG2N8vTm2t4sr331fDGCxxxVoHS8aqt8", 282.63795424 * 1e8}, + &TokenPayout{"DsYRQfmnaPFzywEfe3426DuZN3MEm7VPtjA", 282.63795424 * 1e8}, + &TokenPayout{"DsYRrCCYdJfeML63svnEQrrNbZLzYzMJDrW", 282.63795424 * 1e8}, + &TokenPayout{"DsYrtAARdGqoEykgHaG8B9JvZzZxiS9kBie", 282.63795424 * 1e8}, + &TokenPayout{"DsYRXFc5wE7Y91rHkTTADsQF1W6zjkAmozD", 282.63795424 * 1e8}, + &TokenPayout{"DsYrzLYC8sf9jEYymhAwMq4ns9Cp89aJv17", 282.63795424 * 1e8}, + &TokenPayout{"DsYsEbD9vsNgt9nvKTZbtZiAtdEdAL5pWTz", 282.63795424 * 1e8}, + &TokenPayout{"DsYSejJ3DgksFNeEUyJCRzmkXsX7pto56WW", 282.63795424 * 1e8}, + &TokenPayout{"DsYsHSVuGiJ8qU4gCZ5GgK9BFjoskiz7JzM", 282.63795424 * 1e8}, + &TokenPayout{"DsYSsAKwxKPei7yC33WNgPPd6g8VpTj5goj", 282.63795424 * 1e8}, + &TokenPayout{"DsYTkPsSoYqwj9UrgQ7q5TLU96J2Bc2etKN", 282.63795424 * 1e8}, + &TokenPayout{"DsYTotex1WtFvUKxBDKeDAzbS6md7B2wuRH", 282.63795424 * 1e8}, + &TokenPayout{"DsYU7jQ5XJDfZVZrg93hWK8Cn7SsoqyViGB", 282.63795424 * 1e8}, + &TokenPayout{"DsYUFyrQFYPYtGM2JU7JCzYqiygwFCmiGNr", 282.63795424 * 1e8}, + &TokenPayout{"DsYvayZiU2zCCM33PovFGCkPeQ5MTxktqwT", 282.63795424 * 1e8}, + &TokenPayout{"DsYvDyqnXgz4eiEhAK757Zq7AwCXXhSeZnN", 282.63795424 * 1e8}, + &TokenPayout{"DsYvkT6jfCDiBzjsga4PVMXeKEGVxN68oAi", 282.63795424 * 1e8}, + &TokenPayout{"DsYvnMttyrRxEzW7Tm3fD55k5MQzT7fkkLA", 282.63795424 * 1e8}, + &TokenPayout{"DsYWfTrBYcZxK4hRzGxWbpiBbAWEJBgBLAx", 282.63795424 * 1e8}, + &TokenPayout{"DsYwTza2xGBVamC29qNeqhWAY26niPkeTXf", 282.63795424 * 1e8}, + &TokenPayout{"DsYX7dPmC5LKjRogmDkYWN4NFGEGUPtApcw", 282.63795424 * 1e8}, + &TokenPayout{"DsYx9DJzBefnR11WACNcjJ9puC28T8fnY2M", 282.63795424 * 1e8}, + &TokenPayout{"DsYxaxJfx1vFQtJooumyoN3BVRMLe9NhTb2", 282.63795424 * 1e8}, + &TokenPayout{"DsYxEMYx5E8JW6SxPfD85Z8HBM9V7aGMJXL", 282.63795424 * 1e8}, + &TokenPayout{"DsYXk1DUGb17p9ri38WthSY38P4LiUbE762", 282.63795424 * 1e8}, + &TokenPayout{"DsYxo3qbWmpvZP2VZELS9j8TQBUFLgcyJQr", 282.63795424 * 1e8}, + &TokenPayout{"DsYxWihJTQto7dtuAcmXKisrCRAyEh9GiG8", 282.63795424 * 1e8}, + &TokenPayout{"DsYXZkAM4TtEAsF8JCtxPnrEzMD48K27y1s", 282.63795424 * 1e8}, + &TokenPayout{"DsYyAkqtwPats8sFUkuahUp649C3dbXBZAG", 282.63795424 * 1e8}, + &TokenPayout{"DsYYaXM3ed25h28Vdi8G62ckVQivVmDKnCg", 282.63795424 * 1e8}, + &TokenPayout{"DsYYjdkvk6axqmteNGm8vXqm1Z2r9raXLBW", 282.63795424 * 1e8}, + &TokenPayout{"DsYYq43fQwFAxXdpLq2JtFtc55VdWUcszsx", 282.63795424 * 1e8}, + &TokenPayout{"DsYZ21bypA1RomfNdys9W9rukzCUuWZD7RZ", 282.63795424 * 1e8}, + &TokenPayout{"DsYz2JpDwW69fXZU7Y7ERAxDJ4kssRhJmcr", 282.63795424 * 1e8}, + &TokenPayout{"DsYZDdtSghVVcy28e1Fu2m5k2Ny4Sc1CscK", 282.63795424 * 1e8}, + &TokenPayout{"DsYzDzSrTnEX7n35kGgDqUPxGqwUxd3T9wJ", 282.63795424 * 1e8}, + &TokenPayout{"DsYzea3G6dezKiZ2B9SmZjyBjYmdESDwY3H", 282.63795424 * 1e8}, + &TokenPayout{"DsYzEojznsnLfosfD84aT7jXS24k8ZaxZbw", 282.63795424 * 1e8}, + &TokenPayout{"DsZ17FHHa6EzYPED15fzJ3WYf7zw2RTwFzV", 282.63795424 * 1e8}, + &TokenPayout{"DsZ1o6eBxvD2t2xoStdLpNbqpMRHsQ5Sj8M", 282.63795424 * 1e8}, + &TokenPayout{"DsZ1p11tRuqJwqQdTtFS6bNWEmhNDF2P1hK", 282.63795424 * 1e8}, + &TokenPayout{"DsZ1sGesWXNyhSNgMnvtxpMVpogiSDgZ47T", 282.63795424 * 1e8}, + 
&TokenPayout{"DsZ2EbcVcPKmmzLLwRbpRuDAm346atj3GtF", 282.63795424 * 1e8}, + &TokenPayout{"DsZ2pvHX5MJqC1B8u7mkouiV1obVeJEsNKj", 282.63795424 * 1e8}, + &TokenPayout{"DsZ2r4iY4H7RHyD6craEH9mWNbdkEroya25", 282.63795424 * 1e8}, + &TokenPayout{"DsZ321w5WaNnpsW48vnhWR6456oVdWoBKWw", 282.63795424 * 1e8}, + &TokenPayout{"DsZ356AKWMpmBv5iS5rJGGx9vEcGvzXfmhR", 282.63795424 * 1e8}, + &TokenPayout{"DsZ3bbce8P4ZisnA2RjZMxKztemzewpZouz", 282.63795424 * 1e8}, + &TokenPayout{"DsZ48w81fZ8x6P1aK5mi12PjFuHFvE9fqdd", 282.63795424 * 1e8}, + &TokenPayout{"DsZ4aSc59HX9wkDuXu3PaiDTke254pTcxtM", 282.63795424 * 1e8}, + &TokenPayout{"DsZ4EifXDYBtysw1BLXY4v2duCP4NqRgZqp", 282.63795424 * 1e8}, + &TokenPayout{"DsZ4Ry43NMR2kK6GL8SSotGEJRPvUkjLoqZ", 282.63795424 * 1e8}, + &TokenPayout{"DsZ5bFJtcTZp7iSWPpSiiwVdWq9ro9Fuxaa", 282.63795424 * 1e8}, + &TokenPayout{"DsZ5cJvN243iipmBELSVmQVhRbJ3tkgQ9Km", 282.63795424 * 1e8}, + &TokenPayout{"DsZ62e3V9ETuk7oTagFf8PWUCJurZEWafJh", 282.63795424 * 1e8}, + &TokenPayout{"DsZ69TJ29hkxWvjAe6imfwn9jgBmTBkcvWp", 282.63795424 * 1e8}, + &TokenPayout{"DsZ6CNWbwvmibeMQfwxi2GaSPTdkCNcoUVu", 282.63795424 * 1e8}, + &TokenPayout{"DsZ6x6K4xTWSVaKGn28p4TJm1Vqq5Ppji6A", 282.63795424 * 1e8}, + &TokenPayout{"DsZ6yU2XgdxaacnY4DZTkvnqad2CiGkQdi1", 282.63795424 * 1e8}, + &TokenPayout{"DsZ741vZQS6cC6jWTCrT6yNzqLD3gWTg4Hi", 282.63795424 * 1e8}, + &TokenPayout{"DsZ7rEU9AhbxHEyT5rwnXGJf7RbtfkQYQSc", 282.63795424 * 1e8}, + &TokenPayout{"DsZ8FzHNepk2JYnxDc8jQ3FxvBtCSnNT5mw", 282.63795424 * 1e8}, + &TokenPayout{"DsZ8XpYhookddVJNw95N1YnCFodbmFQSAgp", 282.63795424 * 1e8}, + &TokenPayout{"DsZBeCsXx9nPGLMa6JStP9op357LhLhBNuV", 282.63795424 * 1e8}, + &TokenPayout{"DsZBkJnoZXJvPVovjG5F6BUaFw9dwee9LE4", 282.63795424 * 1e8}, + &TokenPayout{"DsZbmCKWTwmZXGYCXhHHeyoBAMsKGAXwigH", 282.63795424 * 1e8}, + &TokenPayout{"DsZbnM77Uw4DPA4QwbPvRRhcC1Rb162r4dY", 282.63795424 * 1e8}, + &TokenPayout{"DsZbQUPDwZpKbg4TCbEKGbnfPE9fGGx8DtP", 282.63795424 * 1e8}, + &TokenPayout{"DsZBTkBrjCEPNTBDYEQNCU7E9x1nP4he1fH", 282.63795424 * 1e8}, + &TokenPayout{"DsZBUBAW5riYFKUYMhrrnuxpcirPk9xdvJx", 282.63795424 * 1e8}, + &TokenPayout{"DsZBwJVnoHXXFKinjdTLfnQftgpUywwKfz9", 282.63795424 * 1e8}, + &TokenPayout{"DsZC8idNMtg3TMUVNt1LTaetwG2Ec4dpQE8", 282.63795424 * 1e8}, + &TokenPayout{"DsZCLzY5ePN1VABv4tDxAf7jUzcHPEZaZnV", 282.63795424 * 1e8}, + &TokenPayout{"DsZCvZejCeVeTgibXgYYc6QmU93rCBj1RXA", 282.63795424 * 1e8}, + &TokenPayout{"DsZDimsuQBvmyYtv3RbopioXfgbWVpUxVeU", 282.63795424 * 1e8}, + &TokenPayout{"DsZE2zVQo3bkvjLJAZwmf1WmvSSLKMgVB3V", 282.63795424 * 1e8}, + &TokenPayout{"DsZeNUz8wEpkNhHbwAWe4T5VPXAap8FHnMS", 282.63795424 * 1e8}, + &TokenPayout{"DsZePCqbf8USspeDZKj3FKQvWzZVFMf5m2X", 282.63795424 * 1e8}, + &TokenPayout{"DsZeSpyK4wgKHRRfKf99qNb8rRtNJbLvrY6", 282.63795424 * 1e8}, + &TokenPayout{"DsZetsRDarHoDNkuKXLGRjpkgtvmS5nn9xk", 282.63795424 * 1e8}, + &TokenPayout{"DsZeUSxRcjnahYxRpw95rg2ov8thARnrn9w", 282.63795424 * 1e8}, + &TokenPayout{"DsZfbyJpw2WXxGznwWgLzguPvvxrRitqM2u", 282.63795424 * 1e8}, + &TokenPayout{"DsZfEBw31pWpmk3wxRAt3HRUETiS1bxW8JF", 282.63795424 * 1e8}, + &TokenPayout{"DsZFLYVuy3YPJRM92ogafQR6zdfs15Ubraq", 282.63795424 * 1e8}, + &TokenPayout{"DsZGMP63ZS3JYTox9YEEqPQLRP7mjNtvL1V", 282.63795424 * 1e8}, + &TokenPayout{"DsZGoC7pm1jiNVBEUsmHfPV9iTsZ1KVeCUd", 282.63795424 * 1e8}, + &TokenPayout{"DsZhgCc4KasZ7pWZfPGb5sCPkKYkGfgdM8F", 282.63795424 * 1e8}, + &TokenPayout{"DsZHPPqLMStyXLKc4vKRth5ZDqfbrxDGo7F", 282.63795424 * 1e8}, + &TokenPayout{"DsZHuuHfm3mF7gCvYedUo9ndU3GDvBpfNzR", 282.63795424 * 1e8}, + 
&TokenPayout{"DsZHW7SuFR6sjJnaGRgdrFVBcfc1nyQjxqk", 282.63795424 * 1e8}, + &TokenPayout{"DsZigdRtSJ2E6WDDd4PHfVAbFHVehYcPg2M", 282.63795424 * 1e8}, + &TokenPayout{"DsZixaLJfrKoe7TUhdZ6zDfyvmHxWqgtxHB", 282.63795424 * 1e8}, + &TokenPayout{"DsZjefpJsadmqxDVsQeuytGMGGtMzGC25fY", 282.63795424 * 1e8}, + &TokenPayout{"DsZjZJd1Fk9CdPaWkyLCnTS4o2pVkGyZpcC", 282.63795424 * 1e8}, + &TokenPayout{"DsZk645Jy1DqfihVkikaEGZPvSSJJektLgb", 282.63795424 * 1e8}, + &TokenPayout{"DsZkBLRVWvRJXNaHLUJ94T9CKjWtcRR7ZRR", 282.63795424 * 1e8}, + &TokenPayout{"DsZkkG8HwWxJCjsj1UQTCY9AztSzQV5dj61", 282.63795424 * 1e8}, + &TokenPayout{"DsZKNz642enQR3dCHugicRBSnaFpVZdpZkn", 282.63795424 * 1e8}, + &TokenPayout{"DsZKqbamUz3VF7GvRHtwX1cPGt87tVQb1UV", 282.63795424 * 1e8}, + &TokenPayout{"DsZkQhBrvwnjAA4wHn6NfddbHbUDpumRP5c", 282.63795424 * 1e8}, + &TokenPayout{"DsZkWqaibhd5paN9KwHvHEpuEL4g3RsypzN", 282.63795424 * 1e8}, + &TokenPayout{"DsZkx5FoVSZrP9X2T7EDSPmmr1eeqUEGNdB", 282.63795424 * 1e8}, + &TokenPayout{"DsZmrDM7fiFMYRRcsGoDwWWYt1CjPHPKZ2D", 282.63795424 * 1e8}, + &TokenPayout{"DsZmVdYUunJGB8kdTaqfEmzPA18RcXjoZPq", 282.63795424 * 1e8}, + &TokenPayout{"DsZmx3u4p9NWcFxHgGRp3PhVM6yt86hru4c", 282.63795424 * 1e8}, + &TokenPayout{"DsZmx7xxHCURABQbUDTHnmoF7qRYfaeRoks", 282.63795424 * 1e8}, + &TokenPayout{"DsZN2XC4ZUcVYgyjgeG29WVJnec413GDDue", 282.63795424 * 1e8}, + &TokenPayout{"DsZN8kRNejpeSm18Fv8ozFbLa5vuboA6RWw", 282.63795424 * 1e8}, + &TokenPayout{"DsZNaUaX7Ufdttr5hz7A1NceM3dyaheCXXR", 282.63795424 * 1e8}, + &TokenPayout{"DsZNf4BFuPaAtohkrn6YNjC2MPAG3VS9928", 282.63795424 * 1e8}, + &TokenPayout{"DsZNFjA8CucGrxYm9r3AEfAfV5cT2d7kyCs", 282.63795424 * 1e8}, + &TokenPayout{"DsZnxQKkA8UERcTeek4rCJSYvZVcSPMDjbn", 282.63795424 * 1e8}, + &TokenPayout{"DsZNXZK2r6hxt4TDaDrz8sWQ49t8weLrV41", 282.63795424 * 1e8}, + &TokenPayout{"DsZooW6n3b5Qmne9VCEspDF3ptDHjSVCoTC", 282.63795424 * 1e8}, + &TokenPayout{"DsZoTZxNF1mJWSAqctJ7q16hCv4pxvoWFmM", 282.63795424 * 1e8}, + &TokenPayout{"DsZp1uyQjQnsH7gQLNiov46NACXFaQtfDEf", 282.63795424 * 1e8}, + &TokenPayout{"DsZpgRVCKYPYtqZSLDxQaorjCVQXbiiibr2", 282.63795424 * 1e8}, + &TokenPayout{"DsZPHzypEx5624mhkY5yF3w1vreWi54bp1i", 282.63795424 * 1e8}, + &TokenPayout{"DsZqBeAMeNPNP44j5uWGVLz3bFWhRsLNE7J", 282.63795424 * 1e8}, + &TokenPayout{"DsZqbEKEwq8SzA4caqmaJoHKdgz2RmZUPsQ", 282.63795424 * 1e8}, + &TokenPayout{"DsZqDXydCBs6K2wVuobV8LP64wHDUKJ9Kmd", 282.63795424 * 1e8}, + &TokenPayout{"DsZqib9DUKtZrPSC79ASyBY5kcvf62J9cRn", 282.63795424 * 1e8}, + &TokenPayout{"DsZqiEca2PX95f9nhtUTrsnzTCijM9TZD6q", 282.63795424 * 1e8}, + &TokenPayout{"DsZQpiZqNU7uLcB5bTFVAQoeNei825cuCSL", 282.63795424 * 1e8}, + &TokenPayout{"DsZQtC5dnNWuxDhGF9S4XuFxPs25m8wV7j2", 282.63795424 * 1e8}, + &TokenPayout{"DsZquB9R99SngoGWYrRe6VMdfXmft5mbMn5", 282.63795424 * 1e8}, + &TokenPayout{"DsZQZ7chF1fjXg5LxvkRystGWfDiCKfQR41", 282.63795424 * 1e8}, + &TokenPayout{"DsZQZ9Yb4JjuAiYPqQJ7bsMPm8kS5rLLENc", 282.63795424 * 1e8}, + &TokenPayout{"DsZr7e6nEV1qHnSxnqgd1dHPoNbk7eP33t3", 282.63795424 * 1e8}, + &TokenPayout{"DsZr83erUCJ1gwYv6xTvX44gMtMgpz2xHYZ", 282.63795424 * 1e8}, + &TokenPayout{"DsZRodYCTMA2gMC2PUUNXXS7bmcY1QGNgtJ", 282.63795424 * 1e8}, + &TokenPayout{"DsZRuUzbeLgEU9MYNtfCbHZkA8xUPByEK4X", 282.63795424 * 1e8}, + &TokenPayout{"DsZRXrCzNDvrz1QiCCHwcpoa7Dd5HA7Ha8X", 282.63795424 * 1e8}, + &TokenPayout{"DsZRzD3KsvL7BPN4zJwa6GYBuA7PKTrQX2M", 282.63795424 * 1e8}, + &TokenPayout{"DsZSFk8XMjAMLgbKvTubQQPK9y2A9uxDfzf", 282.63795424 * 1e8}, + &TokenPayout{"DsZSHMQgFXnms6WRDXRV8h6Gm3xy3d32tPK", 282.63795424 * 1e8}, + 
&TokenPayout{"DsZsN9AquaErX6h3Pj3aFo1Ah5kEykUQ299", 282.63795424 * 1e8}, + &TokenPayout{"DsZSPvbTx6ENLQipw9GanPfM4zNtaXMbqzU", 282.63795424 * 1e8}, + &TokenPayout{"DsZSU8kbXDy63op2jnhkLmoaPkRqVWT35CW", 282.63795424 * 1e8}, + &TokenPayout{"DsZsWC7apjH2A3e6WKR42a6ZR5FcN5vvg9R", 282.63795424 * 1e8}, + &TokenPayout{"DsZsxUiWAwHchbqFUfvx1hW2CdTdKgFovPx", 282.63795424 * 1e8}, + &TokenPayout{"DsZt2vCk1skx6QALK4aY4CFCFFYCmVgmbrK", 282.63795424 * 1e8}, + &TokenPayout{"DsZt4GPEWPvFgFg2xHdgbtgagpdir9JrgZB", 282.63795424 * 1e8}, + &TokenPayout{"DsZT5XmRL25SZF2b3cqy4iNAA2MvuawXuUQ", 282.63795424 * 1e8}, + &TokenPayout{"DsZt64QHeXwm7X5famQhehJipDXooNwbd49", 282.63795424 * 1e8}, + &TokenPayout{"DsZTV6Ka2M62YNwCdy6kUXwK9q4kXGB4jdJ", 282.63795424 * 1e8}, + &TokenPayout{"DsZTXUE5CGcQzFyNfDMKQ5K8MCgJViE8drZ", 282.63795424 * 1e8}, + &TokenPayout{"DsZUC9eHqeUTYymJ3b9eHu92r9hnFiUbPDk", 282.63795424 * 1e8}, + &TokenPayout{"DsZw2ycjUhjeGMNAcAzMmet9hScprcavHGy", 282.63795424 * 1e8}, + &TokenPayout{"DsZW5pkk2vEEdSqKsbUwrmTdJiFd4NQxPmS", 282.63795424 * 1e8}, + &TokenPayout{"DsZWc5RKohGqDDWLAgdeAgPCK2Uu9kyCA3T", 282.63795424 * 1e8}, + &TokenPayout{"DsZwsZL9fsFDt9QhDdzcVm5bYPayDq91qoE", 282.63795424 * 1e8}, + &TokenPayout{"DsZx1GK4nFfQJuqc23Abx1PKxdNqUZ4v1Aq", 282.63795424 * 1e8}, + &TokenPayout{"DsZxN2aPEqEFsSsHV5qP6rmCznQuRSi5A3C", 282.63795424 * 1e8}, + &TokenPayout{"DsZxWGaa8NzmvRKb7LFhSHAsepgHEpi5aEo", 282.63795424 * 1e8}, + &TokenPayout{"DsZY7fWssfuGkMY1w58MkLtQxj8d37b4S9G", 282.63795424 * 1e8}, + &TokenPayout{"DsZYQ57mK5SSuceDX5LPpGEFg7DWiecHLuu", 282.63795424 * 1e8}, + &TokenPayout{"DsZYtMT7jac8P25TzgCcc33iAdWfgetWuA9", 282.63795424 * 1e8}, + &TokenPayout{"DsZz1kCrLe71JocmGQcfKCYofEjFfr4sEak", 282.63795424 * 1e8}, + &TokenPayout{"DsZZe5aZb4c163CNhYow8KxJrNNeGAs5QyE", 282.63795424 * 1e8}, + &TokenPayout{"DsZZsYydXnoac5z3LnwVR7H4uwUeTTgTMGf", 282.63795424 * 1e8}, + &TokenPayout{"DsZZX6MSkR1RWThuZkmtrcWs5exTHiVUrE3", 282.63795424 * 1e8}, +} + +// BlockOneLedgerTestNet is the block one output ledger for the test +// network. +var BlockOneLedgerTestNet = []*TokenPayout{ + &TokenPayout{"TsmWaPM77WSyA3aiQ2Q1KnwGDVWvEkhipBc", 100000 * 1e8}, +} + +// BlockOneLedgerSimNet is the block one output ledger for the simulation +// network. See under "Decred organization related parameters" in params.go +// for information on how to spend these outputs. +var BlockOneLedgerSimNet = []*TokenPayout{ + &TokenPayout{"Sshw6S86G2bV6W32cbc7EhtFy8f93rU6pae", 100000 * 1e8}, + &TokenPayout{"SsjXRK6Xz6CFuBt6PugBvrkdAa4xGbcZ18w", 100000 * 1e8}, + &TokenPayout{"SsfXiYkYkCoo31CuVQw428N6wWKus2ZEw5X", 100000 * 1e8}, +} diff --git a/chaincfg/register_test.go b/chaincfg/register_test.go index b5ac392b..4448c355 100644 --- a/chaincfg/register_test.go +++ b/chaincfg/register_test.go @@ -1,3 +1,7 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + package chaincfg_test import ( @@ -5,7 +9,7 @@ import ( "reflect" "testing" - . "github.com/btcsuite/btcd/chaincfg" + . 
"github.com/decred/dcrd/chaincfg" ) // Define some of the required parameters for a user-registered @@ -14,8 +18,8 @@ import ( var mockNetParams = Params{ Name: "mocknet", Net: 1<<32 - 1, - PubKeyHashAddrID: 0x9f, - ScriptHashAddrID: 0xf9, + PubKeyHashAddrID: [2]byte{0x9f}, + ScriptHashAddrID: [2]byte{0xf9}, HDPrivateKeyID: [4]byte{0x01, 0x02, 0x03, 0x04}, HDPublicKeyID: [4]byte{0x05, 0x06, 0x07, 0x08}, } @@ -27,7 +31,7 @@ func TestRegister(t *testing.T) { err error } type magicTest struct { - magic byte + magic [2]byte valid bool } type hdTest struct { @@ -52,13 +56,8 @@ func TestRegister(t *testing.T) { err: ErrDuplicateNet, }, { - name: "duplicate regtest", - params: &RegressionNetParams, - err: ErrDuplicateNet, - }, - { - name: "duplicate testnet3", - params: &TestNet3Params, + name: "duplicate testnet", + params: &TestNetParams, err: ErrDuplicateNet, }, { @@ -73,11 +72,7 @@ func TestRegister(t *testing.T) { valid: true, }, { - magic: TestNet3Params.PubKeyHashAddrID, - valid: true, - }, - { - magic: RegressionNetParams.PubKeyHashAddrID, + magic: TestNetParams.PubKeyHashAddrID, valid: true, }, { @@ -89,7 +84,7 @@ func TestRegister(t *testing.T) { valid: false, }, { - magic: 0xFF, + magic: [2]byte{0xFF}, valid: false, }, }, @@ -99,11 +94,7 @@ func TestRegister(t *testing.T) { valid: true, }, { - magic: TestNet3Params.ScriptHashAddrID, - valid: true, - }, - { - magic: RegressionNetParams.ScriptHashAddrID, + magic: TestNetParams.ScriptHashAddrID, valid: true, }, { @@ -115,7 +106,7 @@ func TestRegister(t *testing.T) { valid: false, }, { - magic: 0xFF, + magic: [2]byte{0xFF}, valid: false, }, }, @@ -126,13 +117,8 @@ func TestRegister(t *testing.T) { err: nil, }, { - priv: TestNet3Params.HDPrivateKeyID[:], - want: TestNet3Params.HDPublicKeyID[:], - err: nil, - }, - { - priv: RegressionNetParams.HDPrivateKeyID[:], - want: RegressionNetParams.HDPublicKeyID[:], + priv: TestNetParams.HDPrivateKeyID[:], + want: TestNetParams.HDPublicKeyID[:], err: nil, }, { @@ -169,11 +155,7 @@ func TestRegister(t *testing.T) { valid: true, }, { - magic: TestNet3Params.PubKeyHashAddrID, - valid: true, - }, - { - magic: RegressionNetParams.PubKeyHashAddrID, + magic: TestNetParams.PubKeyHashAddrID, valid: true, }, { @@ -185,7 +167,7 @@ func TestRegister(t *testing.T) { valid: true, }, { - magic: 0xFF, + magic: [2]byte{0xFF}, valid: false, }, }, @@ -195,11 +177,7 @@ func TestRegister(t *testing.T) { valid: true, }, { - magic: TestNet3Params.ScriptHashAddrID, - valid: true, - }, - { - magic: RegressionNetParams.ScriptHashAddrID, + magic: TestNetParams.ScriptHashAddrID, valid: true, }, { @@ -211,7 +189,7 @@ func TestRegister(t *testing.T) { valid: true, }, { - magic: 0xFF, + magic: [2]byte{0xFF}, valid: false, }, }, @@ -232,13 +210,8 @@ func TestRegister(t *testing.T) { err: ErrDuplicateNet, }, { - name: "duplicate regtest", - params: &RegressionNetParams, - err: ErrDuplicateNet, - }, - { - name: "duplicate testnet3", - params: &TestNet3Params, + name: "duplicate testnet", + params: &TestNetParams, err: ErrDuplicateNet, }, { @@ -258,11 +231,7 @@ func TestRegister(t *testing.T) { valid: true, }, { - magic: TestNet3Params.PubKeyHashAddrID, - valid: true, - }, - { - magic: RegressionNetParams.PubKeyHashAddrID, + magic: TestNetParams.PubKeyHashAddrID, valid: true, }, { @@ -274,7 +243,7 @@ func TestRegister(t *testing.T) { valid: true, }, { - magic: 0xFF, + magic: [2]byte{0xFF}, valid: false, }, }, @@ -284,11 +253,7 @@ func TestRegister(t *testing.T) { valid: true, }, { - magic: TestNet3Params.ScriptHashAddrID, - valid: 
true, - }, - { - magic: RegressionNetParams.ScriptHashAddrID, + magic: TestNetParams.ScriptHashAddrID, valid: true, }, { @@ -300,7 +265,7 @@ func TestRegister(t *testing.T) { valid: true, }, { - magic: 0xFF, + magic: [2]byte{0xFF}, valid: false, }, }, @@ -311,13 +276,8 @@ func TestRegister(t *testing.T) { err: nil, }, { - priv: TestNet3Params.HDPrivateKeyID[:], - want: TestNet3Params.HDPublicKeyID[:], - err: nil, - }, - { - priv: RegressionNetParams.HDPrivateKeyID[:], - want: RegressionNetParams.HDPublicKeyID[:], + priv: TestNetParams.HDPrivateKeyID[:], + want: TestNetParams.HDPublicKeyID[:], err: nil, }, { diff --git a/chainindexer.go b/chainindexer.go index fd9000e9..82c46da8 100644 --- a/chainindexer.go +++ b/chainindexer.go @@ -1,21 +1,23 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package main import ( - "container/heap" "fmt" - "runtime" "sync" "sync/atomic" - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" + "github.com/btcsuite/golangcrypto/ripemd160" ) @@ -38,25 +40,6 @@ const ( indexMaintain ) -// Limit the number of goroutines that concurrently -// build the index to catch up based on the number -// of processor cores. This help ensure the system -// stays reasonably responsive under heavy load. -var numCatchUpWorkers = runtime.NumCPU() * 3 - -// indexBlockMsg packages a request to have the addresses of a block indexed. -type indexBlockMsg struct { - blk *btcutil.Block - done chan struct{} -} - -// writeIndexReq represents a request to have a completed address index -// committed to the database. -type writeIndexReq struct { - blk *btcutil.Block - addrIndex database.BlockAddrIndex -} - // addrIndexer provides a concurrent service for indexing the transactions of // target blocks based on the addresses involved in the transaction. 
type addrIndexer struct { @@ -64,10 +47,6 @@ type addrIndexer struct { started int32 shutdown int32 state indexState - quit chan struct{} - wg sync.WaitGroup - addrIndexJobs chan *indexBlockMsg - writeRequests chan *writeIndexReq progressLogger *blockProgressLogger currentIndexTip int64 chainTip int64 @@ -96,10 +75,7 @@ func newAddrIndexer(s *server) (*addrIndexer, error) { ai := &addrIndexer{ server: s, - quit: make(chan struct{}), state: state, - addrIndexJobs: make(chan *indexBlockMsg), - writeRequests: make(chan *writeIndexReq, numCatchUpWorkers), currentIndexTip: lastIndexedHeight, chainTip: chainHeight, progressLogger: newBlockProgressLogger("Indexed addresses of", @@ -115,9 +91,11 @@ func (a *addrIndexer) Start() { return } adxrLog.Trace("Starting address indexer") - a.wg.Add(2) - go a.indexManager() - go a.indexWriter() + err := a.initialize() + if err != nil { + adxrLog.Errorf("Couldn't start address indexer: %v", err.Error()) + return + } } // Stop gracefully shuts down the address indexer by stopping all ongoing @@ -129,8 +107,6 @@ func (a *addrIndexer) Stop() error { return nil } adxrLog.Infof("Address indexer shutting down") - close(a.quit) - a.wg.Wait() return nil } @@ -142,351 +118,342 @@ func (a *addrIndexer) IsCaughtUp() bool { return a.state == indexMaintain } -// indexManager creates, and oversees worker index goroutines. -// indexManager is the main goroutine for the addresses indexer. -// It creates, and oversees worker goroutines to index incoming blocks, with -// the exact behavior depending on the current index state -// (catch up, vs maintain). Completion of catch-up mode is always proceeded by -// a gracefull transition into "maintain" mode. -// NOTE: Must be run as a goroutine. -func (a *addrIndexer) indexManager() { +// initialize starts the address indexer and fills the database up to the +// top height of the current database. +func (a *addrIndexer) initialize() error { if a.state == indexCatchUp { adxrLog.Infof("Building up address index from height %v to %v.", a.currentIndexTip+1, a.chainTip) - // Quit semaphores to gracefully shut down our worker tasks. - runningWorkers := make([]chan struct{}, 0, numCatchUpWorkers) - shutdownWorkers := func() { - for _, quit := range runningWorkers { - close(quit) - } - } - criticalShutdown := func() { - shutdownWorkers() - a.server.Stop() - } - - // Spin up all of our "catch up" worker goroutines, giving them - // a quit channel and WaitGroup so we can gracefully exit if - // needed. - var workerWg sync.WaitGroup - catchUpChan := make(chan *indexBlockMsg) - for i := 0; i < numCatchUpWorkers; i++ { - quit := make(chan struct{}) - runningWorkers = append(runningWorkers, quit) - workerWg.Add(1) - go a.indexCatchUpWorker(catchUpChan, &workerWg, quit) - } // Starting from the next block after our current index tip, // feed our workers each successive block to index until we've // caught up to the current highest block height. lastBlockIdxHeight := a.currentIndexTip + 1 for lastBlockIdxHeight <= a.chainTip { - targetSha, err := a.server.db.FetchBlockShaByHeight(lastBlockIdxHeight) - if err != nil { - adxrLog.Errorf("Unable to look up the sha of the "+ - "next target block (height %v): %v", - lastBlockIdxHeight, err) - criticalShutdown() - goto fin - } - targetBlock, err := a.server.db.FetchBlockBySha(targetSha) - if err != nil { - // Unable to locate a target block by sha, this - // is a critical error, we may have an - // inconsistency in the DB. 
- adxrLog.Errorf("Unable to look up the next "+ - "target block (sha %v): %v", targetSha, err) - criticalShutdown() - goto fin - } + // Skip the genesis block. + if !(lastBlockIdxHeight == 0) { + targetSha, err := a.server.db.FetchBlockShaByHeight( + lastBlockIdxHeight) + if err != nil { + return fmt.Errorf("Unable to look up the sha of the "+ + "next target block (height %v): %v", + lastBlockIdxHeight, err) + } + targetBlock, err := a.server.db.FetchBlockBySha(targetSha) + if err != nil { + // Unable to locate a target block by sha, this + // is a critical error, we may have an + // inconsistency in the DB. + return fmt.Errorf("Unable to look up the next "+ + "target block (sha %v): %v", targetSha, err) + } + targetParent, err := a.server.db.FetchBlockBySha( + &targetBlock.MsgBlock().Header.PrevBlock) + if err != nil { + // Unable to locate a target block by sha, this + // is a critical error, we may have an + // inconsistency in the DB. + return fmt.Errorf("Unable to look up the next "+ + "target block parent (sha %v): %v", + targetBlock.MsgBlock().Header.PrevBlock, err) + } - // Send off the next job, ready to exit if a shutdown is - // signalled. - indexJob := &indexBlockMsg{blk: targetBlock} - select { - case catchUpChan <- indexJob: - lastBlockIdxHeight++ - case <-a.quit: - shutdownWorkers() - goto fin - } - _, a.chainTip, err = a.server.db.NewestSha() - if err != nil { - adxrLog.Errorf("Unable to get latest block height: %v", err) - criticalShutdown() - goto fin + addrIndex, err := a.indexBlockAddrs(targetBlock, targetParent) + if err != nil { + return fmt.Errorf("Unable to index transactions of"+ + " block: %v", err) + } + err = a.server.db.UpdateAddrIndexForBlock(targetSha, + lastBlockIdxHeight, + addrIndex) + if err != nil { + return fmt.Errorf("Unable to insert block: %v", err.Error()) + } } + lastBlockIdxHeight++ } a.Lock() a.state = indexMaintain a.Unlock() - - // We've finished catching up. Signal our workers to quit, and - // wait until they've all finished. - shutdownWorkers() - workerWg.Wait() } - adxrLog.Infof("Address indexer has caught up to best height, entering " + - "maintainence mode") + adxrLog.Debugf("Address indexer has queued up to best height, safe " + + "to begin maintainence mode") - // We're all caught up at this point. We now serially process new jobs - // coming in. - for { - select { - case indexJob := <-a.addrIndexJobs: - addrIndex, err := a.indexBlockAddrs(indexJob.blk) - if err != nil { - adxrLog.Errorf("Unable to index transactions of"+ - " block: %v", err) - a.server.Stop() - goto fin - } - a.writeRequests <- &writeIndexReq{blk: indexJob.blk, - addrIndex: addrIndex} - case <-a.quit: - goto fin - } - } -fin: - a.wg.Done() -} - -// UpdateAddressIndex asynchronously queues a newly solved block to have its -// transactions indexed by address. -func (a *addrIndexer) UpdateAddressIndex(block *btcutil.Block) { - go func() { - job := &indexBlockMsg{blk: block} - a.addrIndexJobs <- job - }() -} - -// pendingIndexWrites writes is a priority queue which is used to ensure the -// address index of the block height N+1 is written when our address tip is at -// height N. This ordering is necessary to maintain index consistency in face -// of our concurrent workers, which may not necessarily finish in the order the -// jobs are handed out. -type pendingWriteQueue []*writeIndexReq - -// Len returns the number of items in the priority queue. It is part of the -// heap.Interface implementation. 
-func (pq pendingWriteQueue) Len() int { return len(pq) } - -// Less returns whether the item in the priority queue with index i should sort -// before the item with index j. It is part of the heap.Interface implementation. -func (pq pendingWriteQueue) Less(i, j int) bool { - return pq[i].blk.Height() < pq[j].blk.Height() -} - -// Swap swaps the items at the passed indices in the priority queue. It is -// part of the heap.Interface implementation. -func (pq pendingWriteQueue) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] } - -// Push pushes the passed item onto the priority queue. It is part of the -// heap.Interface implementation. -func (pq *pendingWriteQueue) Push(x interface{}) { - *pq = append(*pq, x.(*writeIndexReq)) -} - -// Pop removes the highest priority item (according to Less) from the priority -// queue and returns it. It is part of the heap.Interface implementation. -func (pq *pendingWriteQueue) Pop() interface{} { - n := len(*pq) - item := (*pq)[n-1] - (*pq)[n-1] = nil - *pq = (*pq)[0 : n-1] - return item -} - -// indexWriter commits the populated address indexes created by the -// catch up workers to the database. Since we have concurrent workers, the writer -// ensures indexes are written in ascending order to avoid a possible gap in the -// address index triggered by an unexpected shutdown. -// NOTE: Must be run as a goroutine -func (a *addrIndexer) indexWriter() { - var pendingWrites pendingWriteQueue - minHeightWrite := make(chan *writeIndexReq) - workerQuit := make(chan struct{}) - writeFinished := make(chan struct{}, 1) - - // Spawn a goroutine to feed our writer address indexes such - // that, if our address tip is at N, the index for block N+1 is always - // written first. We use a priority queue to enforce this condition - // while accepting new write requests. - go func() { - for { - top: - select { - case incomingWrite := <-a.writeRequests: - heap.Push(&pendingWrites, incomingWrite) - - // Check if we've found a write request that - // satisfies our condition. If we have, then - // chances are we have some backed up requests - // which wouldn't be written until a previous - // request showed up. If this is the case we'll - // quickly flush our heap of now available in - // order writes. We also accept write requests - // with a block height *before* the current - // index tip, in order to re-index new prior - // blocks added to the main chain during a - // re-org. - writeReq := heap.Pop(&pendingWrites).(*writeIndexReq) - _, addrTip, _ := a.server.db.FetchAddrIndexTip() - for writeReq.blk.Height() == (addrTip+1) || - writeReq.blk.Height() <= addrTip { - minHeightWrite <- writeReq - - // Wait for write to finish so we get a - // fresh view of the addrtip. - <-writeFinished - - // Break to grab a new write request - if pendingWrites.Len() == 0 { - break top - } - - writeReq = heap.Pop(&pendingWrites).(*writeIndexReq) - _, addrTip, _ = a.server.db.FetchAddrIndexTip() - } - - // We haven't found the proper write request yet, - // push back onto our heap and wait for the next - // request which may be our target write. - heap.Push(&pendingWrites, writeReq) - case <-workerQuit: - return - } - } - }() - -out: - // Our main writer loop. Here we actually commit the populated address - // indexes to the database. 
- for { - select { - case nextWrite := <-minHeightWrite: - sha := nextWrite.blk.Sha() - height := nextWrite.blk.Height() - err := a.server.db.UpdateAddrIndexForBlock(sha, height, - nextWrite.addrIndex) - if err != nil { - adxrLog.Errorf("Unable to write index for block, "+ - "sha %v, height %v", sha, height) - a.server.Stop() - break out - } - writeFinished <- struct{}{} - a.progressLogger.LogBlockHeight(nextWrite.blk) - case <-a.quit: - break out - } - - } - close(workerQuit) - a.wg.Done() -} - -// indexCatchUpWorker indexes the transactions of previously validated and -// stored blocks. -// NOTE: Must be run as a goroutine -func (a *addrIndexer) indexCatchUpWorker(workChan chan *indexBlockMsg, - wg *sync.WaitGroup, quit chan struct{}) { -out: - for { - select { - case indexJob := <-workChan: - addrIndex, err := a.indexBlockAddrs(indexJob.blk) - if err != nil { - adxrLog.Errorf("Unable to index transactions of"+ - " block: %v", err) - a.server.Stop() - break out - } - a.writeRequests <- &writeIndexReq{blk: indexJob.blk, - addrIndex: addrIndex} - case <-quit: - break out - } - } - wg.Done() -} - -// indexScriptPubKey indexes all data pushes greater than 8 bytes within the -// passed SPK. Our "address" index is actually a hash160 index, where in the -// ideal case the data push is either the hash160 of a publicKey (P2PKH) or -// a Script (P2SH). -func indexScriptPubKey(addrIndex database.BlockAddrIndex, scriptPubKey []byte, - locInBlock *wire.TxLoc) error { - dataPushes, err := txscript.PushedData(scriptPubKey) - if err != nil { - adxrLog.Tracef("Couldn't get pushes: %v", err) - return err - } - - for _, data := range dataPushes { - // Only index pushes greater than 8 bytes. - if len(data) < 8 { - continue - } - - var indexKey [ripemd160.Size]byte - // A perfect little hash160. - if len(data) <= 20 { - copy(indexKey[:], data) - // Otherwise, could be a payToPubKey or an OP_RETURN, so we'll - // make a hash160 out of it. - } else { - copy(indexKey[:], btcutil.Hash160(data)) - } - - addrIndex[indexKey] = append(addrIndex[indexKey], locInBlock) - } return nil } +// convertToAddrIndex indexes all data pushes greater than 8 bytes within the +// passed SPK and returns a TxAddrIndex with the given data. Our "address" +// index is actually a hash160 index, where in the ideal case the data push +// is either the hash160 of a publicKey (P2PKH) or a Script (P2SH). +func convertToAddrIndex(scrVersion uint16, scr []byte, height int64, + locInBlock *wire.TxLoc) ([]*database.TxAddrIndex, error) { + var tais []*database.TxAddrIndex + + if scr == nil || locInBlock == nil { + return nil, fmt.Errorf("passed nil pointer") + } + + var indexKey [ripemd160.Size]byte + + // Get the script classes and extract the PKH if applicable. + // If it's multisig, unknown, etc, just hash the script itself. 
+ class, addrs, _, err := txscript.ExtractPkScriptAddrs(scrVersion, scr, + activeNetParams.Params) + if err != nil { + return nil, fmt.Errorf("script conversion error") + } + knownType := false + for _, addr := range addrs { + switch { + case class == txscript.PubKeyTy: + copy(indexKey[:], addr.Hash160()[:]) + case class == txscript.PubkeyAltTy: + copy(indexKey[:], addr.Hash160()[:]) + case class == txscript.PubKeyHashTy: + copy(indexKey[:], addr.ScriptAddress()[:]) + case class == txscript.PubkeyHashAltTy: + copy(indexKey[:], addr.ScriptAddress()[:]) + case class == txscript.StakeSubmissionTy: + copy(indexKey[:], addr.ScriptAddress()[:]) + case class == txscript.StakeGenTy: + copy(indexKey[:], addr.ScriptAddress()[:]) + case class == txscript.StakeRevocationTy: + copy(indexKey[:], addr.ScriptAddress()[:]) + case class == txscript.StakeSubChangeTy: + copy(indexKey[:], addr.ScriptAddress()[:]) + case class == txscript.MultiSigTy: + copy(indexKey[:], addr.ScriptAddress()[:]) + case class == txscript.ScriptHashTy: + copy(indexKey[:], addr.ScriptAddress()[:]) + } + tai := &database.TxAddrIndex{ + indexKey, + uint32(height), + uint32(locInBlock.TxStart), + uint32(locInBlock.TxLen), + } + + tais = append(tais, tai) + knownType = true + } + + if !knownType { + copy(indexKey[:], dcrutil.Hash160(scr)) + tai := &database.TxAddrIndex{ + indexKey, + uint32(height), + uint32(locInBlock.TxStart), + uint32(locInBlock.TxLen), + } + + tais = append(tais, tai) + } + + return tais, nil +} + +// lookupTransaction is a special transaction lookup function that searches +// the database, the block, and its parent for a transaction. This is needed +// because indexBlockAddrs is called AFTER a block is added/removed in the +// blockchain in blockManager, necessitating that the blocks internally be +// searched for inputs for any given transaction too. Additionally, it's faster +// to get the tx from the blocks here since they're already available. +func (a *addrIndexer) lookupTransaction(txHash chainhash.Hash, blk *dcrutil.Block, + parent *dcrutil.Block) (*wire.MsgTx, error) { + // Search the previous block and parent first. + txTreeRegularValid := dcrutil.IsFlagSet16(blk.MsgBlock().Header.VoteBits, + dcrutil.BlockValid) + + // Search the regular tx tree of this and the last block if the + // tx tree regular was validated. + if txTreeRegularValid { + for _, stx := range parent.STransactions() { + if stx.Sha().IsEqual(&txHash) { + return stx.MsgTx(), nil + } + } + for _, tx := range parent.Transactions() { + if tx.Sha().IsEqual(&txHash) { + return tx.MsgTx(), nil + } + } + for _, tx := range blk.Transactions() { + if tx.Sha().IsEqual(&txHash) { + return tx.MsgTx(), nil + } + } + } else { + // Just search this block's regular tx tree and the previous + // block's stake tx tree. + for _, stx := range parent.STransactions() { + if stx.Sha().IsEqual(&txHash) { + return stx.MsgTx(), nil + } + } + for _, tx := range blk.Transactions() { + if tx.Sha().IsEqual(&txHash) { + return tx.MsgTx(), nil + } + } + } + + // Lookup and fetch the referenced output's tx in the database. + txList, err := a.server.db.FetchTxBySha(&txHash) + if err != nil { + adxrLog.Errorf("Error fetching tx %v: %v", + txHash, err) + return nil, err + } + + if len(txList) == 0 { + return nil, fmt.Errorf("transaction %v not found", + txHash) + } + + return txList[len(txList)-1].Tx, nil +} + // indexBlockAddrs returns a populated index of the all the transactions in the // passed block based on the addresses involved in each transaction.
-func (a *addrIndexer) indexBlockAddrs(blk *btcutil.Block) (database.BlockAddrIndex, error) { - addrIndex := make(database.BlockAddrIndex) - txLocs, err := blk.TxLoc() +func (a *addrIndexer) indexBlockAddrs(blk *dcrutil.Block, + parent *dcrutil.Block) (database.BlockAddrIndex, error) { + var addrIndex database.BlockAddrIndex + _, stxLocs, err := blk.TxLoc() if err != nil { return nil, err } - for txIdx, tx := range blk.Transactions() { - // Tx's offset and length in the block. - locInBlock := &txLocs[txIdx] + txTreeRegularValid := dcrutil.IsFlagSet16(blk.MsgBlock().Header.VoteBits, + dcrutil.BlockValid) - // Coinbases don't have any inputs. - if !blockchain.IsCoinBase(tx) { - // Index the SPK's of each input's previous outpoint - // transaction. - for _, txIn := range tx.MsgTx().TxIn { - // Lookup and fetch the referenced output's tx. - prevOut := txIn.PreviousOutPoint - txList, err := a.server.db.FetchTxBySha(&prevOut.Hash) - if len(txList) == 0 { - return nil, fmt.Errorf("transaction %v not found", - prevOut.Hash) + // Add regular transactions iff the block was validated. + if txTreeRegularValid { + txLocs, _, err := parent.TxLoc() + if err != nil { + return nil, err + } + for txIdx, tx := range parent.Transactions() { + // Tx's offset and length in the block. + locInBlock := &txLocs[txIdx] + + // Coinbases don't have any inputs. + if !blockchain.IsCoinBase(tx) { + // Index the SPK's of each input's previous outpoint + // transaction. + for _, txIn := range tx.MsgTx().TxIn { + prevOutTx, err := a.lookupTransaction( + txIn.PreviousOutPoint.Hash, + blk, + parent) + inputOutPoint := prevOutTx.TxOut[txIn.PreviousOutPoint.Index] + + toAppend, err := convertToAddrIndex(inputOutPoint.Version, + inputOutPoint.PkScript, parent.Height(), locInBlock) + if err != nil { + adxrLog.Errorf("Error converting tx %v: %v", + txIn.PreviousOutPoint.Hash, err) + return nil, err + } + addrIndex = append(addrIndex, toAppend...) } + } + + for _, txOut := range tx.MsgTx().TxOut { + toAppend, err := convertToAddrIndex(txOut.Version, txOut.PkScript, + parent.Height(), locInBlock) if err != nil { - adxrLog.Errorf("Error fetching tx %v: %v", - prevOut.Hash, err) + adxrLog.Errorf("Error converting tx %v: %v", + tx.MsgTx().TxSha(), err) return nil, err } - prevOutTx := txList[len(txList)-1] - inputOutPoint := prevOutTx.Tx.TxOut[prevOut.Index] - - indexScriptPubKey(addrIndex, inputOutPoint.PkScript, locInBlock) + addrIndex = append(addrIndex, toAppend...) } } + } - for _, txOut := range tx.MsgTx().TxOut { - indexScriptPubKey(addrIndex, txOut.PkScript, locInBlock) + // Add stake transactions. + for stxIdx, stx := range blk.STransactions() { + // Tx's offset and length in the block. + locInBlock := &stxLocs[stxIdx] + + isSSGen, _ := stake.IsSSGen(stx) + + // Index the SPK's of each input's previous outpoint + // transaction. + for i, txIn := range stx.MsgTx().TxIn { + // Stakebases don't have any inputs. + if isSSGen && i == 0 { + continue + } + + // Lookup and fetch the referenced output's tx. + prevOutTx, err := a.lookupTransaction( + txIn.PreviousOutPoint.Hash, + blk, + parent) + inputOutPoint := prevOutTx.TxOut[txIn.PreviousOutPoint.Index] + + toAppend, err := convertToAddrIndex(inputOutPoint.Version, + inputOutPoint.PkScript, blk.Height(), locInBlock) + if err != nil { + adxrLog.Errorf("Error converting stx %v: %v", + txIn.PreviousOutPoint.Hash, err) + return nil, err + } + addrIndex = append(addrIndex, toAppend...) 
+ } + + for _, txOut := range stx.MsgTx().TxOut { + toAppend, err := convertToAddrIndex(txOut.Version, txOut.PkScript, + blk.Height(), locInBlock) + if err != nil { + adxrLog.Errorf("Error converting stx %v: %v", + stx.MsgTx().TxSha(), err) + return nil, err + } + addrIndex = append(addrIndex, toAppend...) } } + return addrIndex, nil } + +// InsertBlock synchronously queues a newly solved block to have its +// transactions indexed by address. +func (a *addrIndexer) InsertBlock(block *dcrutil.Block, parent *dcrutil.Block) error { + addrIndex, err := a.indexBlockAddrs(block, parent) + if err != nil { + return fmt.Errorf("Unable to index transactions of"+ + " block: %v", err) + } + err = a.server.db.UpdateAddrIndexForBlock(block.Sha(), + block.Height(), + addrIndex) + if err != nil { + return fmt.Errorf("Unable to insert block: %v", err.Error()) + } + + return nil +} + +// RemoveBlock removes all transactions from a block on the tip from the +// address index database. +func (a *addrIndexer) RemoveBlock(block *dcrutil.Block, + parent *dcrutil.Block) error { + addrIndex, err := a.indexBlockAddrs(block, parent) + if err != nil { + return fmt.Errorf("Unable to index transactions of"+ + " block: %v", err) + } + err = a.server.db.DropAddrIndexForBlock(block.Sha(), + block.Height(), + addrIndex) + if err != nil { + return fmt.Errorf("Unable to remove block: %v", err.Error()) + } + + return nil +} diff --git a/cmd/addblock/addblock.go b/cmd/addblock/addblock.go index 2dbc2269..339a0c60 100644 --- a/cmd/addblock/addblock.go +++ b/cmd/addblock/addblock.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -9,15 +10,15 @@ import ( "path/filepath" "runtime" - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/database" - _ "github.com/btcsuite/btcd/database/ldb" - "github.com/btcsuite/btcd/limits" "github.com/btcsuite/btclog" + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/database" + _ "github.com/decred/dcrd/database/ldb" + "github.com/decred/dcrd/limits" ) const ( - // blockDbNamePrefix is the prefix for the btcd block database. + // blockDbNamePrefix is the prefix for the dcrd block database. blockDbNamePrefix = "blocks" ) diff --git a/cmd/addblock/config.go b/cmd/addblock/config.go index cdb45a93..01296bfd 100644 --- a/cmd/addblock/config.go +++ b/cmd/addblock/config.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
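The chainindexer changes above drop the asynchronous catch-up workers in favor of the synchronous InsertBlock and RemoveBlock methods on addrIndexer. The following is a minimal sketch, not part of the patch, of how a caller could keep the address index in lockstep with the chain tip using those two methods; the hook names onBlockConnected and onBlockDisconnected are assumed for illustration, while addrIndexer, InsertBlock, RemoveBlock, and dcrutil.Block come from the diff itself.

```go
// Hypothetical wiring (an assumption, not part of this patch): a block manager
// could call these hooks as blocks are connected to or disconnected from the
// main chain so the address index always matches the current tip.
package main

import "github.com/decred/dcrutil"

// onBlockConnected indexes a freshly connected block. Because indexing is now
// synchronous, any failure is reported directly to the caller.
func onBlockConnected(ai *addrIndexer, block, parent *dcrutil.Block) error {
	return ai.InsertBlock(block, parent)
}

// onBlockDisconnected removes a block's entries from the address index when it
// is detached during a reorganization.
func onBlockDisconnected(ai *addrIndexer, block, parent *dcrutil.Block) error {
	return ai.RemoveBlock(block, parent)
}
```

The point of the synchronous design is that index write failures surface immediately to the caller instead of being lost in a worker goroutine, at the cost of blocking block processing while the index is updated.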
@@ -9,12 +10,12 @@ import ( "os" "path/filepath" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/database" - _ "github.com/btcsuite/btcd/database/ldb" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" flags "github.com/btcsuite/go-flags" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/database" + _ "github.com/decred/dcrd/database/ldb" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) const ( @@ -24,8 +25,8 @@ const ( ) var ( - btcdHomeDir = btcutil.AppDataDir("btcd", false) - defaultDataDir = filepath.Join(btcdHomeDir, "data") + dcrdHomeDir = dcrutil.AppDataDir("dcrd", false) + defaultDataDir = filepath.Join(dcrdHomeDir, "data") knownDbTypes = database.SupportedDBs() activeNetParams = &chaincfg.MainNetParams ) @@ -34,13 +35,12 @@ var ( // // See loadConfig for details on the configuration load process. type config struct { - DataDir string `short:"b" long:"datadir" description:"Location of the btcd data directory"` - DbType string `long:"dbtype" description:"Database backend to use for the Block Chain"` - TestNet3 bool `long:"testnet" description:"Use the test network"` - RegressionTest bool `long:"regtest" description:"Use the regression test network"` - SimNet bool `long:"simnet" description:"Use the simulation test network"` - InFile string `short:"i" long:"infile" description:"File containing the block(s)"` - Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"` + DataDir string `short:"b" long:"datadir" description:"Location of the dcrd data directory"` + DbType string `long:"dbtype" description:"Database backend to use for the Block Chain"` + TestNet bool `long:"testnet" description:"Use the test network"` + SimNet bool `long:"simnet" description:"Use the simulation test network"` + InFile string `short:"i" long:"infile" description:"File containing the block(s)"` + Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"` } // filesExists reports whether the named file or directory exists. @@ -65,17 +65,17 @@ func validDbType(dbType string) bool { } // netName returns the name used when referring to a bitcoin network. At the -// time of writing, btcd currently places blocks for testnet version 3 in the +// time of writing, dcrd currently places blocks for testnet version 3 in the // data and log directory "testnet", which does not match the Name field of the // chaincfg parameters. This function can be used to override this directory name -// as "testnet" when the passed active network matches wire.TestNet3. +// as "testnet" when the passed active network matches wire.TestNet. // // A proper upgrade to move the data and log directories for this network to -// "testnet3" is planned for the future, at which point this function can be +// "testnet" is planned for the future, at which point this function can be // removed and the network parameter's name used instead. 
func netName(chainParams *chaincfg.Params) string { switch chainParams.Net { - case wire.TestNet3: + case wire.TestNet: return "testnet" default: return chainParams.Name @@ -107,13 +107,9 @@ func loadConfig() (*config, []string, error) { numNets := 0 // Count number of network flags passed; assign active network params // while we're at it - if cfg.TestNet3 { + if cfg.TestNet { numNets++ - activeNetParams = &chaincfg.TestNet3Params - } - if cfg.RegressionTest { - numNets++ - activeNetParams = &chaincfg.RegressionNetParams + activeNetParams = &chaincfg.TestNetParams } if cfg.SimNet { numNets++ diff --git a/cmd/addblock/import.go b/cmd/addblock/import.go index f47178ec..29be20ce 100644 --- a/cmd/addblock/import.go +++ b/cmd/addblock/import.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -11,14 +12,15 @@ import ( "sync" "time" - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/database" - _ "github.com/btcsuite/btcd/database/ldb" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + _ "github.com/decred/dcrd/database/ldb" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) -var zeroHash = wire.ShaHash{} +var zeroHash = chainhash.Hash{} // importResults houses the stats and result as an import operation. type importResults struct { @@ -94,7 +96,7 @@ func (bi *blockImporter) readBlock() ([]byte, error) { // with any potential errors. func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) { // Deserialize the block which includes checks for malformed blocks. - block, err := btcutil.NewBlockFromBytes(serializedBlock) + block, err := dcrutil.NewBlockFromBytes(serializedBlock) if err != nil { return false, err } @@ -129,7 +131,7 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) { // Ensure the blocks follows all of the chain rules and match up to the // known checkpoints. - isOrphan, err := bi.chain.ProcessBlock(block, bi.medianTime, + _, isOrphan, err := bi.chain.ProcessBlock(block, bi.medianTime, blockchain.BFFastAdd) if err != nil { return false, err @@ -303,7 +305,7 @@ func newBlockImporter(db database.Db, r io.ReadSeeker) *blockImporter { doneChan: make(chan bool), errChan: make(chan error), quit: make(chan struct{}), - chain: blockchain.New(db, activeNetParams, nil), + chain: blockchain.New(db, nil, activeNetParams, nil), medianTime: blockchain.NewMedianTime(), lastLogTime: time.Now(), } diff --git a/cmd/btcctl/config.go b/cmd/dcrctl/config.go similarity index 84% rename from cmd/btcctl/config.go rename to cmd/dcrctl/config.go index a6c37f82..45e52094 100644 --- a/cmd/btcctl/config.go +++ b/cmd/dcrctl/config.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -11,8 +12,9 @@ import ( "path/filepath" "strings" - "github.com/btcsuite/btcd/btcjson" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/dcrjson" + "github.com/decred/dcrutil" + flags "github.com/btcsuite/go-flags" ) @@ -20,17 +22,17 @@ const ( // unusableFlags are the command usage flags which this utility are not // able to use. 
In particular it doesn't support websockets and // consequently notifications. - unusableFlags = btcjson.UFWebsocketOnly | btcjson.UFNotification + unusableFlags = dcrjson.UFWebsocketOnly | dcrjson.UFNotification ) var ( - btcdHomeDir = btcutil.AppDataDir("btcd", false) - btcctlHomeDir = btcutil.AppDataDir("btcctl", false) - btcwalletHomeDir = btcutil.AppDataDir("btcwallet", false) - defaultConfigFile = filepath.Join(btcctlHomeDir, "btcctl.conf") + dcrdHomeDir = dcrutil.AppDataDir("dcrd", false) + dcrctlHomeDir = dcrutil.AppDataDir("dcrctl", false) + dcrwalletHomeDir = dcrutil.AppDataDir("dcrwallet", false) + defaultConfigFile = filepath.Join(dcrctlHomeDir, "dcrctl.conf") defaultRPCServer = "localhost" - defaultRPCCertFile = filepath.Join(btcdHomeDir, "rpc.cert") - defaultWalletCertFile = filepath.Join(btcwalletHomeDir, "rpc.cert") + defaultRPCCertFile = filepath.Join(dcrdHomeDir, "rpc.cert") + defaultWalletCertFile = filepath.Join(dcrwalletHomeDir, "rpc.cert") ) // listCommands categorizes and lists all of the usable commands along with @@ -43,10 +45,10 @@ func listCommands() { ) // Get a list of registered commands and categorize and filter them. - cmdMethods := btcjson.RegisteredCmdMethods() + cmdMethods := dcrjson.RegisteredCmdMethods() categorized := make([][]string, numCategories) for _, method := range cmdMethods { - flags, err := btcjson.MethodUsageFlags(method) + flags, err := dcrjson.MethodUsageFlags(method) if err != nil { // This should never happen since the method was just // returned from the package, but be safe. @@ -58,7 +60,7 @@ func listCommands() { continue } - usage, err := btcjson.MethodUsageText(method) + usage, err := dcrjson.MethodUsageText(method) if err != nil { // This should never happen since the method was just // returned from the package, but be safe. @@ -67,7 +69,7 @@ func listCommands() { // Categorize the command based on the usage flags. category := categoryChain - if flags&btcjson.UFWalletOnly != 0 { + if flags&dcrjson.UFWalletOnly != 0 { category = categoryWallet } categorized[category] = append(categorized[category], usage) @@ -86,7 +88,7 @@ func listCommands() { } } -// config defines the configuration options for btcctl. +// config defines the configuration options for dcrctl. // // See loadConfig for details on the configuration load process. type config struct { @@ -101,36 +103,37 @@ type config struct { Proxy string `long:"proxy" description:"Connect via SOCKS5 proxy (eg. 127.0.0.1:9050)"` ProxyUser string `long:"proxyuser" description:"Username for proxy server"` ProxyPass string `long:"proxypass" default-mask:"-" description:"Password for proxy server"` - TestNet3 bool `long:"testnet" description:"Connect to testnet"` + TestNet bool `long:"testnet" description:"Connect to testnet"` SimNet bool `long:"simnet" description:"Connect to the simulation test network"` TLSSkipVerify bool `long:"skipverify" description:"Do not verify tls certificates (not recommended!)"` Wallet bool `long:"wallet" description:"Connect to wallet"` + Terminal bool `long:"terminal" description:"Allow interactive use in a terminal"` } // normalizeAddress returns addr with the passed default port appended if // there is not already a port specified. 
-func normalizeAddress(addr string, useTestNet3, useSimNet, useWallet bool) string { +func normalizeAddress(addr string, useTestNet, useSimNet, useWallet bool) string { _, _, err := net.SplitHostPort(addr) if err != nil { var defaultPort string switch { - case useTestNet3: + case useTestNet: if useWallet { - defaultPort = "18332" + defaultPort = "19110" } else { - defaultPort = "18334" + defaultPort = "19109" } case useSimNet: if useWallet { - defaultPort = "18554" + defaultPort = "19557" } else { - defaultPort = "18556" + defaultPort = "19556" } default: if useWallet { - defaultPort = "8332" + defaultPort = "9110" } else { - defaultPort = "8334" + defaultPort = "9109" } } @@ -144,7 +147,7 @@ func normalizeAddress(addr string, useTestNet3, useSimNet, useWallet bool) strin func cleanAndExpandPath(path string) string { // Expand initial ~ to OS specific home directory. if strings.HasPrefix(path, "~") { - homeDir := filepath.Dir(btcctlHomeDir) + homeDir := filepath.Dir(dcrctlHomeDir) path = strings.Replace(path, "~", homeDir, 1) } @@ -174,7 +177,7 @@ func loadConfig() (*config, []string, error) { } // Create the home directory if it doesn't already exist. - err := os.MkdirAll(btcdHomeDir, 0700) + err := os.MkdirAll(dcrdHomeDir, 0700) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(-1) @@ -238,7 +241,7 @@ func loadConfig() (*config, []string, error) { // Multiple networks can't be selected simultaneously. numNets := 0 - if cfg.TestNet3 { + if cfg.TestNet { numNets++ } if cfg.SimNet { @@ -263,7 +266,7 @@ func loadConfig() (*config, []string, error) { // Add default port to RPC server based on --testnet and --wallet flags // if needed. - cfg.RPCServer = normalizeAddress(cfg.RPCServer, cfg.TestNet3, + cfg.RPCServer = normalizeAddress(cfg.RPCServer, cfg.TestNet, cfg.SimNet, cfg.Wallet) return &cfg, remainingArgs, nil diff --git a/cmd/btcctl/btcctl.go b/cmd/dcrctl/dcrctl.go similarity index 89% rename from cmd/btcctl/btcctl.go rename to cmd/dcrctl/dcrctl.go index 5c412f86..2338f39c 100644 --- a/cmd/btcctl/btcctl.go +++ b/cmd/dcrctl/dcrctl.go @@ -10,7 +10,7 @@ import ( "path/filepath" "strings" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) const ( @@ -20,7 +20,7 @@ const ( // commandUsage display the usage for a specific command. func commandUsage(method string) { - usage, err := btcjson.MethodUsageText(method) + usage, err := dcrjson.MethodUsageText(method) if err != nil { // This should never happen since the method was already checked // before calling this function, but be safe. @@ -51,6 +51,11 @@ func main() { if err != nil { os.Exit(1) } + if cfg.Terminal { + startTerminal(cfg) + os.Exit(1) + } + if len(args) < 1 { usage("No command specified") os.Exit(1) @@ -59,7 +64,7 @@ func main() { // Ensure the specified method identifies a valid registered command and // is one of the usable types. method := args[0] - usageFlags, err := btcjson.MethodUsageFlags(method) + usageFlags, err := dcrjson.MethodUsageFlags(method) if err != nil { fmt.Fprintf(os.Stderr, "Unrecognized command '%s'\n", method) fmt.Fprintln(os.Stderr, listCmdMessage) @@ -104,20 +109,20 @@ func main() { // Attempt to create the appropriate command using the arguments // provided by the user. - cmd, err := btcjson.NewCmd(method, params...) + cmd, err := dcrjson.NewCmd(method, params...) 
if err != nil { // Show the error along with its error code when it's a - // btcjson.Error as it reallistcally will always be since the + // dcrjson.Error as it realistically will always be since the // NewCmd function is only supposed to return errors of that // type. - if jerr, ok := err.(btcjson.Error); ok { + if jerr, ok := err.(dcrjson.Error); ok { fmt.Fprintf(os.Stderr, "%s command: %v (code: %s)\n", - method, err, jerr.ErrorCode) + method, err, jerr.Code) commandUsage(method) os.Exit(1) } - // The error is not a btcjson.Error and this really should not + // The error is not a dcrjson.Error and this really should not // happen. Nevertheless, fallback to just showing the error // if it should happen due to a bug in the package. fmt.Fprintf(os.Stderr, "%s command: %v\n", method, err) @@ -127,7 +132,7 @@ func main() { // Marshal the command into a JSON-RPC byte slice in preparation for // sending it to the RPC server. - marshalledJSON, err := btcjson.MarshalCmd(1, cmd) + marshalledJSON, err := dcrjson.MarshalCmd(1, cmd) if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) diff --git a/cmd/btcctl/httpclient.go b/cmd/dcrctl/httpclient.go similarity index 98% rename from cmd/btcctl/httpclient.go rename to cmd/dcrctl/httpclient.go index 2a0f6dff..3554c35d 100644 --- a/cmd/btcctl/httpclient.go +++ b/cmd/dcrctl/httpclient.go @@ -10,7 +10,8 @@ import ( "net" "net/http" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" + "github.com/btcsuite/go-socks/socks" ) @@ -116,7 +117,7 @@ func sendPostRequest(marshalledJSON []byte, cfg *config) ([]byte, error) { } // Unmarshal the response. - var resp btcjson.Response + var resp dcrjson.Response if err := json.Unmarshal(respBytes, &resp); err != nil { return nil, err } diff --git a/cmd/dcrctl/sample-dcrctl.conf b/cmd/dcrctl/sample-dcrctl.conf new file mode 100644 index 00000000..8feefba4 --- /dev/null +++ b/cmd/dcrctl/sample-dcrctl.conf @@ -0,0 +1,240 @@ +[Application Options] + +; ------------------------------------------------------------------------------ +; Data settings +; ------------------------------------------------------------------------------ + +; The directory to store data such as the block chain and peer addresses. The +; block chain takes several GB, so this location must have a lot of free space. +; The default is ~/.dcrd/data on POSIX OSes, $LOCALAPPDATA/Dcrd/data on Windows, +; ~/Library/Application Support/Dcrd/data on Mac OS, and $homed/dcrd/data on +; Plan9. Environment variables are expanded so they may be used. NOTE: Windows +; environment variables are typically %VARIABLE%, but they must be accessed with +; $VARIABLE here. Also, ~ is expanded to $LOCALAPPDATA on Windows. +; datadir=~/.dcrd/data + + +; ------------------------------------------------------------------------------ +; Network settings +; ------------------------------------------------------------------------------ + +; Use testnet. +; testnet=1 + +; Connect via a SOCKS5 proxy. NOTE: Specifying a proxy will disable listening +; for incoming connections unless listen addresses are provided via the 'listen' +; option. +; proxy=127.0.0.1:9050 +; proxyuser= +; proxypass= + +; The SOCKS5 proxy above is assumed to be Tor (https://www.torproject.org). +; If the proxy is not tor the following may be used to prevent using +; tor specific SOCKS queries to lookup addresses (this increases anonymity when +; tor is used by preventing your IP being leaked via DNS). +; noonion=1 + +; Use an alternative proxy to connect to .onion addresses.
The proxy is assumed +; to be a Tor node. Non .onion addresses will be contacted with the main proxy +; or without a proxy if none is set. +; onion=127.0.0.1:9051 + +; ****************************************************************************** +; Summary of 'addpeer' versus 'connect'. +; +; Only one of the following two options, 'addpeer' and 'connect', may be +; specified. Both allow you to specify peers that you want to stay connected +; with, but the behavior is slightly different. By default, dcrd will query DNS +; to find peers to connect to, so unless you have a specific reason such as +; those described below, you probably won't need to modify anything here. +; +; 'addpeer' does not prevent connections to other peers discovered from +; the peers you are connected to and also lets the remote peers know you are +; available so they can notify other peers they can connect to you. This +; option might be useful if you are having problems finding a node for some +; reason (perhaps due to a firewall). +; +; 'connect', on the other hand, will ONLY connect to the specified peers and +; no others. It also disables listening (unless you explicitly set listen +; addresses via the 'listen' option) and DNS seeding, so you will not be +; advertised as an available peer to the peers you connect to and won't accept +; connections from any other peers. So, the 'connect' option effectively allows +; you to only connect to "trusted" peers. +; ****************************************************************************** + +; Add persistent peers to connect to as desired. One peer per line. +; You may specify each IP address with or without a port. The default port will +; be added automatically if one is not specified here. +; addpeer=192.168.1.1 +; addpeer=10.0.0.2:9108 +; addpeer=fe80::1 +; addpeer=[fe80::2]:9108 + +; Add persistent peers that you ONLY want to connect to as desired. One peer +; per line. You may specify each IP address with or without a port. The +; default port will be added automatically if one is not specified here. +; NOTE: Specifying this option has other side effects as described above in +; the 'addpeer' versus 'connect' summary section. +; connect=192.168.1.1 +; connect=10.0.0.2:9108 +; connect=fe80::1 +; connect=[fe80::2]:9108 + +; Maximum number of inbound and outbound peers. +; maxpeers=8 + +; How long to ban misbehaving peers. Valid time units are {s, m, h}. +; Minimum 1s. +; banduration=24h +; banduration=11h30m15s + +; Disable DNS seeding for peers. By default, when dcrd starts, it will use +; DNS to query for available peers to connect with. +; nodnsseed=1 + +; Specify the interfaces to listen on. One listen address per line. +; NOTE: The default port is modified by some options such as 'testnet', so it is +; recommended to not specify a port and allow a proper default to be chosen +; unless you have a specific reason to do otherwise.
+; All interfaces on default port (this is the default): +; listen= +; All ipv4 interfaces on default port: +; listen=0.0.0.0 +; All ipv6 interfaces on default port: +; listen=:: +; All interfaces on port 9108: +; listen=:9108 +; All ipv4 interfaces on port 9108: +; listen=0.0.0.0:9108 +; All ipv6 interfaces on port 9108: +; listen=[::]:9108 +; Only ipv4 localhost on port 9108: +; listen=127.0.0.1:9108 +; Only ipv6 localhost on port 9108: +; listen=[::1]:9108 +; Only ipv4 localhost on non-standard port 8336: +; listen=127.0.0.1:8336 +; All interfaces on non-standard port 8336: +; listen=:8336 +; All ipv4 interfaces on non-standard port 8336: +; listen=0.0.0.0:8336 +; All ipv6 interfaces on non-standard port 8336: +; listen=[::]:8336 + +; Disable listening for incoming connections. This will override all listeners. +; nolisten=1 + + +; ------------------------------------------------------------------------------ +; RPC server options - The following options control the built-in RPC server +; which is used to control and query information from a running dcrd process. +; +; NOTE: The RPC server is disabled by default if rpcuser AND rpcpass, or +; rpclimituser AND rpclimitpass, are not specified. +; ------------------------------------------------------------------------------ + +; Secure the RPC API by specifying the username and password. You can also +; specify a limited username and password. You must specify at least one +; full set of credentials - limited or admin - or the RPC server will +; be disabled. +; rpcuser=whatever_admin_username_you_want +; rpcpass= +; rpclimituser=whatever_limited_username_you_want +; rpclimitpass= + +; Specify the interfaces for the RPC server to listen on. One listen address per +; line. NOTE: The default port is modified by some options such as 'testnet', +; so it is recommended to not specify a port and allow a proper default to be +; chosen unless you have a specific reason to do otherwise. +; All interfaces on default port (this is the default): +; rpclisten= +; All ipv4 interfaces on default port: +; rpclisten=0.0.0.0 +; All ipv6 interfaces on default port: +; rpclisten=:: +; All interfaces on port 9109: +; rpclisten=:9109 +; All ipv4 interfaces on port 9109: +; rpclisten=0.0.0.0:9109 +; All ipv6 interfaces on port 9109: +; rpclisten=[::]:9109 +; Only ipv4 localhost on port 9109: +; rpclisten=127.0.0.1:9109 +; Only ipv6 localhost on port 9109: +; rpclisten=[::1]:9109 +; Only ipv4 localhost on non-standard port 8337: +; rpclisten=127.0.0.1:8337 +; All interfaces on non-standard port 8337: +; rpclisten=:8337 +; All ipv4 interfaces on non-standard port 8337: +; rpclisten=0.0.0.0:8337 +; All ipv6 interfaces on non-standard port 8337: +; rpclisten=[::]:8337 + +; Specify the maximum number of concurrent RPC clients for standard connections. +; rpcmaxclients=10 + +; Specify the maximum number of concurrent RPC websocket clients. +; rpcmaxwebsockets=25 + +; Use the following setting to disable the RPC server even if the rpcuser and +; rpcpass are specified above. This allows one to quickly disable the RPC +; server without having to remove credentials from the config file. +; norpc=1 + + +; ------------------------------------------------------------------------------ +; Coin Generation (Mining) Settings - The following options control the +; generation of block templates used by external mining applications through RPC +; calls as well as the built-in CPU miner (if enabled).
+; ------------------------------------------------------------------------------
+
+; Enable built-in CPU mining.
+;
+; NOTE: This is typically only useful for testing purposes such as testnet or
+; simnet since the difficulty on mainnet is far too high for CPU mining to be
+; worth your while.
+; generate=false
+
+; Add addresses to pay mined blocks to for CPU mining and the block templates
+; generated for the getwork RPC as desired. One address per line.
+; miningaddr=youraddress
+; miningaddr=youraddress2
+; miningaddr=youraddress3
+
+; Specify the minimum block size in bytes to create. By default, only
+; transactions which have enough fees or a high enough priority will be included
+; in generated block templates. Specifying a minimum block size will instead
+; attempt to fill generated block templates up with transactions until it is at
+; least the specified number of bytes.
+; blockminsize=0
+
+; Specify the maximum block size in bytes to create. This value will be limited
+; to the consensus limit if it is larger.
+; blockmaxsize=750000
+
+; Specify the size in bytes of the high-priority/low-fee area when creating a
+; block. Transactions which consist of large amounts, old inputs, and small
+; sizes have the highest priority. One consequence of this is that as low-fee
+; or free transactions age, they rise in priority, thereby making them more
+; likely to be included in this section of a new block. This value is limited
+; by the blockmaxsize option and will be reduced as needed.
+; blockprioritysize=50000
+
+
+; ------------------------------------------------------------------------------
+; Debug
+; ------------------------------------------------------------------------------
+
+; Debug logging level.
+; Valid levels are {trace, debug, info, warn, error, critical}
+; You may also specify <subsystem>=<level>,<subsystem2>=<level>,... to set the
+; log level for individual subsystems. Use dcrd --debuglevel=show to list
+; available subsystems.
+; debuglevel=info
+
+; The port used to listen for HTTP profile requests. The profile server will
+; be disabled if this option is not specified. The profile information can be
+; accessed at http://localhost:<profileport>/debug/pprof once running.
+; profile=6061
+
diff --git a/cmd/dcrctl/terminal.go b/cmd/dcrctl/terminal.go
new file mode 100644
index 00000000..43c120fa
--- /dev/null
+++ b/cmd/dcrctl/terminal.go
@@ -0,0 +1,204 @@
+// terminal
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/decred/dcrd/dcrjson"
+	"golang.org/x/crypto/ssh/terminal"
+)
+
+func execute(quit chan bool, protected *bool, cfg *config, line string) {
+	switch line {
+	case "h":
+		fallthrough
+	case "help":
+		fmt.Printf("[h]elp print this message\n")
+		fmt.Printf("[l]ist list all available commands\n")
+		fmt.Printf("[p]rotect toggle protected mode (for passwords)\n")
+		fmt.Printf("[q]uit/ctrl+d exit\n")
+		fmt.Printf("Enter commands with arguments to execute them.\n")
+	case "l":
+		fallthrough
+	case "list":
+		listCommands()
+	case "q":
+		fallthrough
+	case "quit":
+		quit <- true
+	case "p":
+		fallthrough
+	case "protect":
+		if *protected {
+			*protected = false
+			return
+		}
+		*protected = true
+		return
+	default:
+		args := strings.Split(line, " ")
+
+		if len(args) < 1 {
+			usage("No command specified")
+			return
+		}
+
+		// Ensure the specified method identifies a valid registered command and
+		// is one of the usable types.
+ listCmdMessageLocal := "Enter [l]ist to list commands" + method := args[0] + usageFlags, err := dcrjson.MethodUsageFlags(method) + if err != nil { + fmt.Fprintf(os.Stderr, "Unrecognized command '%s'\n", method) + fmt.Fprintln(os.Stderr, listCmdMessageLocal) + return + } + if usageFlags&unusableFlags != 0 { + fmt.Fprintf(os.Stderr, "The '%s' command can only be used via "+ + "websockets\n", method) + fmt.Fprintln(os.Stderr, listCmdMessageLocal) + return + } + + // Convert remaining command line args to a slice of interface values + // to be passed along as parameters to new command creation function. + // + // Since some commands, such as submitblock, can involve data which is + // too large for the Operating System to allow as a normal command line + // parameter, support using '-' as an argument to allow the argument + // to be read from a stdin pipe. + bio := bufio.NewReader(os.Stdin) + params := make([]interface{}, 0, len(args[1:])) + for _, arg := range args[1:] { + if arg == "-" { + param, err := bio.ReadString('\n') + if err != nil && err != io.EOF { + fmt.Fprintf(os.Stderr, "Failed to read data "+ + "from stdin: %v\n", err) + return + } + if err == io.EOF && len(param) == 0 { + fmt.Fprintln(os.Stderr, "Not enough lines "+ + "provided on stdin") + return + } + param = strings.TrimRight(param, "\r\n") + params = append(params, param) + continue + } + + params = append(params, arg) + } + + // Attempt to create the appropriate command using the arguments + // provided by the user. + cmd, err := dcrjson.NewCmd(method, params...) + if err != nil { + // Show the error along with its error code when it's a + // dcrjson.Error as it reallistcally will always be since the + // NewCmd function is only supposed to return errors of that + // type. + if jerr, ok := err.(dcrjson.Error); ok { + fmt.Fprintf(os.Stderr, "%s command: %v (code: %s)\n", + method, err, jerr.Code) + commandUsage(method) + return + } + + // The error is not a dcrjson.Error and this really should not + // happen. Nevertheless, fallback to just showing the error + // if it should happen due to a bug in the package. + fmt.Fprintf(os.Stderr, "%s command: %v\n", method, err) + commandUsage(method) + return + } + + // Marshal the command into a JSON-RPC byte slice in preparation for + // sending it to the RPC server. + marshalledJSON, err := dcrjson.MarshalCmd(1, cmd) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return + } + + // Send the JSON-RPC request to the server using the user-specified + // connection configuration. + result, err := sendPostRequest(marshalledJSON, cfg) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return + } + + // Choose how to display the result based on its type. 
+ strResult := string(result) + if strings.HasPrefix(strResult, "{") || strings.HasPrefix(strResult, "[") { + var dst bytes.Buffer + if err := json.Indent(&dst, result, "", " "); err != nil { + fmt.Fprintf(os.Stderr, "Failed to format result: %v", + err) + return + } + fmt.Println(dst.String()) + return + + } else if strings.HasPrefix(strResult, `"`) { + var str string + if err := json.Unmarshal(result, &str); err != nil { + fmt.Fprintf(os.Stderr, "Failed to unmarshal result: %v", + err) + return + } + fmt.Println(str) + return + + } else if strResult != "null" { + fmt.Println(strResult) + } + } +} + +func startTerminal(c *config) { + fmt.Printf("Starting terminal mode.\n") + fmt.Printf("Enter h for [h]elp.\n") + fmt.Printf("Enter q for [q]uit.\n") + done := make(chan bool) + initState, err := terminal.GetState(0) + protected := false + if err != nil { + fmt.Printf("error getting terminal state: %v\n", err.Error()) + return + } + + go func() { + terminal.MakeRaw(int(os.Stdin.Fd())) + n := terminal.NewTerminal(os.Stdin, "> ") + for { + var ln string + var err error + if !protected { + ln, err = n.ReadLine() + if err != nil { + done <- true + } + } else { + ln, err = n.ReadPassword(">*") + if err != nil { + done <- true + } + } + execute(done, &protected, c, ln) + } + }() + select { + case <-done: + fmt.Printf("exiting...\n") + terminal.Restore(0, initState) + close(done) + } +} diff --git a/cmd/btcctl/version.go b/cmd/dcrctl/version.go similarity index 98% rename from cmd/btcctl/version.go rename to cmd/dcrctl/version.go index 12a27ab5..024d0bb3 100644 --- a/cmd/btcctl/version.go +++ b/cmd/dcrctl/version.go @@ -1,4 +1,5 @@ // Copyright (c) 2013 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. diff --git a/cmd/dropafter/dropafter.go b/cmd/dropafter/dropafter.go index dca1041e..07ccb96f 100644 --- a/cmd/dropafter/dropafter.go +++ b/cmd/dropafter/dropafter.go @@ -1,4 +1,5 @@ // Copyright (c) 2013 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
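The terminal mode added in cmd/dcrctl/terminal.go above funnels every command through the dcrjson package: the method name and parameters typed by the user are turned into a command struct with NewCmd, marshalled into a JSON-RPC request with MarshalCmd, sent to dcrd, and the raw result is pretty-printed. A minimal standalone sketch of that request-building flow is shown below; the method name is only illustrative and the HTTP send step is omitted.

```go
package main

import (
	"fmt"

	"github.com/decred/dcrd/dcrjson"
)

func main() {
	// Build the command struct. NewCmd verifies the method is registered
	// and that the parameters match its expected types, just as the
	// terminal's execute function does for user input.
	cmd, err := dcrjson.NewCmd("getbestblockhash")
	if err != nil {
		fmt.Println("unrecognized command:", err)
		return
	}

	// Marshal it into a JSON-RPC request with id 1; these are the bytes
	// the terminal hands to sendPostRequest.
	marshalledJSON, err := dcrjson.MarshalCmd(1, cmd)
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	fmt.Println(string(marshalledJSON))
}
```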
@@ -11,27 +12,28 @@ import ( "path/filepath" "strconv" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/database" - _ "github.com/btcsuite/btcd/database/ldb" - "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btclog" - "github.com/btcsuite/btcutil" flags "github.com/btcsuite/go-flags" + + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + _ "github.com/decred/dcrd/database/ldb" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) type config struct { - DataDir string `short:"b" long:"datadir" description:"Directory to store data"` - DbType string `long:"dbtype" description:"Database backend"` - TestNet3 bool `long:"testnet" description:"Use the test network"` - RegressionTest bool `long:"regtest" description:"Use the regression test network"` - SimNet bool `long:"simnet" description:"Use the simulation test network"` - ShaString string `short:"s" description:"Block SHA to process" required:"true"` + DataDir string `short:"b" long:"datadir" description:"Directory to store data"` + DbType string `long:"dbtype" description:"Database backend"` + TestNet bool `long:"testnet" description:"Use the test network"` + SimNet bool `long:"simnet" description:"Use the simulation test network"` + ShaString string `short:"s" description:"Block SHA to process" required:"true"` } var ( - btcdHomeDir = btcutil.AppDataDir("btcd", false) - defaultDataDir = filepath.Join(btcdHomeDir, "data") + dcrdHomeDir = dcrutil.AppDataDir("dcrd", false) + defaultDataDir = filepath.Join(dcrdHomeDir, "data") log btclog.Logger activeNetParams = &chaincfg.MainNetParams ) @@ -41,18 +43,18 @@ const ( argHeight ) -// netName returns the name used when referring to a bitcoin network. At the -// time of writing, btcd currently places blocks for testnet version 3 in the +// netName returns the name used when referring to a decred network. At the +// time of writing, dcrd currently places blocks for testnet version 0 in the // data and log directory "testnet", which does not match the Name field of the // chaincfg parameters. This function can be used to override this directory name -// as "testnet" when the passed active network matches wire.TestNet3. +// as "testnet" when the passed active network matches wire.TestNet. // // A proper upgrade to move the data and log directories for this network to -// "testnet3" is planned for the future, at which point this function can be +// "testnet" is planned for the future, at which point this function can be // removed and the network parameter's name used instead. 
func netName(chainParams *chaincfg.Params) string { switch chainParams.Net { - case wire.TestNet3: + case wire.TestNet: return "testnet" default: return chainParams.Name @@ -83,13 +85,9 @@ func main() { numNets := 0 // Count number of network flags passed; assign active network params // while we're at it - if cfg.TestNet3 { + if cfg.TestNet { numNets++ - activeNetParams = &chaincfg.TestNet3Params - } - if cfg.RegressionTest { - numNets++ - activeNetParams = &chaincfg.RegressionNetParams + activeNetParams = &chaincfg.TestNetParams } if cfg.SimNet { numNets++ @@ -137,11 +135,11 @@ func main() { } -func getSha(db database.Db, str string) (wire.ShaHash, error) { +func getSha(db database.Db, str string) (chainhash.Hash, error) { argtype, idx, sha, err := parsesha(str) if err != nil { log.Warnf("unable to decode [%v] %v", str, err) - return wire.ShaHash{}, err + return chainhash.Hash{}, err } switch argtype { @@ -150,7 +148,7 @@ func getSha(db database.Db, str string) (wire.ShaHash, error) { case argHeight: sha, err = db.FetchBlockShaByHeight(idx) if err != nil { - return wire.ShaHash{}, err + return chainhash.Hash{}, err } } if sha == nil { @@ -167,8 +165,8 @@ var errBadShaPrefix = errors.New("invalid prefix") var errBadShaLen = errors.New("invalid len") var errBadShaChar = errors.New("invalid character") -func parsesha(argstr string) (argtype int, height int64, psha *wire.ShaHash, err error) { - var sha wire.ShaHash +func parsesha(argstr string) (argtype int, height int64, psha *chainhash.Hash, err error) { + var sha chainhash.Hash var hashbuf string diff --git a/cmd/findcheckpoint/config.go b/cmd/findcheckpoint/config.go index 2cb1a5ae..bfa9c05e 100644 --- a/cmd/findcheckpoint/config.go +++ b/cmd/findcheckpoint/config.go @@ -1,4 +1,5 @@ // Copyright (c) 2013 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -9,12 +10,12 @@ import ( "os" "path/filepath" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/database" - _ "github.com/btcsuite/btcd/database/ldb" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" flags "github.com/btcsuite/go-flags" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/database" + _ "github.com/decred/dcrd/database/ldb" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) const ( @@ -25,8 +26,8 @@ const ( ) var ( - btcdHomeDir = btcutil.AppDataDir("btcd", false) - defaultDataDir = filepath.Join(btcdHomeDir, "data") + dcrdHomeDir = dcrutil.AppDataDir("dcrd", false) + defaultDataDir = filepath.Join(dcrdHomeDir, "data") knownDbTypes = database.SupportedDBs() activeNetParams = &chaincfg.MainNetParams ) @@ -35,13 +36,12 @@ var ( // // See loadConfig for details on the configuration load process. 
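The parsesha helper above (and the similar one in showblock further down) accepts either a block height or a 64-character hex hash, optionally prefixed with "0x", and decodes the hash by hand. A rough equivalent using the chainhash package could look like the sketch below; chainhash.NewHashFromStr is assumed here and is not part of this diff.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/decred/dcrd/chaincfg/chainhash"
)

// parseBlockArg treats short numeric arguments as heights and anything else
// as a hex-encoded block hash, loosely mirroring parsesha.
func parseBlockArg(arg string) (height int64, hash *chainhash.Hash, err error) {
	if len(arg) <= 16 {
		if h, perr := strconv.ParseInt(arg, 10, 64); perr == nil {
			return h, nil, nil
		}
	}
	arg = strings.TrimPrefix(arg, "0x")
	hash, err = chainhash.NewHashFromStr(arg) // assumed helper, not in this diff
	return 0, hash, err
}

func main() {
	fmt.Println(parseBlockArg("210000"))
	fmt.Println(parseBlockArg("0x0000000000000000000000000000000000000000000000000000000000000001"))
}
```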
type config struct { - DataDir string `short:"b" long:"datadir" description:"Location of the btcd data directory"` - DbType string `long:"dbtype" description:"Database backend to use for the Block Chain"` - TestNet3 bool `long:"testnet" description:"Use the test network"` - RegressionTest bool `long:"regtest" description:"Use the regression test network"` - SimNet bool `long:"simnet" description:"Use the simulation test network"` - NumCandidates int `short:"n" long:"numcandidates" description:"Max num of checkpoint candidates to show {1-20}"` - UseGoOutput bool `short:"g" long:"gooutput" description:"Display the candidates using Go syntax that is ready to insert into the btcchain checkpoint list"` + DataDir string `short:"b" long:"datadir" description:"Location of the dcrd data directory"` + DbType string `long:"dbtype" description:"Database backend to use for the Block Chain"` + TestNet bool `long:"testnet" description:"Use the test network"` + SimNet bool `long:"simnet" description:"Use the simulation test network"` + NumCandidates int `short:"n" long:"numcandidates" description:"Max num of checkpoint candidates to show {1-20}"` + UseGoOutput bool `short:"g" long:"gooutput" description:"Display the candidates using Go syntax that is ready to insert into the dcrchain checkpoint list"` } // validDbType returns whether or not dbType is a supported database type. @@ -55,18 +55,18 @@ func validDbType(dbType string) bool { return false } -// netName returns the name used when referring to a bitcoin network. At the -// time of writing, btcd currently places blocks for testnet version 3 in the +// netName returns the name used when referring to a decred network. At the +// time of writing, dcrd currently places blocks for testnet version 0 in the // data and log directory "testnet", which does not match the Name field of the // chaincfg parameters. This function can be used to override this directory name -// as "testnet" when the passed active network matches wire.TestNet3. +// as "testnet" when the passed active network matches wire.TestNet. // // A proper upgrade to move the data and log directories for this network to -// "testnet3" is planned for the future, at which point this function can be +// "testnet" is planned for the future, at which point this function can be // removed and the network parameter's name used instead. func netName(chainParams *chaincfg.Params) string { switch chainParams.Net { - case wire.TestNet3: + case wire.TestNet: return "testnet" default: return chainParams.Name @@ -97,13 +97,9 @@ func loadConfig() (*config, []string, error) { numNets := 0 // Count number of network flags passed; assign active network params // while we're at it - if cfg.TestNet3 { + if cfg.TestNet { numNets++ - activeNetParams = &chaincfg.TestNet3Params - } - if cfg.RegressionTest { - numNets++ - activeNetParams = &chaincfg.RegressionNetParams + activeNetParams = &chaincfg.TestNetParams } if cfg.SimNet { numNets++ diff --git a/cmd/findcheckpoint/findcheckpoint.go b/cmd/findcheckpoint/findcheckpoint.go index d7ef81e6..d38dfd18 100644 --- a/cmd/findcheckpoint/findcheckpoint.go +++ b/cmd/findcheckpoint/findcheckpoint.go @@ -1,4 +1,5 @@ // Copyright (c) 2013 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
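Both utilities key their on-disk layout off the netName helper described above, so testnet blocks land in a directory literally named "testnet" regardless of the Name field in the chain parameters. A small sketch of the resulting paths, using only identifiers that appear in this diff (the base directory is illustrative), is:

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/decred/dcrd/chaincfg"
	"github.com/decred/dcrd/wire"
)

// netName mirrors the helper used by dropafter and findcheckpoint above.
func netName(chainParams *chaincfg.Params) string {
	switch chainParams.Net {
	case wire.TestNet:
		return "testnet"
	default:
		return chainParams.Name
	}
}

func main() {
	base := filepath.Join("some", "dcrd", "data") // illustrative base directory
	fmt.Println(filepath.Join(base, netName(&chaincfg.TestNetParams)))
	fmt.Println(filepath.Join(base, netName(&chaincfg.MainNetParams)))
	fmt.Println(filepath.Join(base, netName(&chaincfg.SimNetParams)))
}
```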
@@ -9,11 +10,11 @@ import ( "os" "path/filepath" - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/database" - _ "github.com/btcsuite/btcd/database/ldb" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + _ "github.com/decred/dcrd/database/ldb" ) const blockDbNamePrefix = "blocks" @@ -41,10 +42,10 @@ func loadBlockDB() (database.Db, error) { // findCandidates searches the chain backwards for checkpoint candidates and // returns a slice of found candidates, if any. It also stops searching for -// candidates at the last checkpoint that is already hard coded into btcchain +// candidates at the last checkpoint that is already hard coded into chain // since there is no point in finding candidates before already existing // checkpoints. -func findCandidates(db database.Db, latestHash *wire.ShaHash) ([]*chaincfg.Checkpoint, error) { +func findCandidates(db database.Db, latestHash *chainhash.Hash) ([]*chaincfg.Checkpoint, error) { // Start with the latest block of the main chain. block, err := db.FetchBlockBySha(latestHash) if err != nil { @@ -53,7 +54,7 @@ func findCandidates(db database.Db, latestHash *wire.ShaHash) ([]*chaincfg.Check // Setup chain and get the latest checkpoint. Ignore notifications // since they aren't needed for this util. - chain := blockchain.New(db, activeNetParams, nil) + chain := blockchain.New(db, nil, activeNetParams, nil) latestCheckpoint := chain.LatestCheckpoint() if latestCheckpoint == nil { return nil, fmt.Errorf("unable to retrieve latest checkpoint") @@ -114,7 +115,7 @@ func findCandidates(db database.Db, latestHash *wire.ShaHash) ([]*chaincfg.Check // showCandidate display a checkpoint candidate using and output format // determined by the configuration parameters. The Go syntax output -// uses the format the btcchain code expects for checkpoints added to the list. +// uses the format the chain code expects for checkpoints added to the list. func showCandidate(candidateNum int, checkpoint *chaincfg.Checkpoint) { if cfg.UseGoOutput { fmt.Printf("Candidate %d -- {%d, newShaHashFromStr(\"%v\")},\n", diff --git a/cmd/gencerts/gencerts.go b/cmd/gencerts/gencerts.go index 959b264a..f88d920a 100644 --- a/cmd/gencerts/gencerts.go +++ b/cmd/gencerts/gencerts.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -12,8 +13,8 @@ import ( "strings" "time" - "github.com/btcsuite/btcutil" flags "github.com/btcsuite/go-flags" + "github.com/decred/dcrutil" ) type config struct { @@ -58,7 +59,7 @@ func main() { } validUntil := time.Now().Add(time.Duration(cfg.Years) * 365 * 24 * time.Hour) - cert, key, err := btcutil.NewTLSCertPair(cfg.Organization, validUntil, cfg.ExtraHosts) + cert, key, err := dcrutil.NewTLSCertPair(cfg.Organization, validUntil, cfg.ExtraHosts) if err != nil { fmt.Fprintf(os.Stderr, "cannot generate certificate pair: %v\n", err) os.Exit(1) @@ -81,7 +82,7 @@ func main() { func cleanAndExpandPath(path string) string { // Expand initial ~ to OS specific home directory. 
if strings.HasPrefix(path, "~") { - appHomeDir := btcutil.AppDataDir("gencerts", false) + appHomeDir := dcrutil.AppDataDir("gencerts", false) homeDir := filepath.Dir(appHomeDir) path = strings.Replace(path, "~", homeDir, 1) } diff --git a/cmd/showblock/showblock.go b/cmd/showblock/showblock.go new file mode 100644 index 00000000..cae8b5b1 --- /dev/null +++ b/cmd/showblock/showblock.go @@ -0,0 +1,308 @@ +// Copyright (c) 2013 Conformal Systems LLC. +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package main + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + + "github.com/btcsuite/btclog" + flags "github.com/btcsuite/go-flags" + "github.com/davecgh/go-spew/spew" + + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + _ "github.com/decred/dcrd/database/ldb" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" +) + +type Hash chainhash.Hash + +type config struct { + DataDir string `short:"b" long:"datadir" description:"Directory to store data"` + DbType string `long:"dbtype" description:"Database backend"` + TestNet bool `long:"testnet" description:"Use the test network"` + SimNet bool `long:"simnet" description:"Use the simulation test network"` + OutFile string `short:"o" description:"outfile"` + Progress bool `short:"p" description:"show progress"` + ShaString string `short:"s" description:"Block SHA to process" required:"true"` + EShaString string `short:"e" description:"End Block SHA to process"` + RawBlock bool `short:"r" description:"Raw Block"` + FmtBlock bool `short:"f" description:"Format Block"` + ShowTx bool `short:"t" description:"Show transaction"` +} + +var ( + dcrdHomeDir = dcrutil.AppDataDir("dcrd", false) + defaultDataDir = filepath.Join(dcrdHomeDir, "data") + log btclog.Logger + activeNetParams = &chaincfg.MainNetParams +) + +const ( + ArgSha = iota + ArgHeight +) + +// netName returns the name used when referring to a bitcoin network. At the +// time of writing, dcrd currently places blocks for testnet version 0 in the +// data and log directory "testnet", which does not match the Name field of the +// dcrnet parameters. This function can be used to override this directory name +// as "testnet" when the passed active network matches wire.TestNet. +// +// A proper upgrade to move the data and log directories for this network to +// "testnet" is planned for the future, at which point this function can be +// removed and the network parameter's name used instead. +func netName(netParams *chaincfg.Params) string { + switch netParams.Net { + case wire.TestNet: + return "testnet" + default: + return netParams.Name + } +} + +func main() { + end := int64(-1) + + cfg := config{ + DbType: "leveldb", + DataDir: defaultDataDir, + } + parser := flags.NewParser(&cfg, flags.Default) + _, err := parser.Parse() + if err != nil { + if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp { + parser.WriteHelp(os.Stderr) + } + return + } + + backendLogger := btclog.NewDefaultBackendLogger() + defer backendLogger.Flush() + log = btclog.NewSubsystemLogger(backendLogger, "") + database.UseLogger(log) + + // Multiple networks can't be selected simultaneously. 
+ funcName := "main" + numNets := 0 + // Count number of network flags passed; assign active network params + // while we're at it + if cfg.TestNet { + numNets++ + activeNetParams = &chaincfg.TestNetParams + } + if cfg.SimNet { + numNets++ + activeNetParams = &chaincfg.SimNetParams + } + if numNets > 1 { + str := "%s: The testnet, regtest, and simnet params can't be " + + "used together -- choose one of the three" + err := fmt.Errorf(str, funcName) + fmt.Fprintln(os.Stderr, err) + parser.WriteHelp(os.Stderr) + return + } + cfg.DataDir = filepath.Join(cfg.DataDir, netName(activeNetParams)) + + blockDbNamePrefix := "blocks" + dbName := blockDbNamePrefix + "_" + cfg.DbType + if cfg.DbType == "sqlite" { + dbName = dbName + ".db" + } + dbPath := filepath.Join(cfg.DataDir, dbName) + + log.Infof("loading db %v", cfg.DbType) + database, err := database.OpenDB(cfg.DbType, dbPath) + if err != nil { + log.Warnf("db open failed: %v", err) + return + } + defer database.Close() + log.Infof("db load complete") + + height, err := getHeight(database, cfg.ShaString) + if err != nil { + log.Infof("Invalid block %v", cfg.ShaString) + return + } + if cfg.EShaString != "" { + end, err = getHeight(database, cfg.EShaString) + if err != nil { + log.Infof("Invalid end block %v", cfg.EShaString) + return + } + } else { + end = height + 1 + } + + log.Infof("height %v end %v", height, end) + + var fo io.WriteCloser + if cfg.OutFile != "" { + fo, err = os.Create(cfg.OutFile) + if err != nil { + log.Warnf("failed to open file %v, err %v", cfg.OutFile, err) + } + defer func() { + if err := fo.Close(); err != nil { + log.Warn("failed to close file %v %v", cfg.OutFile, err) + } + }() + } + + for ; height < end; height++ { + if cfg.Progress && height%int64(1) == 0 { + log.Infof("Processing block %v", height) + } + err = DumpBlock(database, height, fo, cfg.RawBlock, cfg.FmtBlock, cfg.ShowTx) + if err != nil { + break + } + } + if cfg.Progress { + height-- + log.Infof("Processing block %v", height) + } +} + +func getHeight(database database.Db, str string) (int64, error) { + argtype, idx, sha, err := parsesha(str) + if err != nil { + log.Warnf("unable to decode [%v] %v", str, err) + return 0, err + } + + switch argtype { + case ArgSha: + // nothing to do + blk, err := database.FetchBlockBySha(sha) + if err != nil { + log.Warnf("unable to locate block sha %v err %v", + sha, err) + return 0, err + } + idx = blk.Height() + case ArgHeight: + } + return idx, nil +} + +func DumpBlock(database database.Db, height int64, fo io.Writer, rflag bool, fflag bool, tflag bool) error { + sha, err := database.FetchBlockShaByHeight(height) + + if err != nil { + return err + } + blk, err := database.FetchBlockBySha(sha) + if err != nil { + log.Warnf("Failed to fetch block %v, err %v", sha, err) + return err + } + rblk, err := blk.Bytes() + blkid := blk.Height() + + if rflag { + log.Infof("Block %v depth %v %v", sha, blkid, spew.Sdump(rblk)) + } + + mblk := blk.MsgBlock() + if fflag { + log.Infof("Block %v depth %v %v", sha, blkid, spew.Sdump(mblk)) + } + if tflag { + log.Infof("Num transactions %v", len(mblk.Transactions)) + for i, tx := range mblk.Transactions { + txsha := tx.TxSha() + log.Infof("tx %v: %v", i, &txsha) + + } + } + if fo != nil { + // generate and write header values + binary.Write(fo, binary.LittleEndian, uint32(wire.SimNet)) + binary.Write(fo, binary.LittleEndian, uint32(len(rblk))) + + // write block + fo.Write(rblk) + } + return nil +} + +var ntxcnt int64 +var txspendcnt int64 +var txgivecnt int64 + +var ErrBadShaPrefix = 
errors.New("invalid prefix") +var ErrBadShaLen = errors.New("invalid len") +var ErrBadShaChar = errors.New("invalid character") + +func parsesha(argstr string) (argtype int, height int64, psha *chainhash.Hash, err error) { + var sha chainhash.Hash + + var hashbuf string + + switch len(argstr) { + case 64: + hashbuf = argstr + case 66: + if argstr[0:2] != "0x" { + log.Infof("prefix is %v", argstr[0:2]) + err = ErrBadShaPrefix + return + } + hashbuf = argstr[2:] + default: + if len(argstr) <= 16 { + // assume value is height + argtype = ArgHeight + var h int + h, err = strconv.Atoi(argstr) + if err == nil { + height = int64(h) + return + } + log.Infof("Unable to parse height %v, err %v", height, err) + } + err = ErrBadShaLen + return + } + + var buf [32]byte + for idx, ch := range hashbuf { + var val rune + + switch { + case ch >= '0' && ch <= '9': + val = ch - '0' + case ch >= 'a' && ch <= 'f': + val = ch - 'a' + rune(10) + case ch >= 'A' && ch <= 'F': + val = ch - 'A' + rune(10) + default: + err = ErrBadShaChar + return + } + b := buf[31-idx/2] + if idx&1 == 1 { + b |= byte(val) + } else { + b |= (byte(val) << 4) + } + buf[31-idx/2] = b + } + sha.SetBytes(buf[0:32]) + psha = &sha + return +} diff --git a/config.go b/config.go index 48e168bf..4592ed2b 100644 --- a/config.go +++ b/config.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -16,21 +17,21 @@ import ( "strings" "time" - "github.com/btcsuite/btcd/database" - _ "github.com/btcsuite/btcd/database/ldb" - _ "github.com/btcsuite/btcd/database/memdb" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" flags "github.com/btcsuite/go-flags" "github.com/btcsuite/go-socks/socks" + "github.com/decred/dcrd/database" + _ "github.com/decred/dcrd/database/ldb" + _ "github.com/decred/dcrd/database/memdb" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) const ( - defaultConfigFilename = "btcd.conf" + defaultConfigFilename = "dcrd.conf" defaultDataDirname = "data" defaultLogLevel = "info" defaultLogDirname = "logs" - defaultLogFilename = "btcd.log" + defaultLogFilename = "dcrd.log" defaultMaxPeers = 125 defaultBanDuration = time.Hour * 24 defaultMaxRPCClients = 10 @@ -39,29 +40,31 @@ const ( defaultDbType = "leveldb" defaultFreeTxRelayLimit = 15.0 defaultBlockMinSize = 0 - defaultBlockMaxSize = 750000 + defaultBlockMaxSize = 375000 blockMaxSizeMin = 1000 blockMaxSizeMax = wire.MaxBlockPayload - 1000 defaultBlockPrioritySize = 50000 defaultGenerate = false defaultAddrIndex = false + defaultNonAggressive = false + defaultNoMiningStateSync = false ) var ( - btcdHomeDir = btcutil.AppDataDir("btcd", false) - defaultConfigFile = filepath.Join(btcdHomeDir, defaultConfigFilename) - defaultDataDir = filepath.Join(btcdHomeDir, defaultDataDirname) + dcrdHomeDir = dcrutil.AppDataDir("dcrd", false) + defaultConfigFile = filepath.Join(dcrdHomeDir, defaultConfigFilename) + defaultDataDir = filepath.Join(dcrdHomeDir, defaultDataDirname) knownDbTypes = database.SupportedDBs() - defaultRPCKeyFile = filepath.Join(btcdHomeDir, "rpc.key") - defaultRPCCertFile = filepath.Join(btcdHomeDir, "rpc.cert") - defaultLogDir = filepath.Join(btcdHomeDir, defaultLogDirname) + defaultRPCKeyFile = filepath.Join(dcrdHomeDir, "rpc.key") + defaultRPCCertFile = filepath.Join(dcrdHomeDir, "rpc.cert") + defaultLogDir = filepath.Join(dcrdHomeDir, defaultLogDirname) ) // runServiceCommand is only set 
to a real function on Windows. It is used // to parse and execute service commands specified via the -s flag. var runServiceCommand func(string) error -// config defines the configuration options for btcd. +// config defines the configuration options for dcrd. // // See loadConfig for details on the configuration load process. type config struct { @@ -72,14 +75,14 @@ type config struct { AddPeers []string `short:"a" long:"addpeer" description:"Add a peer to connect with at startup"` ConnectPeers []string `long:"connect" description:"Connect only to the specified peers at startup"` DisableListen bool `long:"nolisten" description:"Disable listening for incoming connections -- NOTE: Listening is automatically disabled if the --connect or --proxy options are used without also specifying listen interfaces via --listen"` - Listeners []string `long:"listen" description:"Add an interface/port to listen for connections (default all interfaces port: 8333, testnet: 18333)"` + Listeners []string `long:"listen" description:"Add an interface/port to listen for connections (default all interfaces port: 9108, testnet: 19108)"` MaxPeers int `long:"maxpeers" description:"Max number of inbound and outbound peers"` BanDuration time.Duration `long:"banduration" description:"How long to ban misbehaving peers. Valid time units are {s, m, h}. Minimum 1 second"` RPCUser string `short:"u" long:"rpcuser" description:"Username for RPC connections"` RPCPass string `short:"P" long:"rpcpass" default-mask:"-" description:"Password for RPC connections"` RPCLimitUser string `long:"rpclimituser" description:"Username for limited RPC connections"` RPCLimitPass string `long:"rpclimitpass" default-mask:"-" description:"Password for limited RPC connections"` - RPCListeners []string `long:"rpclisten" description:"Add an interface/port to listen for RPC connections (default port: 8334, testnet: 18334)"` + RPCListeners []string `long:"rpclisten" description:"Add an interface/port to listen for RPC connections (default port: 9109, testnet: 19109)"` RPCCert string `long:"rpccert" description:"File containing the certificate file"` RPCKey string `long:"rpckey" description:"File containing the certificate key"` RPCMaxClients int `long:"rpcmaxclients" description:"Max number of RPC clients for standard connections"` @@ -96,34 +99,38 @@ type config struct { OnionProxyPass string `long:"onionpass" default-mask:"-" description:"Password for onion proxy server"` NoOnion bool `long:"noonion" description:"Disable connecting to tor hidden services"` TorIsolation bool `long:"torisolation" description:"Enable Tor stream isolation by randomizing user credentials for each connection."` - TestNet3 bool `long:"testnet" description:"Use the test network"` - RegressionTest bool `long:"regtest" description:"Use the regression test network"` + TestNet bool `long:"testnet" description:"Use the test network"` SimNet bool `long:"simnet" description:"Use the simulation test network"` DisableCheckpoints bool `long:"nocheckpoints" description:"Disable built-in checkpoints. 
Don't do this unless you know what you're doing."` DbType string `long:"dbtype" description:"Database backend to use for the Block Chain"` Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` CPUProfile string `long:"cpuprofile" description:"Write CPU profile to the specified file"` + MemProfile string `long:"memprofile" description:"Write mem profile to the specified file"` + DumpBlockchain string `long:"dumpblockchain" description:"Write blockchain as a gob-encoded map to the specified file"` + MiningTimeOffset int `long:"miningtimeoffset" description:"Offset the mining timestamp of a block by this many seconds (positive values are in the past)"` DebugLevel string `short:"d" long:"debuglevel" description:"Logging level for all subsystems {trace, debug, info, warn, error, critical} -- You may also specify =,=,... to set the log level for individual subsystems -- Use show to list available subsystems"` Upnp bool `long:"upnp" description:"Use UPnP to map our listening port outside of NAT"` FreeTxRelayLimit float64 `long:"limitfreerelay" description:"Limit relay of transactions with no transaction fee to the given amount in thousands of bytes per minute"` NoRelayPriority bool `long:"norelaypriority" description:"Do not require free or low-fee transactions to have high priority for relaying"` MaxOrphanTxs int `long:"maxorphantx" description:"Max number of orphan transactions to keep in memory"` - Generate bool `long:"generate" description:"Generate (mine) bitcoins using the CPU"` + Generate bool `long:"generate" description:"Generate (mine) coins using the CPU"` MiningAddrs []string `long:"miningaddr" description:"Add the specified payment address to the list of addresses to use for generated blocks -- At least one address is required if the generate option is set"` BlockMinSize uint32 `long:"blockminsize" description:"Mininum block size in bytes to be used when creating a block"` BlockMaxSize uint32 `long:"blockmaxsize" description:"Maximum block size in bytes to be used when creating a block"` BlockPrioritySize uint32 `long:"blockprioritysize" description:"Size in bytes for high-priority/low-fee transactions when creating a block"` GetWorkKeys []string `long:"getworkkey" description:"DEPRECATED -- Use the --miningaddr option instead"` - AddrIndex bool `long:"addrindex" description:"Build and maintain a full address index. Currently only supported by leveldb."` + NoAddrIndex bool `long:"addrindex" description:"Disable building and maintaining a full address index. Currently only supported by leveldb. Will prevent wallet resyncing from seed."` DropAddrIndex bool `long:"dropaddrindex" description:"Deletes the address-based transaction index from the database on start up, and the exits."` + NonAggressive bool `long:"nonaggressive" description:"Disable mining off of the parent block of the blockchain if there aren't enough voters"` + NoMiningStateSync bool `long:"nominingstatesync" description:"Disable synchronizing the mining state with other nodes"` onionlookup func(string) ([]net.IP, error) lookup func(string) ([]net.IP, error) oniondial func(string, string) (net.Conn, error) dial func(string, string) (net.Conn, error) - miningAddrs []btcutil.Address + miningAddrs []dcrutil.Address } -// serviceOptions defines the configuration options for btcd as a service on +// serviceOptions defines the configuration options for the daemon as a service on // Windows. 
type serviceOptions struct { ServiceCommand string `short:"s" long:"service" description:"Service command {install, remove, start, stop}"` @@ -134,7 +141,7 @@ type serviceOptions struct { func cleanAndExpandPath(path string) string { // Expand initial ~ to OS specific home directory. if strings.HasPrefix(path, "~") { - homeDir := filepath.Dir(btcdHomeDir) + homeDir := filepath.Dir(dcrdHomeDir) path = strings.Replace(path, "~", homeDir, 1) } @@ -300,7 +307,7 @@ func newConfigParser(cfg *config, so *serviceOptions, options flags.Options) *fl // 3) Load configuration file overwriting defaults with any specified options // 4) Parse CLI options and overwrite/add any specified options // -// The above results in btcd functioning properly without any config settings +// The above results in daemon functioning properly without any config settings // while still allowing the user to override settings with config files and // command line options. Command line options always take precedence. func loadConfig() (*config, []string, error) { @@ -323,7 +330,8 @@ func loadConfig() (*config, []string, error) { BlockPrioritySize: defaultBlockPrioritySize, MaxOrphanTxs: maxOrphanTransactions, Generate: defaultGenerate, - AddrIndex: defaultAddrIndex, + NoAddrIndex: defaultAddrIndex, + NoMiningStateSync: defaultNoMiningStateSync, } // Service options which are only added on Windows. @@ -366,7 +374,7 @@ func loadConfig() (*config, []string, error) { // Load additional config from file. var configFileError error parser := newConfigParser(&cfg, &serviceOpts, flags.Default) - if !(preCfg.RegressionTest || preCfg.SimNet) || preCfg.ConfigFile != + if !(preCfg.SimNet) || preCfg.ConfigFile != defaultConfigFile { err := flags.NewIniParser(parser).ParseFile(preCfg.ConfigFile) @@ -381,11 +389,6 @@ func loadConfig() (*config, []string, error) { } } - // Don't add peers from the config file when in regression test mode. - if preCfg.RegressionTest && len(cfg.AddPeers) > 0 { - cfg.AddPeers = nil - } - // Parse command line options again to ensure they take precedence. remainingArgs, err := parser.Parse() if err != nil { @@ -397,7 +400,7 @@ func loadConfig() (*config, []string, error) { // Create the home directory if it doesn't already exist. funcName := "loadConfig" - err = os.MkdirAll(btcdHomeDir, 0700) + err = os.MkdirAll(dcrdHomeDir, 0700) if err != nil { // Show a nicer error message if it's because a symlink is // linked to a directory that does not exist (probably because @@ -417,15 +420,12 @@ func loadConfig() (*config, []string, error) { // Multiple networks can't be selected simultaneously. 
numNets := 0 + // Count number of network flags passed; assign active network params // while we're at it - if cfg.TestNet3 { + if cfg.TestNet { numNets++ - activeNetParams = &testNet3Params - } - if cfg.RegressionTest { - numNets++ - activeNetParams = ®ressionNetParams + activeNetParams = &testNetParams } if cfg.SimNet { numNets++ @@ -434,7 +434,7 @@ func loadConfig() (*config, []string, error) { cfg.DisableDNSSeed = true } if numNets > 1 { - str := "%s: The testnet, regtest, and simnet params can't be " + + str := "%s: The testnet and simnet params can't be " + "used together -- choose one of the three" err := fmt.Errorf(str, funcName) fmt.Fprintln(os.Stderr, err) @@ -484,7 +484,7 @@ func loadConfig() (*config, []string, error) { return nil, nil, err } - if cfg.AddrIndex && cfg.DropAddrIndex { + if !cfg.NoAddrIndex && cfg.DropAddrIndex { err := fmt.Errorf("addrindex and dropaddrindex cannot be " + "activated at the same") fmt.Fprintln(os.Stderr, err) @@ -493,7 +493,7 @@ func loadConfig() (*config, []string, error) { } // Memdb does not currently support the addrindex. - if cfg.DbType == "memdb" && cfg.AddrIndex { + if cfg.DbType == "memdb" && !cfg.NoAddrIndex { err := fmt.Errorf("memdb does not currently support the addrindex") fmt.Fprintln(os.Stderr, err) fmt.Fprintln(os.Stderr, usageMessage) @@ -618,10 +618,10 @@ func loadConfig() (*config, []string, error) { cfg.BlockMinSize = minUint32(cfg.BlockMinSize, cfg.BlockMaxSize) // Check getwork keys are valid and saved parsed versions. - cfg.miningAddrs = make([]btcutil.Address, 0, len(cfg.GetWorkKeys)+ + cfg.miningAddrs = make([]dcrutil.Address, 0, len(cfg.GetWorkKeys)+ len(cfg.MiningAddrs)) for _, strAddr := range cfg.GetWorkKeys { - addr, err := btcutil.DecodeAddress(strAddr, + addr, err := dcrutil.DecodeAddress(strAddr, activeNetParams.Params) if err != nil { str := "%s: getworkkey '%s' failed to decode: %v" @@ -642,7 +642,7 @@ func loadConfig() (*config, []string, error) { // Check mining addresses are valid and saved parsed versions. for _, strAddr := range cfg.MiningAddrs { - addr, err := btcutil.DecodeAddress(strAddr, activeNetParams.Params) + addr, err := dcrutil.DecodeAddress(strAddr, activeNetParams.Params) if err != nil { str := "%s: mining address '%s' failed to decode: %v" err := fmt.Errorf(str, funcName, strAddr, err) @@ -748,7 +748,7 @@ func loadConfig() (*config, []string, error) { if cfg.TorIsolation && (cfg.ProxyUser != "" || cfg.ProxyPass != "") { - btcdLog.Warn("Tor isolation set -- overriding " + + dcrdLog.Warn("Tor isolation set -- overriding " + "specified proxy user credentials") } @@ -786,7 +786,7 @@ func loadConfig() (*config, []string, error) { if cfg.TorIsolation && (cfg.OnionProxyUser != "" || cfg.OnionProxyPass != "") { - btcdLog.Warn("Tor isolation set -- overriding " + + dcrdLog.Warn("Tor isolation set -- overriding " + "specified onionproxy user credentials ") } @@ -822,32 +822,32 @@ func loadConfig() (*config, []string, error) { // done. This prevents the warning on help messages and invalid // options. Note this should go directly before the return. if configFileError != nil { - btcdLog.Warnf("%v", configFileError) + dcrdLog.Warnf("%v", configFileError) } return &cfg, remainingArgs, nil } -// btcdDial connects to the address on the named network using the appropriate +// dcrdDial connects to the address on the named network using the appropriate // dial function depending on the address and configuration options. 
For // example, .onion addresses will be dialed using the onion specific proxy if // one was specified, but will otherwise use the normal dial function (which // could itself use a proxy or not). -func btcdDial(network, address string) (net.Conn, error) { - if strings.Contains(address, ".onion:") { +func dcrdDial(network, address string) (net.Conn, error) { + if strings.HasSuffix(address, ".onion") { return cfg.oniondial(network, address) } return cfg.dial(network, address) } -// btcdLookup returns the correct DNS lookup function to use depending on the +// dcrdLookup returns the correct DNS lookup function to use depending on the // passed host and configuration options. For example, .onion addresses will be // resolved using the onion specific proxy if one was specified, but will // otherwise treat the normal proxy as tor unless --noonion was specified in // which case the lookup will fail. Meanwhile, normal IP addresses will be // resolved using tor if a proxy was specified unless --noonion was also // specified in which case the normal system DNS resolver will be used. -func btcdLookup(host string) ([]net.IP, error) { +func dcrdLookup(host string) ([]net.IP, error) { if strings.HasSuffix(host, ".onion") { return cfg.onionlookup(host) } diff --git a/cpuminer.go b/cpuminer.go index eaba26cd..440bd797 100644 --- a/cpuminer.go +++ b/cpuminer.go @@ -1,4 +1,5 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,13 +9,14 @@ import ( "errors" "fmt" "math/rand" - "runtime" "sync" "time" - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) const ( @@ -35,13 +37,18 @@ const ( // reduce the amount of syncs between the workers that must be done to // keep track of the hashes per second. hashUpdateSecs = 15 + + // maxSimnetToMine is the maximum number of blocks to mine on HEAD~1 + // for simnet so that you don't run out of memory if tickets for + // some reason run out during simulations. + maxSimnetToMine uint8 = 4 ) var ( // defaultNumWorkers is the default number of workers to use for mining // and is based on the number of processor cores. This helps ensure the // system stays reasonably responsive under heavy load. - defaultNumWorkers = uint32(runtime.NumCPU()) + defaultNumWorkers = uint32(chaincfg.CPUMinerThreads) ) // CPUMiner provides facilities for solving blocks (mining) using the CPU in @@ -64,6 +71,13 @@ type CPUMiner struct { updateHashes chan uint64 speedMonitorQuit chan struct{} quit chan struct{} + + // This is a map that keeps track of how many blocks have + // been mined on each parent by the CPUMiner. It is only + // for use in simulation networks, to diminish memory + // exhaustion. It should not race because it's only + // accessed in a single threaded loop below. + minedOnParents map[chainhash.Hash]uint8 } // speedMonitor handles tracking the number of hashes per second the mining @@ -112,47 +126,60 @@ out: // submitBlock submits the passed block to network after ensuring it passes all // of the consensus validation rules. 
-func (m *CPUMiner) submitBlock(block *btcutil.Block) bool { +func (m *CPUMiner) submitBlock(block *dcrutil.Block) bool { m.submitBlockLock.Lock() defer m.submitBlockLock.Unlock() - // Ensure the block is not stale since a new block could have shown up - // while the solution was being found. Typically that condition is - // detected and all work on the stale block is halted to start work on - // a new block, but the check only happens periodically, so it is - // possible a block was found and submitted in between. - latestHash, _ := m.server.blockManager.chainState.Best() - msgBlock := block.MsgBlock() - if !msgBlock.Header.PrevBlock.IsEqual(latestHash) { - minrLog.Debugf("Block submitted via CPU miner with previous "+ - "block %s is stale", msgBlock.Header.PrevBlock) - return false - } + _, latestHeight := m.server.blockManager.chainState.Best() + + // Be sure to set this so ProcessBlock doesn't fail! - Decred + block.SetHeight(latestHeight + 1) // Process this block using the same rules as blocks coming from other - // nodes. This will in turn relay it to the network like normal. + // nodes. This will in turn relay it to the network like normal. isOrphan, err := m.server.blockManager.ProcessBlock(block, blockchain.BFNone) if err != nil { // Anything other than a rule violation is an unexpected error, // so log that error as an internal error. - if _, ok := err.(blockchain.RuleError); !ok { + if rErr, ok := err.(blockchain.RuleError); !ok { minrLog.Errorf("Unexpected error while processing "+ "block submitted via CPU miner: %v", err) return false + } else { + // Occasionally errors are given out for timing errors with + // ResetMinDifficulty and high block works that is above + // the target. Feed these to debug. + if m.server.chainParams.ResetMinDifficulty && + rErr.ErrorCode == blockchain.ErrHighHash { + minrLog.Debugf("Block submitted via CPU miner rejected "+ + "because of ResetMinDifficulty time sync failure: %v", + err) + return false + } else { + // Other rule errors should be reported. + minrLog.Errorf("Block submitted via CPU miner rejected: %v", err) + return false + } } - minrLog.Debugf("Block submitted via CPU miner rejected: %v", err) - return false } if isOrphan { - minrLog.Debugf("Block submitted via CPU miner is an orphan") + minrLog.Errorf("Block submitted via CPU miner is an orphan building "+ + "on parent %v", block.MsgBlock().Header.PrevBlock) return false } // The block was accepted. - coinbaseTx := block.MsgBlock().Transactions[0].TxOut[0] + coinbaseTxOuts := block.MsgBlock().Transactions[0].TxOut + coinbaseTxGenerated := int64(0) + for _, out := range coinbaseTxOuts { + coinbaseTxGenerated += out.Value + } minrLog.Infof("Block submitted via CPU miner accepted (hash %s, "+ - "amount %v)", block.Sha(), btcutil.Amount(coinbaseTx.Value)) + "height %v, amount %v)", + block.Sha(), + block.Height(), + dcrutil.Amount(coinbaseTxGenerated)) return true } @@ -165,8 +192,10 @@ func (m *CPUMiner) submitBlock(block *btcutil.Block) bool { // This function will return early with false when conditions that trigger a // stale block such as a new block showing up or periodically when there are // new transactions and enough time has elapsed without finding a solution. 
-func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, blockHeight int64, - ticker *time.Ticker, quit chan struct{}) bool { +func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, ticker *time.Ticker, + quit chan struct{}) bool { + + blockHeight := int64(msgBlock.Header.Height) // Choose a random extra nonce offset for this block template and // worker. @@ -190,10 +219,14 @@ func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, blockHeight int64, // added relying on the fact that overflow will wrap around 0 as // provided by the Go spec. for extraNonce := uint64(0); extraNonce < maxExtraNonce; extraNonce++ { + // Get the old nonce values. + ens := getCoinbaseExtranonces(msgBlock) + ens[2] = extraNonce + enOffset + // Update the extra nonce in the block template with the // new value by regenerating the coinbase script and // setting the merkle root to the new value. The - UpdateExtraNonce(msgBlock, blockHeight, extraNonce+enOffset) + UpdateExtraNonce(msgBlock, blockHeight, ens) // Search through the entire nonce range for a solution while // periodically checking for early quit and stale block @@ -207,20 +240,13 @@ func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, blockHeight int64, m.updateHashes <- hashesCompleted hashesCompleted = 0 - // The current block is stale if the best block - // has changed. - bestHash, _ := m.server.blockManager.chainState.Best() - if !header.PrevBlock.IsEqual(bestHash) { - return false - } - // The current block is stale if the memory pool // has been updated since the block template was - // generated and it has been at least one - // minute. - if lastTxUpdate != m.server.txMemPool.LastUpdated() && - time.Now().After(lastGenerated.Add(time.Minute)) { - + // generated and it has been at least 3 seconds, + // or if it's been one minute. + if (lastTxUpdate != m.server.txMemPool.LastUpdated() && + time.Now().After(lastGenerated.Add(3*time.Second))) || + time.Now().After(lastGenerated.Add(60*time.Second)) { return false } @@ -230,13 +256,10 @@ func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, blockHeight int64, // Non-blocking select to fall through } - // Update the nonce and hash the block header. Each - // hash is actually a double sha256 (two hashes), so - // increment the number of hashes completed for each - // attempt accordingly. + // Update the nonce and hash the block header. header.Nonce = i hash := header.BlockSha() - hashesCompleted += 2 + hashesCompleted += 1 // The block is solved when the new block hash is less // than the target difficulty. Yay! @@ -262,8 +285,9 @@ func (m *CPUMiner) generateBlocks(quit chan struct{}) { // Start a ticker which is used to signal checks for stale work and // updates to the speed monitor. - ticker := time.NewTicker(time.Second * hashUpdateSecs) + ticker := time.NewTicker(333 * time.Millisecond) defer ticker.Stop() + out: for { // Quit when the miner is stopped. @@ -274,25 +298,26 @@ out: // Non-blocking select to fall through } - // Wait until there is a connection to at least one other peer - // since there is no way to relay a found block or receive - // transactions to work on when there are no connected peers. - if m.server.ConnectedCount() == 0 { - time.Sleep(time.Second) - continue - } - // No point in searching for a solution before the chain is // synced. Also, grab the same lock as used for block // submission, since the current block will be changing and // this would otherwise end up building a new block template on // a block that is in the process of becoming stale. 
m.submitBlockLock.Lock() - _, curHeight := m.server.blockManager.chainState.Best() - if curHeight != 0 && !m.server.blockManager.IsCurrent() { - m.submitBlockLock.Unlock() - time.Sleep(time.Second) - continue + time.Sleep(100 * time.Millisecond) + + // Hacks to make dcr work with Decred PoC (simnet only) + // TODO Remove before production. + if cfg.SimNet { + _, curHeight := m.server.blockManager.chainState.Best() + + if curHeight == 1 { + time.Sleep(5500 * time.Millisecond) // let wallet reconn + } else if curHeight > 100 && curHeight < 201 { // slow down to i + time.Sleep(10 * time.Millisecond) // 2500 + } else { // burn through the first pile of blocks + time.Sleep(10 * time.Millisecond) + } } // Choose a payment address at random. @@ -311,13 +336,31 @@ out: continue } + // Not enough voters. + if template == nil { + continue + } + + // This prevents you from causing memory exhaustion issues + // when mining aggressively in a simulation network. + if cfg.SimNet { + if m.minedOnParents[template.block.Header.PrevBlock] >= + maxSimnetToMine { + minrLog.Tracef("too many blocks mined on parent, stopping " + + "until there are enough votes on these to make a new " + + "block") + continue + } + } + // Attempt to solve the block. The function will exit early // with false when conditions that trigger a stale block, so // a new block template can be generated. When the return is // true a solution was found, so submit the solved block. - if m.solveBlock(template.block, curHeight+1, ticker, quit) { - block := btcutil.NewBlock(template.block) + if m.solveBlock(template.block, ticker, quit) { + block := dcrutil.NewBlock(template.block) m.submitBlock(block) + m.minedOnParents[template.block.Header.PrevBlock]++ } } @@ -505,7 +548,7 @@ func (m *CPUMiner) NumWorkers() int32 { // detecting when it is performing stale work and reacting accordingly by // generating a new block template. When a block is solved, it is submitted. // The function returns a list of the hashes of generated blocks. -func (m *CPUMiner) GenerateNBlocks(n uint32) ([]*wire.ShaHash, error) { +func (m *CPUMiner) GenerateNBlocks(n uint32) ([]*chainhash.Hash, error) { m.Lock() // Respond with an error if there's virtually 0 chance of CPU-mining a block. @@ -535,7 +578,7 @@ func (m *CPUMiner) GenerateNBlocks(n uint32) ([]*wire.ShaHash, error) { minrLog.Tracef("Generating %d blocks", n) i := uint32(0) - blockHashes := make([]*wire.ShaHash, n, n) + blockHashes := make([]*chainhash.Hash, n, n) // Start a ticker which is used to signal checks for stale work and // updates to the speed monitor. @@ -555,7 +598,6 @@ func (m *CPUMiner) GenerateNBlocks(n uint32) ([]*wire.ShaHash, error) { // be changing and this would otherwise end up building a new block // template on a block that is in the process of becoming stale. m.submitBlockLock.Lock() - _, curHeight := m.server.blockManager.chainState.Best() // Choose a payment address at random. rand.Seed(time.Now().UnixNano()) @@ -572,13 +614,19 @@ func (m *CPUMiner) GenerateNBlocks(n uint32) ([]*wire.ShaHash, error) { minrLog.Errorf(errStr) continue } + if template == nil { + errStr := fmt.Sprintf("Not enough voters on parent block " + + "and failed to pull parent template") + minrLog.Debugf(errStr) + continue + } // Attempt to solve the block. The function will exit early // with false when conditions that trigger a stale block, so // a new block template can be generated. When the return is // true a solution was found, so submit the solved block. 
- if m.solveBlock(template.block, curHeight+1, ticker, nil) { - block := btcutil.NewBlock(template.block) + if m.solveBlock(template.block, ticker, nil) { + block := dcrutil.NewBlock(template.block) m.submitBlock(block) blockHashes[i] = block.Sha() i++ @@ -606,5 +654,6 @@ func newCPUMiner(s *server) *CPUMiner { updateNumWorkers: make(chan struct{}), queryHashesPerSec: make(chan float64), updateHashes: make(chan uint64), + minedOnParents: make(map[chainhash.Hash]uint8), } } diff --git a/database/README.md b/database/README.md index 81a7188d..d9e27c10 100644 --- a/database/README.md +++ b/database/README.md @@ -1,76 +1,55 @@ database ======== -[![Build Status](http://img.shields.io/travis/btcsuite/btcd.svg)] -(https://travis-ci.org/btcsuite/btcd) [![ISC License] +[![ISC License] (http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) -Package database provides a database interface for the bitcoin block chain and +Package database provides a database interface for the decred block chain and transactions. -Please note that this package is intended to enable btcd to support different +Please note that this package is intended to enable dcrd to support different database backends and is not something that a client can directly access as only one entity can have the database open at a time (for most database backends), -and that entity will be btcd. +and that entity will be dcrd. -When a client wants programmatic access to the data provided by btcd, they'll -likely want to use the [btcrpcclient](https://github.com/btcsuite/btcrpcclient) +When a client wants programmatic access to the data provided by dcrd, they'll +likely want to use the [btcrpcclient](https://github.com/decred/btcrpcclient) package which makes use of the [JSON-RPC API] -(https://github.com/btcsuite/btcd/tree/master/docs/json_rpc_api.md). +(https://github.com/decred/dcrd/tree/master/docs/json_rpc_api.md). ## Documentation [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)] -(http://godoc.org/github.com/btcsuite/btcd/database) +(http://godoc.org/github.com/decred/dcrd/database) Full `go doc` style documentation for the project can be viewed online without installing this package by using the GoDoc site -[here](http://godoc.org/github.com/btcsuite/btcd/database). +[here](http://godoc.org/github.com/decred/dcrd/database). You can also view the documentation locally once the package is installed with the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to -http://localhost:6060/pkg/github.com/btcsuite/btcd/database +http://localhost:6060/pkg/github.com/decred/dcrd/database ## Installation ```bash -$ go get github.com/btcsuite/btcd/database +$ go get github.com/decred/dcrd/database ``` ## Examples * [CreateDB Example] - (http://godoc.org/github.com/btcsuite/btcd/database#example-CreateDB) + (http://godoc.org/github.com/decred/dcrd/database#example-CreateDB) Demonstrates creating a new database and inserting the genesis block into it. * [NewestSha Example] - (http://godoc.org/github.com/btcsuite/btcd/database#example-Db--NewestSha) + (http://godoc.org/github.com/decred/dcrd/database#example-Db--NewestSha) Demonstrates querying the database for the most recent best block height and hash. ## TODO - Increase test coverage to 100% -## GPG Verification Key - -All official release tags are signed by Conformal so users can ensure the code -has not been tampered with and is coming from the btcsuite developers. 
To -verify the signature perform the following: - -- Download the public key from the Conformal website at - https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt - -- Import the public key into your GPG keyring: - ```bash - gpg --import GIT-GPG-KEY-conformal.txt - ``` - -- Verify the release tag with the following command where `TAG_NAME` is a - placeholder for the specific tag: - ```bash - git tag -v TAG_NAME - ``` - ## License Package database is licensed under the [copyfree](http://copyfree.org) ISC diff --git a/database/common_test.go b/database/common_test.go index 3b900365..cb623bf7 100644 --- a/database/common_test.go +++ b/database/common_test.go @@ -1,42 +1,42 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package database_test import ( + "bytes" "compress/bzip2" - "encoding/binary" + "encoding/gob" "fmt" - "io" "os" "path/filepath" - "strings" "testing" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/database" - _ "github.com/btcsuite/btcd/database/ldb" - _ "github.com/btcsuite/btcd/database/memdb" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + _ "github.com/decred/dcrd/database/ldb" + _ "github.com/decred/dcrd/database/memdb" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) var ( - // network is the expected bitcoin network in the test block data. - network = wire.MainNet + // network is the expected decred network in the test block data. + network = wire.SimNet // savedBlocks is used to store blocks loaded from the blockDataFile // so multiple invocations to loadBlocks from the various test functions // do not have to reload them from disk. - savedBlocks []*btcutil.Block + savedBlocks []*dcrutil.Block - // blockDataFile is the path to a file containing the first 256 blocks - // of the block chain. - blockDataFile = filepath.Join("testdata", "blocks1-256.bz2") + // blockDataFile is the path to a file containing the first 168 blocks + // of a simulated blockchain designed to abuse network rules. + blockDataFile = filepath.Join("../blockchain/testdata", "blocks0to168.bz2") ) -var zeroHash = wire.ShaHash{} +var zeroHash = chainhash.Hash{} // testDbRoot is the root directory used to create all test databases. const testDbRoot = "testdbs" @@ -137,82 +137,45 @@ func setupDB(dbType, dbName string) (database.Db, func(), error) { if err != nil { return nil, nil, err } - - // Insert the main network genesis block. This is part of the initial - // database setup. - genesisBlock := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) - _, err = db.InsertBlock(genesisBlock) - if err != nil { - teardown() - err := fmt.Errorf("failed to insert genesis block: %v", err) - return nil, nil, err - } - return db, teardown, nil } // loadBlocks loads the blocks contained in the testdata directory and returns // a slice of them. 
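The reworked `loadBlocks` in the hunk that follows no longer parses the old length-prefixed block stream; it gob-decodes a bzip2-compressed map of block height to serialized block bytes. A sketch of how such a fixture could be produced, assuming `dcrutil.Block` exposes `Bytes()` the way `btcutil.Block` does; the bzip2 step has to happen with an external tool since the standard library's `compress/bzip2` only decompresses, and the helper name is hypothetical:

```go
// Sketch: build the gob-encoded map[int64][]byte that loadBlocks expects.
// Keys start at 1 to match the read loop in loadBlocks; the written file
// still needs to be bzip2-compressed out of band.
// Assumes imports: bytes, encoding/gob, io/ioutil, github.com/decred/dcrutil.
func writeTestChain(path string, blocks []*dcrutil.Block) error {
	chain := make(map[int64][]byte)
	for i, blk := range blocks {
		raw, err := blk.Bytes() // assumed serializer, mirroring btcutil
		if err != nil {
			return err
		}
		chain[int64(i)+1] = raw
	}
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(chain); err != nil {
		return err
	}
	return ioutil.WriteFile(path, buf.Bytes(), 0644)
}
```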
-func loadBlocks(t *testing.T) ([]*btcutil.Block, error) { +func loadBlocks(t *testing.T) ([]*dcrutil.Block, error) { if len(savedBlocks) != 0 { return savedBlocks, nil } - var dr io.Reader fi, err := os.Open(blockDataFile) if err != nil { t.Errorf("failed to open file %v, err %v", blockDataFile, err) return nil, err } - if strings.HasSuffix(blockDataFile, ".bz2") { - z := bzip2.NewReader(fi) - dr = z - } else { - dr = fi + bcStream := bzip2.NewReader(fi) + defer fi.Close() + + // Create a buffer of the read file + bcBuf := new(bytes.Buffer) + bcBuf.ReadFrom(bcStream) + + // Create decoder from the buffer and a map to store the data + bcDecoder := gob.NewDecoder(bcBuf) + blockchain := make(map[int64][]byte) + + // Decode the blockchain into the map + if err := bcDecoder.Decode(&blockchain); err != nil { + t.Errorf("error decoding test blockchain") } - defer func() { - if err := fi.Close(); err != nil { - t.Errorf("failed to close file %v %v", blockDataFile, err) - } - }() - - // Set the first block as the genesis block. - blocks := make([]*btcutil.Block, 0, 256) - genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) - blocks = append(blocks, genesis) - - for height := int64(1); err == nil; height++ { - var rintbuf uint32 - err := binary.Read(dr, binary.LittleEndian, &rintbuf) - if err == io.EOF { - // hit end of file at expected offset: no warning - height-- - err = nil - break - } - if err != nil { - t.Errorf("failed to load network type, err %v", err) - break - } - if rintbuf != uint32(network) { - t.Errorf("Block doesn't match network: %v expects %v", - rintbuf, network) - break - } - err = binary.Read(dr, binary.LittleEndian, &rintbuf) - blocklen := rintbuf - - rbytes := make([]byte, blocklen) - - // read block - dr.Read(rbytes) - - block, err := btcutil.NewBlockFromBytes(rbytes) + blocks := make([]*dcrutil.Block, 0, len(blockchain)) + for height := int64(1); height < int64(len(blockchain)); height++ { + block, err := dcrutil.NewBlockFromBytes(blockchain[height]) if err != nil { t.Errorf("failed to parse block %v", height) return nil, err } + block.SetHeight(height - 1) blocks = append(blocks, block) } diff --git a/database/db.go b/database/db.go index e17510de..40b88d52 100644 --- a/database/db.go +++ b/database/db.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -7,14 +8,17 @@ package database import ( "errors" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" + "github.com/btcsuite/golangcrypto/ripemd160" ) // Errors that the various database functions may return. 
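The sentinel errors declared in the var block below are what callers are expected to compare against. For instance, `FetchAddrIndexTip` is documented to return `ErrAddrIndexDoesNotExist` when the address index has never been built; a minimal, hypothetical caller-side check might look like this (only the error value and the interface method come from the package, the helper is illustrative):

```go
// Hypothetical helper: decide whether the address index needs to be built
// from scratch.  db is any database.Db implementation.
func needsAddrIndexRebuild(db database.Db) (bool, error) {
	_, _, err := db.FetchAddrIndexTip()
	if err == database.ErrAddrIndexDoesNotExist {
		return true, nil
	}
	return false, err
}
```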
var ( - ErrAddrIndexDoesNotExist = errors.New("address index hasn't been built or is an older version") + ErrAddrIndexDoesNotExist = errors.New("address index hasn't been built " + + "or is an older version") ErrUnsupportedAddressType = errors.New("address type is not supported " + "by the address-index") ErrPrevShaMissing = errors.New("previous sha missing from database") @@ -23,6 +27,7 @@ var ( ErrDuplicateSha = errors.New("duplicate insert attempted") ErrDbDoesNotExist = errors.New("non-existent database") ErrDbUnknownType = errors.New("non-existent database type") + ErrDbInconsistency = errors.New("inconsistent database") ErrNotImplemented = errors.New("method has not yet been implemented") ) @@ -31,7 +36,7 @@ var ( const AllShas = int64(^uint64(0) >> 1) // Db defines a generic interface that is used to request and insert data into -// the bitcoin block chain. This interface is intended to be agnostic to actual +// the decred block chain. This interface is intended to be agnostic to actual // mechanism used for backend data storage. The AddDBDriver function can be // used to add a new backend data storage method. type Db interface { @@ -42,40 +47,40 @@ type Db interface { // the given block. It terminates any existing transaction and performs // its operations in an atomic transaction which is commited before // the function returns. - DropAfterBlockBySha(*wire.ShaHash) (err error) + DropAfterBlockBySha(*chainhash.Hash) (err error) // ExistsSha returns whether or not the given block hash is present in // the database. - ExistsSha(sha *wire.ShaHash) (exists bool, err error) + ExistsSha(sha *chainhash.Hash) (exists bool, err error) - // FetchBlockBySha returns a btcutil Block. The implementation may + // FetchBlockBySha returns a dcrutil Block. The implementation may // cache the underlying data if desired. - FetchBlockBySha(sha *wire.ShaHash) (blk *btcutil.Block, err error) + FetchBlockBySha(sha *chainhash.Hash) (blk *dcrutil.Block, err error) // FetchBlockHeightBySha returns the block height for the given hash. - FetchBlockHeightBySha(sha *wire.ShaHash) (height int64, err error) + FetchBlockHeightBySha(sha *chainhash.Hash) (height int64, err error) // FetchBlockHeaderBySha returns a wire.BlockHeader for the given // sha. The implementation may cache the underlying data if desired. - FetchBlockHeaderBySha(sha *wire.ShaHash) (bh *wire.BlockHeader, err error) + FetchBlockHeaderBySha(sha *chainhash.Hash) (bh *wire.BlockHeader, err error) // FetchBlockShaByHeight returns a block hash based on its height in the // block chain. - FetchBlockShaByHeight(height int64) (sha *wire.ShaHash, err error) + FetchBlockShaByHeight(height int64) (sha *chainhash.Hash, err error) // FetchHeightRange looks up a range of blocks by the start and ending // heights. Fetch is inclusive of the start height and exclusive of the // ending height. To fetch all hashes from the start height until no // more are present, use the special id `AllShas'. - FetchHeightRange(startHeight, endHeight int64) (rshalist []wire.ShaHash, err error) + FetchHeightRange(startHeight, endHeight int64) (rshalist []chainhash.Hash, err error) // ExistsTxSha returns whether or not the given tx hash is present in // the database - ExistsTxSha(sha *wire.ShaHash) (exists bool, err error) + ExistsTxSha(sha *chainhash.Hash) (exists bool, err error) // FetchTxBySha returns some data for the given transaction hash. The // implementation may cache the underlying data if desired. 
- FetchTxBySha(txsha *wire.ShaHash) ([]*TxListReply, error) + FetchTxBySha(txsha *chainhash.Hash) ([]*TxListReply, error) // FetchTxByShaList returns a TxListReply given an array of transaction // hashes. The implementation may cache the underlying data if desired. @@ -86,7 +91,7 @@ type Db interface { // return at least one TxListReply instance for each requested // transaction. Each TxListReply instance then contains an Err field // which can be used to detect errors. - FetchTxByShaList(txShaList []*wire.ShaHash) []*TxListReply + FetchTxByShaList(txShaList []*chainhash.Hash) []*TxListReply // FetchUnSpentTxByShaList returns a TxListReply given an array of // transaction hashes. The implementation may cache the underlying @@ -97,25 +102,25 @@ type Db interface { // return at least one TxListReply instance for each requested // transaction. Each TxListReply instance then contains an Err field // which can be used to detect errors. - FetchUnSpentTxByShaList(txShaList []*wire.ShaHash) []*TxListReply + FetchUnSpentTxByShaList(txShaList []*chainhash.Hash) []*TxListReply // InsertBlock inserts raw block and transaction data from a block // into the database. The first block inserted into the database // will be treated as the genesis block. Every subsequent block insert // requires the referenced parent block to already exist. - InsertBlock(block *btcutil.Block) (height int64, err error) + InsertBlock(block *dcrutil.Block) (height int64, err error) // NewestSha returns the hash and block height of the most recent (end) // block of the block chain. It will return the zero hash, -1 for // the block height, and no error (nil) if there are not any blocks in // the database yet. - NewestSha() (sha *wire.ShaHash, height int64, err error) + NewestSha() (sha *chainhash.Hash, height int64, err error) // FetchAddrIndexTip returns the hash and block height of the most recent // block which has had its address index populated. It will return // ErrAddrIndexDoesNotExist along with a zero hash, and -1 if the // addrindex hasn't yet been built up. - FetchAddrIndexTip() (sha *wire.ShaHash, height int64, err error) + FetchAddrIndexTip() (sha *chainhash.Hash, height int64, err error) // UpdateAddrIndexForBlock updates the stored addrindex with passed // index information for a particular block height. Additionally, it @@ -124,7 +129,12 @@ type Db interface { // transaction which is commited before the function returns. // Addresses are indexed by the raw bytes of their base58 decoded // hash160. - UpdateAddrIndexForBlock(blkSha *wire.ShaHash, height int64, + UpdateAddrIndexForBlock(blkSha *chainhash.Hash, height int64, + addrIndex BlockAddrIndex) error + + // DropAddrIndexForBlock removes all passed address indexes and sets + // the current block index below the previous HEAD. + DropAddrIndexForBlock(blkSha *chainhash.Hash, height int64, addrIndex BlockAddrIndex) error // FetchTxsForAddr looks up and returns all transactions which either @@ -134,10 +144,11 @@ type Db interface { // Additionally, if the caller wishes to skip forward in the results // some amount, the 'seek' represents how many results to skip. // NOTE: Values for both `seek` and `limit` MUST be positive. - FetchTxsForAddr(addr btcutil.Address, skip int, limit int) ([]*TxListReply, error) + FetchTxsForAddr(addr dcrutil.Address, skip int, + limit int) ([]*TxListReply, error) - // DeleteAddrIndex deletes the entire addrindex stored within the DB. - DeleteAddrIndex() error + // PurgeAddrIndex deletes the entire addrindex stored within the DB. 
+ PurgeAddrIndex() error // RollbackClose discards the recent database changes to the previously // saved data at last Sync and closes the database. @@ -159,21 +170,36 @@ type DriverDB struct { // TxListReply is used to return individual transaction information when // data about multiple transactions is requested in a single call. type TxListReply struct { - Sha *wire.ShaHash + Sha *chainhash.Hash Tx *wire.MsgTx - BlkSha *wire.ShaHash + BlkSha *chainhash.Hash Height int64 + Index uint32 TxSpent []bool Err error } +// TxAddrIndex is the location of a transaction containing an address or script +// hash reference inside a transaction, as given by the block it is found in. +type TxAddrIndex struct { + Hash160 [ripemd160.Size]byte + Height uint32 + TxOffset uint32 + TxLen uint32 +} + // AddrIndexKeySize is the number of bytes used by keys into the BlockAddrIndex. -const AddrIndexKeySize = ripemd160.Size +// 3 byte prefix ([]byte("a+-")) +// 20 byte RIPEMD160 hash +// 4 byte block height +// 4 byte txoffset +// 4 byte txlen +const AddrIndexKeySize = 3 + ripemd160.Size + 4 + 4 + 4 // BlockAddrIndex represents the indexing structure for addresses. // It maps a hash160 to a list of transaction locations within a block that // either pays to or spends from the passed UTXO for the hash160. -type BlockAddrIndex map[[AddrIndexKeySize]byte][]*wire.TxLoc +type BlockAddrIndex []*TxAddrIndex // driverList holds all of the registered database backends. var driverList []DriverDB diff --git a/database/db_test.go b/database/db_test.go index ac238b9c..116d5136 100644 --- a/database/db_test.go +++ b/database/db_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,7 +9,7 @@ import ( "fmt" "testing" - "github.com/btcsuite/btcd/database" + "github.com/decred/dcrd/database" ) var ( diff --git a/database/doc.go b/database/doc.go index 4e702d30..78662b55 100644 --- a/database/doc.go +++ b/database/doc.go @@ -1,11 +1,12 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. /* -Package database provides a database interface for the Bitcoin block chain. +Package database provides a database interface for the Decred block chain. -As of July 2014, there are over 309,000 blocks in the Bitcoin block chain and +As of July 2014, there are over 309,000 blocks in the Decred block chain and and over 42 million transactions (which turns out to be over 21GB of data). This package provides a database layer to store and retrieve this data in a fairly simple and efficient manner. The use of this should not require specific diff --git a/database/example_test.go b/database/example_test.go index a0e5f534..9236a048 100644 --- a/database/example_test.go +++ b/database/example_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
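Before the example_test.go updates below, a note on the widened address-index key introduced above: `AddrIndexKeySize` grows from the bare 20-byte hash160 to 35 bytes (3-byte prefix, 20-byte hash160, 4-byte block height, 4-byte tx offset, 4-byte tx length). A sketch of packing a key in that layout; only the field sizes and the "a+-" prefix come from the comment in db.go, the byte order is an assumption and the helper name is hypothetical:

```go
// Sketch of a 35-byte addrindex key in the layout described by the
// AddrIndexKeySize comment.  Little-endian integers are an assumption here;
// the actual encoding lives in the ldb backend.
func packAddrIndexKey(hash160 [ripemd160.Size]byte, height, txOff, txLen uint32) [database.AddrIndexKeySize]byte {
	var key [database.AddrIndexKeySize]byte
	copy(key[0:3], "a+-")
	copy(key[3:23], hash160[:])
	binary.LittleEndian.PutUint32(key[23:27], height)
	binary.LittleEndian.PutUint32(key[27:31], txOff)
	binary.LittleEndian.PutUint32(key[31:35], txLen)
	return key
}
```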
@@ -7,10 +8,10 @@ package database_test import ( "fmt" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/database" - _ "github.com/btcsuite/btcd/database/memdb" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/database" + _ "github.com/decred/dcrd/database/memdb" + "github.com/decred/dcrutil" ) // This example demonstrates creating a new database and inserting the genesis @@ -20,8 +21,8 @@ func ExampleCreateDB() { // Ordinarily this would be whatever driver(s) your application // requires. // import ( - // "github.com/btcsuite/btcd/database" - // _ "github.com/btcsuite/btcd/database/memdb" + // "github.com/decred/dcrd/database" + // _ "github.com/decred/dcrd/database/memdb" // ) // Create a database and schedule it to be closed on exit. This example @@ -37,7 +38,8 @@ func ExampleCreateDB() { defer db.Close() // Insert the main network genesis block. - genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) + genesis := dcrutil.NewBlock(chaincfg.TestNetParams.GenesisBlock) + genesis.SetHeight(0) newHeight, err := db.InsertBlock(genesis) if err != nil { fmt.Println(err) @@ -58,7 +60,8 @@ func exampleLoadDB() (database.Db, error) { } // Insert the main network genesis block. - genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) + genesis := dcrutil.NewBlock(chaincfg.TestNetParams.GenesisBlock) + genesis.SetHeight(0) _, err = db.InsertBlock(genesis) if err != nil { return nil, err @@ -89,6 +92,6 @@ func ExampleDb_newestSha() { fmt.Println("Latest height:", latestHeight) // Output: - // Latest hash: 000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f + // Latest hash: 5b7466edf6739adc9b32aaedc54e24bdc59a05f0ced855088835fe3cbe58375f // Latest height: 0 } diff --git a/database/interface_test.go b/database/interface_test.go index e44eb765..ee286863 100644 --- a/database/interface_test.go +++ b/database/interface_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,10 +9,11 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" "github.com/davecgh/go-spew/spew" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) // testContext is used to store context information about a running test which @@ -27,8 +29,8 @@ type testContext struct { dbType string db database.Db blockHeight int64 - blockHash *wire.ShaHash - block *btcutil.Block + blockHash *chainhash.Hash + block *dcrutil.Block useSpends bool } @@ -235,55 +237,111 @@ func testFetchBlockShaByHeightErrors(tc *testContext) bool { // testExistsTxSha ensures ExistsTxSha conforms to the interface contract. func testExistsTxSha(tc *testContext) bool { - for i, tx := range tc.block.Transactions() { - // The transaction must exist in the database. 
- txHash := tx.Sha() - exists, err := tc.db.ExistsTxSha(txHash) - if err != nil { - tc.t.Errorf("ExistsTxSha (%s): block #%d (%s) tx #%d "+ - "(%s) unexpected error: %v", tc.dbType, - tc.blockHeight, tc.blockHash, i, txHash, err) - return false - } - if !exists { - _, err := tc.db.FetchTxBySha(txHash) - if err != nil { - tc.t.Errorf("ExistsTxSha (%s): block #%d (%s) "+ - "tx #%d (%s) does not exist", tc.dbType, - tc.blockHeight, tc.blockHash, i, txHash) - } - return false + var blockPrev *dcrutil.Block = nil + // Decred: WARNING. This function assumes that all block insertion calls have + // dcrutil.blocks passed to them with block.blockHeight set correctly. However, + // loading the genesis block in dcrd didn't do this (via block manager); pre- + // production it should be established that all calls to this function pass + // blocks with block.blockHeight set correctly. + if tc.block.Height() != 0 { + var errBlockPrev error + blockPrev, errBlockPrev = tc.db.FetchBlockBySha(&tc.block.MsgBlock().Header.PrevBlock) + if errBlockPrev != nil { + blockSha := tc.block.Sha() + tc.t.Errorf("Failed to fetch parent block of block %v", blockSha) } } + votebits := tc.block.MsgBlock().Header.VoteBits + if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil { + for i, tx := range blockPrev.Transactions() { + // The transaction must exist in the database. + txHash := tx.Sha() + exists, err := tc.db.ExistsTxSha(txHash) + if err != nil { + tc.t.Errorf("ExistsTxSha (%s): block #%d (%s) tx #%d "+ + "(%s) unexpected error: %v", tc.dbType, + tc.blockHeight, tc.blockHash, i, txHash, err) + return false + } + if !exists { + _, err := tc.db.FetchTxBySha(txHash) + if err != nil { + tc.t.Errorf("ExistsTxSha (%s): block #%d (%s) "+ + "tx #%d (%s) does not exist", tc.dbType, + tc.blockHeight, tc.blockHash, i, txHash) + } + return false + } + } + } return true } // testFetchTxBySha ensures FetchTxBySha conforms to the interface contract. 
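The reworked `testExistsTxSha` above and `testFetchTxBySha` below share the same Decred-specific pattern: a block's regular transaction tree only counts once a later block's vote bits approve it, so the checks are gated on `dcrutil.BlockValid` and walk the parent block's transactions. Roughly, in outline (this helper mirrors the test code but is not a drop-in function from the patch):

```go
// Sketch of the shared gate: return the parent's regular transactions only
// when this block's vote bits approve the parent's regular tree.
func approvedParentTxs(db database.Db, block *dcrutil.Block) ([]*dcrutil.Tx, error) {
	if block.Height() == 0 {
		return nil, nil // genesis has no parent to approve
	}
	voteBits := block.MsgBlock().Header.VoteBits
	if !dcrutil.IsFlagSet16(voteBits, dcrutil.BlockValid) {
		return nil, nil // parent's regular tree was voted invalid
	}
	parent, err := db.FetchBlockBySha(&block.MsgBlock().Header.PrevBlock)
	if err != nil {
		return nil, err
	}
	return parent.Transactions(), nil
}
```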
func testFetchTxBySha(tc *testContext) bool { - for i, tx := range tc.block.Transactions() { - txHash := tx.Sha() - txReplyList, err := tc.db.FetchTxBySha(txHash) + var blockPrev *dcrutil.Block = nil + if tc.block.Height() != 0 { + var errBlockPrev error + blockPrev, errBlockPrev = tc.db.FetchBlockBySha(&tc.block.MsgBlock().Header.PrevBlock) + if errBlockPrev != nil { + blockSha := tc.block.Sha() + tc.t.Errorf("Failed to fetch parent block of block %v", blockSha) + } + } + + votebits := tc.block.MsgBlock().Header.VoteBits + if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil { + for i, tx := range blockPrev.Transactions() { + txHash := tx.Sha() + txReplyList, err := tc.db.FetchTxBySha(txHash) + if err != nil { + tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+ + "tx #%d (%s) err: %v", tc.dbType, tc.blockHeight, + tc.blockHash, i, txHash, err) + return false + } + if len(txReplyList) == 0 { + tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+ + "tx #%d (%s) did not return reply data", + tc.dbType, tc.blockHeight, tc.blockHash, i, + txHash) + return false + } + txFromDb := txReplyList[len(txReplyList)-1].Tx + if !reflect.DeepEqual(tx.MsgTx(), txFromDb) { + tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+ + "tx #%d (%s, %s) does not match stored tx\n"+ + "got: %v\nwant: %v", tc.dbType, tc.blockHeight, + tc.blockHash, i, txHash, txFromDb.TxSha(), spew.Sdump(txFromDb), + spew.Sdump(tx.MsgTx())) + return false + } + } + } + for i, tx := range tc.block.MsgBlock().STransactions { + txHash := tx.TxSha() + txReplyList, err := tc.db.FetchTxBySha(&txHash) if err != nil { tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+ - "tx #%d (%s) err: %v", tc.dbType, tc.blockHeight, + "sstx #%d (%s) err: %v", tc.dbType, tc.blockHeight, tc.blockHash, i, txHash, err) return false } if len(txReplyList) == 0 { tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+ - "tx #%d (%s) did not return reply data", + "sstx #%d (%s) did not return reply data", tc.dbType, tc.blockHeight, tc.blockHash, i, txHash) return false } txFromDb := txReplyList[len(txReplyList)-1].Tx - if !reflect.DeepEqual(tx.MsgTx(), txFromDb) { + if !reflect.DeepEqual(tx, txFromDb) { tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+ - "tx #%d (%s) does not match stored tx\n"+ + "sstx #%d (%s) does not match stored sstx\n"+ "got: %v\nwant: %v", tc.dbType, tc.blockHeight, tc.blockHash, i, txHash, spew.Sdump(txFromDb), - spew.Sdump(tx.MsgTx())) + spew.Sdump(tx)) return false } } @@ -302,151 +360,512 @@ func testFetchTxBySha(tc *testContext) bool { // be spent yet. However, on subsequent runs, all blocks have been inserted and // therefore some of the transaction outputs are spent. func expectedSpentBuf(tc *testContext, txNum int) []bool { - numTxOut := len(tc.block.MsgBlock().Transactions[txNum].TxOut) + var blah = []bool{false} + var blockPrev *dcrutil.Block = nil + if tc.block.Height() != 0 { + var errBlockPrev error + blockPrev, errBlockPrev = tc.db.FetchBlockBySha(&tc.block.MsgBlock().Header.PrevBlock) + if errBlockPrev != nil { + blockSha := tc.block.Sha() + tc.t.Errorf("Failed to fetch parent block of block %v", blockSha) + return blah + } + } + transactions := blockPrev.Transactions() + numTxOut := len(transactions[txNum].MsgTx().TxOut) spentBuf := make([]bool, numTxOut) if tc.useSpends { - if tc.blockHeight == 9 && txNum == 0 { - // Spent by block 170, tx 1, input 0. 
- // tx f4184fc596403b9d638783cf57adfe4c75c605f6356fbc91338530e9831e9e16 + if tc.blockHeight >= 2 && tc.blockHeight <= 43 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 45 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 46 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 48 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 49 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 54 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 55 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 57 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 59 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 63 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 67 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 68 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 69 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 70 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 73 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 74 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 76 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 77 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight >= 105 && tc.blockHeight <= 120 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 122 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 125 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 127 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 131 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 132 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 134 && txNum == 0 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + } + if tc.blockHeight == 44 && txNum == 1 { spentBuf[0] = true - } - - if tc.blockHeight == 170 && txNum == 1 { - // Spent by block 181, tx 1, input 0. - // tx a16f3ce4dd5deb92d98ef5cf8afeaf0775ebca408f708b2146c4fb42b41e14be spentBuf[1] = true + spentBuf[2] = true + spentBuf[3] = true + spentBuf[4] = false + spentBuf[5] = true } - - if tc.blockHeight == 181 && txNum == 1 { - // Spent by block 182, tx 1, input 0. 
- // tx 591e91f809d716912ca1d4a9295e70c3e78bab077683f79350f101da64588073 + if tc.blockHeight == 60 && txNum == 1 { + spentBuf[0] = false spentBuf[1] = true + spentBuf[2] = false + spentBuf[3] = false + spentBuf[4] = false + spentBuf[5] = false } - - if tc.blockHeight == 182 && txNum == 1 { - // Spent by block 221, tx 1, input 0. - // tx 298ca2045d174f8a158961806ffc4ef96fad02d71a6b84d9fa0491813a776160 + if tc.blockHeight == 75 && txNum == 1 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = false + spentBuf[3] = false + spentBuf[4] = false + spentBuf[5] = true + } + if tc.blockHeight == 78 && txNum == 2 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = false + spentBuf[3] = false + spentBuf[4] = false + spentBuf[5] = true + } + if tc.blockHeight == 79 && txNum == 1 { + spentBuf[0] = false + spentBuf[1] = true + spentBuf[2] = false + spentBuf[3] = false + spentBuf[4] = false + spentBuf[5] = false + } + if tc.blockHeight == 89 && txNum == 1 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + spentBuf[3] = false + spentBuf[4] = false + spentBuf[5] = false + } + if tc.blockHeight == 90 && txNum == 1 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = false + spentBuf[3] = false + spentBuf[4] = false + spentBuf[5] = true + } + if tc.blockHeight == 93 && txNum == 1 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = false + spentBuf[3] = false + spentBuf[4] = false + spentBuf[5] = true + } + if tc.blockHeight == 95 && txNum == 1 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + spentBuf[3] = false + spentBuf[4] = false + spentBuf[5] = false + } + if tc.blockHeight == 97 && txNum == 3 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = false + spentBuf[3] = true + spentBuf[4] = false + spentBuf[5] = false + } + if tc.blockHeight == 99 && txNum == 1 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = false + spentBuf[3] = true + spentBuf[4] = false + spentBuf[5] = false + } + if tc.blockHeight == 101 && txNum == 1 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = false + spentBuf[3] = false + spentBuf[4] = false + spentBuf[5] = true + } + if tc.blockHeight == 103 && txNum == 1 { spentBuf[0] = true - - // Spent by block 183, tx 1, input 0. - // tx 12b5633bad1f9c167d523ad1aa1947b2732a865bf5414eab2f9e5ae5d5c191ba - spentBuf[1] = true + spentBuf[1] = false + spentBuf[2] = false + spentBuf[3] = false + spentBuf[4] = false + spentBuf[5] = false } - - if tc.blockHeight == 183 && txNum == 1 { - // Spent by block 187, tx 1, input 0. - // tx 4385fcf8b14497d0659adccfe06ae7e38e0b5dc95ff8a13d7c62035994a0cd79 + if tc.blockHeight == 106 && txNum == 1 { spentBuf[0] = true - - // Spent by block 248, tx 1, input 0. 
- // tx 828ef3b079f9c23829c56fe86e85b4a69d9e06e5b54ea597eef5fb3ffef509fe spentBuf[1] = true + spentBuf[2] = true + spentBuf[3] = true + spentBuf[4] = true + spentBuf[5] = false + } + if tc.blockHeight == 111 && txNum == 1 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = false + spentBuf[3] = false + spentBuf[4] = false + spentBuf[5] = true + } + if tc.blockHeight == 113 && txNum == 1 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = false + spentBuf[3] = false + spentBuf[4] = true + spentBuf[5] = false + } + if tc.blockHeight == 113 && txNum == 2 { + spentBuf[0] = true + spentBuf[1] = false + spentBuf[2] = false + spentBuf[3] = false + spentBuf[4] = false + spentBuf[5] = true + } + if tc.blockHeight == 117 && txNum == 1 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + spentBuf[3] = false + spentBuf[4] = false + spentBuf[5] = false + } + if tc.blockHeight == 122 && txNum == 1 { + spentBuf[0] = true + spentBuf[1] = true + spentBuf[2] = true + spentBuf[3] = false + spentBuf[4] = true + spentBuf[5] = true + } + if tc.blockHeight == 131 && txNum == 1 { + spentBuf[0] = true + spentBuf[1] = false + spentBuf[2] = true + spentBuf[3] = true + spentBuf[4] = true + spentBuf[5] = true + } + if tc.blockHeight == 135 && txNum == 1 { + spentBuf[0] = true + spentBuf[1] = true + spentBuf[2] = true + spentBuf[3] = true + spentBuf[4] = true + spentBuf[5] = false + } + if tc.blockHeight == 141 && txNum == 1 { + spentBuf[0] = true + spentBuf[1] = true + spentBuf[2] = true + spentBuf[3] = true + spentBuf[4] = false + spentBuf[5] = true + } + if tc.blockHeight == 142 && txNum == 1 { + spentBuf[0] = true + spentBuf[1] = true + spentBuf[2] = false + spentBuf[3] = true + spentBuf[4] = true + spentBuf[5] = true + } + if tc.blockHeight == 145 && txNum == 1 { + spentBuf[0] = true + spentBuf[1] = false + spentBuf[2] = true + spentBuf[3] = true + spentBuf[4] = true + spentBuf[5] = true + } + if tc.blockHeight == 146 && txNum == 1 { + spentBuf[0] = true + spentBuf[1] = false + spentBuf[2] = false + spentBuf[3] = true + spentBuf[4] = false + spentBuf[5] = true + } + if tc.blockHeight == 146 && txNum == 2 { + spentBuf[0] = true + spentBuf[1] = true + spentBuf[2] = true + spentBuf[3] = true + spentBuf[4] = false + spentBuf[5] = true + } + if tc.blockHeight == 147 && txNum == 1 { + spentBuf[0] = false + spentBuf[1] = false + spentBuf[2] = true + spentBuf[3] = false + spentBuf[4] = true + spentBuf[5] = false } } return spentBuf } +// unspendStakeTxTree returns all outpoints spent before this one +// in the block's tx tree stake. used for unspending the stake tx +// tree to evaluate tx tree regular of prev block. 
+func unspendStakeTxTree(block *dcrutil.Block) map[wire.OutPoint]struct{} { + unspentOps := make(map[wire.OutPoint]struct{}) + + for _, tx := range block.STransactions() { + for _, txIn := range tx.MsgTx().TxIn { + unspentOps[txIn.PreviousOutPoint] = struct{}{} + } + } + + return unspentOps +} + func testFetchTxByShaListCommon(tc *testContext, includeSpent bool) bool { - fetchFunc := tc.db.FetchUnSpentTxByShaList - funcName := "FetchUnSpentTxByShaList" - if includeSpent { - fetchFunc = tc.db.FetchTxByShaList - funcName = "FetchTxByShaList" - } - - transactions := tc.block.Transactions() - txHashes := make([]*wire.ShaHash, len(transactions)) - for i, tx := range transactions { - txHashes[i] = tx.Sha() - } - - txReplyList := fetchFunc(txHashes) - if len(txReplyList) != len(txHashes) { - tc.t.Errorf("%s (%s): block #%d (%s) tx reply list does not "+ - " match expected length - got: %v, want: %v", funcName, - tc.dbType, tc.blockHeight, tc.blockHash, - len(txReplyList), len(txHashes)) - return false - } - for i, tx := range transactions { - txHash := tx.Sha() - txD := txReplyList[i] - - // The transaction hash in the reply must be the expected value. - if !txD.Sha.IsEqual(txHash) { - tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ - "hash does not match expected value - got %v", - funcName, tc.dbType, tc.blockHeight, - tc.blockHash, i, txHash, txD.Sha) - return false - } - - // The reply must not indicate any errors. - if txD.Err != nil { - tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ - "returned unexpected error - got %v, want nil", - funcName, tc.dbType, tc.blockHeight, - tc.blockHash, i, txHash, txD.Err) - return false - } - - // The transaction in the reply fetched from the database must - // be the same MsgTx that was stored. - if !reflect.DeepEqual(tx.MsgTx(), txD.Tx) { - tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) does "+ - "not match stored tx\ngot: %v\nwant: %v", - funcName, tc.dbType, tc.blockHeight, - tc.blockHash, i, txHash, spew.Sdump(txD.Tx), - spew.Sdump(tx.MsgTx())) - return false - } - - // The block hash in the reply from the database must be the - // expected value. - if txD.BlkSha == nil { - tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ - "returned nil block hash", funcName, tc.dbType, - tc.blockHeight, tc.blockHash, i, txHash) - return false - } - if !txD.BlkSha.IsEqual(tc.blockHash) { - tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s)"+ - "returned unexpected block hash - got %v", - funcName, tc.dbType, tc.blockHeight, - tc.blockHash, i, txHash, txD.BlkSha) - return false - } - - // The block height in the reply from the database must be the - // expected value. - if txD.Height != tc.blockHeight { - tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ - "returned unexpected block height - got %v", - funcName, tc.dbType, tc.blockHeight, - tc.blockHash, i, txHash, txD.Height) - return false - } - - // The spend data in the reply from the database must not - // indicate any of the transactions that were just inserted are - // spent. 
- if txD.TxSpent == nil { - tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ - "returned nil spend data", funcName, tc.dbType, - tc.blockHeight, tc.blockHash, i, txHash) - return false - } - spentBuf := expectedSpentBuf(tc, i) - if !reflect.DeepEqual(txD.TxSpent, spentBuf) { - tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ - "returned unexpected spend data - got %v, "+ - "want %v", funcName, tc.dbType, tc.blockHeight, - tc.blockHash, i, txHash, txD.TxSpent, spentBuf) - return false + var blockPrev *dcrutil.Block = nil + if tc.block.Height() != 0 { + var errBlockPrev error + blockPrev, errBlockPrev = tc.db.FetchBlockBySha(&tc.block.MsgBlock().Header.PrevBlock) + if errBlockPrev != nil { + blockSha := tc.block.Sha() + tc.t.Errorf("Failed to fetch parent block of block %v", blockSha) } } + unspentFromTxTreeStake := unspendStakeTxTree(tc.block) + + votebits := tc.block.MsgBlock().Header.VoteBits + if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil { + fetchFunc := tc.db.FetchUnSpentTxByShaList + funcName := "FetchUnSpentTxByShaList" + if includeSpent { + fetchFunc = tc.db.FetchTxByShaList + funcName = "FetchTxByShaList" + } + + transactions := blockPrev.Transactions() + txHashes := make([]*chainhash.Hash, len(transactions)) + for i, tx := range transactions { + txHashes[i] = tx.Sha() + } + + txReplyList := fetchFunc(txHashes) + if len(txReplyList) != len(txHashes) { + tc.t.Errorf("%s (%s): block #%d (%s) tx reply list does not "+ + " match expected length - got: %v, want: %v", funcName, + tc.dbType, tc.blockHeight, tc.blockHash, + len(txReplyList), len(txHashes)) + return false + } + for i, tx := range transactions { + txHash := tx.Sha() + txD := txReplyList[i] + + // The transaction hash in the reply must be the expected value. + if !txD.Sha.IsEqual(txHash) { + tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ + "hash does not match expected value - got %v", + funcName, tc.dbType, tc.blockHeight, + tc.blockHash, i, txHash, txD.Sha) + return false + } + + // The reply must not indicate any errors. + if txD.Err != nil { + tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ + "returned unexpected error - got %v, want nil", + funcName, tc.dbType, tc.blockHeight, + tc.blockHash, i, txHash, txD.Err) + return false + } + + // The transaction in the reply fetched from the database must + // be the same MsgTx that was stored. + if !reflect.DeepEqual(tx.MsgTx(), txD.Tx) { + tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) does "+ + "not match stored tx\ngot: %v\nwant: %v", + funcName, tc.dbType, tc.blockHeight, + tc.blockHash, i, txHash, spew.Sdump(txD.Tx), + spew.Sdump(tx.MsgTx())) + return false + } + + // The block hash in the reply from the database must be the + // expected value. + if txD.BlkSha == nil { + tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ + "returned nil block hash", funcName, tc.dbType, + tc.blockHeight, tc.blockHash, i, txHash) + return false + } + if !txD.BlkSha.IsEqual(&tc.block.MsgBlock().Header.PrevBlock) { + tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s)"+ + "returned unexpected block hash - got %v", + funcName, tc.dbType, tc.blockHeight, + tc.blockHash, i, txHash, txD.BlkSha) + return false + } + + // The block height in the reply from the database must be the + // expected value. 
+ if txD.Height != tc.blockHeight-1 { + tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ + "returned unexpected block height - got %v", + funcName, tc.dbType, tc.blockHeight, + tc.blockHash, i, txHash, txD.Height) + return false + } + // The spend data in the reply from the database must not + // indicate any of the transactions that were just inserted are + // spent. + if txD.TxSpent == nil { + tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ + "returned nil spend data", funcName, tc.dbType, + tc.blockHeight, tc.blockHash, i, txHash) + return false + } + spentBuf := expectedSpentBuf(tc, i) + if !reflect.DeepEqual(txD.TxSpent, spentBuf) { + stakeInChecksDontPass := false + for txoIdx, _ := range spentBuf { + if txD.TxSpent[txoIdx] != spentBuf[txoIdx] { + op := wire.OutPoint{ + *txHash, + uint32(txoIdx), + dcrutil.TxTreeRegular, + } + + if _, unspent := unspentFromTxTreeStake[op]; !unspent { + stakeInChecksDontPass = true + } + } + } + + if stakeInChecksDontPass { + tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ + "returned unexpected spend data - got %v, "+ + "want %v", funcName, tc.dbType, tc.blockHeight, + tc.blockHash, i, txHash, txD.TxSpent, spentBuf) + return false + } + } + } + } return true } @@ -545,7 +964,7 @@ func testInterface(t *testing.T, dbType string) { context := testContext{t: t, dbType: dbType, db: db} t.Logf("Loaded %d blocks for testing %s", len(blocks), dbType) - for height := int64(1); height < int64(len(blocks)); height++ { + for height := int64(0); height < int64(len(blocks)); height++ { // Get the appropriate block and hash and update the test // context accordingly. block := blocks[height] @@ -612,14 +1031,14 @@ func testInterface(t *testing.T, dbType string) { - Close() - DropAfterBlockBySha(*wire.ShaHash) (err error) x ExistsSha(sha *wire.ShaHash) (exists bool) - x FetchBlockBySha(sha *wire.ShaHash) (blk *btcutil.Block, err error) + x FetchBlockBySha(sha *wire.ShaHash) (blk *dcrutil.Block, err error) x FetchBlockShaByHeight(height int64) (sha *wire.ShaHash, err error) - FetchHeightRange(startHeight, endHeight int64) (rshalist []wire.ShaHash, err error) x ExistsTxSha(sha *wire.ShaHash) (exists bool) x FetchTxBySha(txsha *wire.ShaHash) ([]*TxListReply, error) x FetchTxByShaList(txShaList []*wire.ShaHash) []*TxListReply x FetchUnSpentTxByShaList(txShaList []*wire.ShaHash) []*TxListReply - x InsertBlock(block *btcutil.Block) (height int64, err error) + x InsertBlock(block *dcrutil.Block) (height int64, err error) x NewestSha() (sha *wire.ShaHash, height int64, err error) - RollbackClose() - Sync() diff --git a/database/ldb/block.go b/database/ldb/block.go index 1079420d..604df509 100644 --- a/database/ldb/block.go +++ b/database/ldb/block.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
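One more note on the spend checks above before the leveldb backend hunks: a mismatch between the fetched `TxSpent` flags and `expectedSpentBuf` is only treated as a failure when the outpoint was not consumed by the current block's stake tree, since those inputs are effectively unspent while the parent's regular tree is being evaluated. Condensed into a sketch (the helper name is illustrative; `stakeSpent` corresponds to the set built by `unspendStakeTxTree`):

```go
// Sketch of the tolerance in the spent-flag comparison: only report an error
// if a mismatched output was not spent by this block's stake transactions.
func spentFlagsAcceptable(got, want []bool, txHash *chainhash.Hash,
	stakeSpent map[wire.OutPoint]struct{}) bool {

	for idx := range want {
		if got[idx] == want[idx] {
			continue
		}
		op := wire.OutPoint{*txHash, uint32(idx), dcrutil.TxTreeRegular}
		if _, ok := stakeSpent[op]; !ok {
			return false
		}
	}
	return true
}
```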
@@ -8,29 +9,31 @@ import ( "bytes" "encoding/binary" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" "github.com/btcsuite/goleveldb/leveldb" + + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) -// FetchBlockBySha - return a btcutil Block -func (db *LevelDb) FetchBlockBySha(sha *wire.ShaHash) (blk *btcutil.Block, err error) { +// FetchBlockBySha - return a dcrutil Block +func (db *LevelDb) FetchBlockBySha(sha *chainhash.Hash) (blk *dcrutil.Block, err error) { db.dbLock.Lock() defer db.dbLock.Unlock() return db.fetchBlockBySha(sha) } -// fetchBlockBySha - return a btcutil Block +// fetchBlockBySha - return a dcrutil Block // Must be called with db lock held. -func (db *LevelDb) fetchBlockBySha(sha *wire.ShaHash) (blk *btcutil.Block, err error) { +func (db *LevelDb) fetchBlockBySha(sha *chainhash.Hash) (blk *dcrutil.Block, err error) { buf, height, err := db.fetchSha(sha) if err != nil { return } - blk, err = btcutil.NewBlockFromBytes(buf) + blk, err = dcrutil.NewBlockFromBytes(buf) if err != nil { return } @@ -41,7 +44,7 @@ func (db *LevelDb) fetchBlockBySha(sha *wire.ShaHash) (blk *btcutil.Block, err e // FetchBlockHeightBySha returns the block height for the given hash. This is // part of the database.Db interface implementation. -func (db *LevelDb) FetchBlockHeightBySha(sha *wire.ShaHash) (int64, error) { +func (db *LevelDb) FetchBlockHeightBySha(sha *chainhash.Hash) (int64, error) { db.dbLock.Lock() defer db.dbLock.Unlock() @@ -49,7 +52,7 @@ func (db *LevelDb) FetchBlockHeightBySha(sha *wire.ShaHash) (int64, error) { } // FetchBlockHeaderBySha - return a ShaHash -func (db *LevelDb) FetchBlockHeaderBySha(sha *wire.ShaHash) (bh *wire.BlockHeader, err error) { +func (db *LevelDb) FetchBlockHeaderBySha(sha *chainhash.Hash) (bh *wire.BlockHeader, err error) { db.dbLock.Lock() defer db.dbLock.Unlock() @@ -71,7 +74,7 @@ func (db *LevelDb) FetchBlockHeaderBySha(sha *wire.ShaHash) (bh *wire.BlockHeade return bh, err } -func (db *LevelDb) getBlkLoc(sha *wire.ShaHash) (int64, error) { +func (db *LevelDb) getBlkLoc(sha *chainhash.Hash) (int64, error) { key := shaBlkToKey(sha) data, err := db.lDb.Get(key, db.ro) @@ -88,7 +91,7 @@ func (db *LevelDb) getBlkLoc(sha *wire.ShaHash) (int64, error) { return int64(blkHeight), nil } -func (db *LevelDb) getBlkByHeight(blkHeight int64) (rsha *wire.ShaHash, rbuf []byte, err error) { +func (db *LevelDb) getBlkByHeight(blkHeight int64) (rsha *chainhash.Hash, rbuf []byte, err error) { var blkVal []byte key := int64ToKey(blkHeight) @@ -99,7 +102,7 @@ func (db *LevelDb) getBlkByHeight(blkHeight int64) (rsha *wire.ShaHash, rbuf []b return // exists ??? 
} - var sha wire.ShaHash + var sha chainhash.Hash sha.SetBytes(blkVal[0:32]) @@ -109,7 +112,7 @@ func (db *LevelDb) getBlkByHeight(blkHeight int64) (rsha *wire.ShaHash, rbuf []b return &sha, blockdata, nil } -func (db *LevelDb) getBlk(sha *wire.ShaHash) (rblkHeight int64, rbuf []byte, err error) { +func (db *LevelDb) getBlk(sha *chainhash.Hash) (rblkHeight int64, rbuf []byte, err error) { var blkHeight int64 blkHeight, err = db.getBlkLoc(sha) @@ -126,7 +129,7 @@ func (db *LevelDb) getBlk(sha *wire.ShaHash) (rblkHeight int64, rbuf []byte, err return blkHeight, buf, nil } -func (db *LevelDb) setBlk(sha *wire.ShaHash, blkHeight int64, buf []byte) { +func (db *LevelDb) setBlk(sha *chainhash.Hash, blkHeight int64, buf []byte) { // serialize var lw [8]byte binary.LittleEndian.PutUint64(lw[0:8], uint64(blkHeight)) @@ -145,7 +148,7 @@ func (db *LevelDb) setBlk(sha *wire.ShaHash, blkHeight int64, buf []byte) { // insertSha stores a block hash and its associated data block with a // previous sha of `prevSha'. // insertSha shall be called with db lock held -func (db *LevelDb) insertBlockData(sha *wire.ShaHash, prevSha *wire.ShaHash, buf []byte) (int64, error) { +func (db *LevelDb) insertBlockData(sha *chainhash.Hash, prevSha *chainhash.Hash, buf []byte) (int64, error) { oBlkHeight, err := db.getBlkLoc(prevSha) if err != nil { // check current block count @@ -174,7 +177,7 @@ func (db *LevelDb) insertBlockData(sha *wire.ShaHash, prevSha *wire.ShaHash, buf } // fetchSha returns the datablock for the given ShaHash. -func (db *LevelDb) fetchSha(sha *wire.ShaHash) (rbuf []byte, +func (db *LevelDb) fetchSha(sha *chainhash.Hash) (rbuf []byte, rblkHeight int64, err error) { var blkHeight int64 var buf []byte @@ -189,7 +192,7 @@ func (db *LevelDb) fetchSha(sha *wire.ShaHash) (rbuf []byte, // ExistsSha looks up the given block hash // returns true if it is present in the database. -func (db *LevelDb) ExistsSha(sha *wire.ShaHash) (bool, error) { +func (db *LevelDb) ExistsSha(sha *chainhash.Hash) (bool, error) { db.dbLock.Lock() defer db.dbLock.Unlock() @@ -200,7 +203,7 @@ func (db *LevelDb) ExistsSha(sha *wire.ShaHash) (bool, error) { // blkExistsSha looks up the given block hash // returns true if it is present in the database. // CALLED WITH LOCK HELD -func (db *LevelDb) blkExistsSha(sha *wire.ShaHash) (bool, error) { +func (db *LevelDb) blkExistsSha(sha *chainhash.Hash) (bool, error) { key := shaBlkToKey(sha) return db.lDb.Has(key, db.ro) @@ -208,7 +211,7 @@ func (db *LevelDb) blkExistsSha(sha *wire.ShaHash) (bool, error) { // FetchBlockShaByHeight returns a block hash based on its height in the // block chain. -func (db *LevelDb) FetchBlockShaByHeight(height int64) (sha *wire.ShaHash, err error) { +func (db *LevelDb) FetchBlockShaByHeight(height int64) (sha *chainhash.Hash, err error) { db.dbLock.Lock() defer db.dbLock.Unlock() @@ -217,7 +220,7 @@ func (db *LevelDb) FetchBlockShaByHeight(height int64) (sha *wire.ShaHash, err e // fetchBlockShaByHeight returns a block hash based on its height in the // block chain. -func (db *LevelDb) fetchBlockShaByHeight(height int64) (rsha *wire.ShaHash, err error) { +func (db *LevelDb) fetchBlockShaByHeight(height int64) (rsha *chainhash.Hash, err error) { key := int64ToKey(height) blkVal, err := db.lDb.Get(key, db.ro) @@ -226,7 +229,7 @@ func (db *LevelDb) fetchBlockShaByHeight(height int64) (rsha *wire.ShaHash, err return // exists ??? 
} - var sha wire.ShaHash + var sha chainhash.Hash sha.SetBytes(blkVal[0:32]) return &sha, nil @@ -236,7 +239,7 @@ func (db *LevelDb) fetchBlockShaByHeight(height int64) (rsha *wire.ShaHash, err // heights. Fetch is inclusive of the start height and exclusive of the // ending height. To fetch all hashes from the start height until no // more are present, use the special id `AllShas'. -func (db *LevelDb) FetchHeightRange(startHeight, endHeight int64) (rshalist []wire.ShaHash, err error) { +func (db *LevelDb) FetchHeightRange(startHeight, endHeight int64) (rshalist []chainhash.Hash, err error) { db.dbLock.Lock() defer db.dbLock.Unlock() @@ -247,7 +250,7 @@ func (db *LevelDb) FetchHeightRange(startHeight, endHeight int64) (rshalist []wi endidx = endHeight } - shalist := make([]wire.ShaHash, 0, endidx-startHeight) + shalist := make([]chainhash.Hash, 0, endidx-startHeight) for height := startHeight; height < endidx; height++ { // TODO(drahn) fix blkFile from height @@ -257,7 +260,7 @@ func (db *LevelDb) FetchHeightRange(startHeight, endHeight int64) (rshalist []wi break } - var sha wire.ShaHash + var sha chainhash.Hash sha.SetBytes(blkVal[0:32]) shalist = append(shalist, sha) } @@ -273,12 +276,12 @@ func (db *LevelDb) FetchHeightRange(startHeight, endHeight int64) (rshalist []wi // NewestSha returns the hash and block height of the most recent (end) block of // the block chain. It will return the zero hash, -1 for the block height, and // no error (nil) if there are not any blocks in the database yet. -func (db *LevelDb) NewestSha() (rsha *wire.ShaHash, rblkid int64, err error) { +func (db *LevelDb) NewestSha() (rsha *chainhash.Hash, rblkid int64, err error) { db.dbLock.Lock() defer db.dbLock.Unlock() if db.lastBlkIdx == -1 { - return &wire.ShaHash{}, -1, nil + return &chainhash.Hash{}, -1, nil } sha := db.lastBlkSha @@ -312,16 +315,16 @@ func (db *LevelDb) checkAddrIndexVersion() error { // updated accordingly by functions that modify the state. This function is // used on start up to load the info into memory. Callers will use the public // version of this function below, which returns our cached copy. -func (db *LevelDb) fetchAddrIndexTip() (*wire.ShaHash, int64, error) { +func (db *LevelDb) fetchAddrIndexTip() (*chainhash.Hash, int64, error) { db.dbLock.Lock() defer db.dbLock.Unlock() data, err := db.lDb.Get(addrIndexMetaDataKey, db.ro) if err != nil { - return &wire.ShaHash{}, -1, database.ErrAddrIndexDoesNotExist + return &chainhash.Hash{}, -1, database.ErrAddrIndexDoesNotExist } - var blkSha wire.ShaHash + var blkSha chainhash.Hash blkSha.SetBytes(data[0:32]) blkHeight := binary.LittleEndian.Uint64(data[32:]) @@ -333,12 +336,12 @@ func (db *LevelDb) fetchAddrIndexTip() (*wire.ShaHash, int64, error) { // block whose transactions have been indexed by address. It will return // ErrAddrIndexDoesNotExist along with a zero hash, and -1 if the // addrindex hasn't yet been built up. 
-func (db *LevelDb) FetchAddrIndexTip() (*wire.ShaHash, int64, error) { +func (db *LevelDb) FetchAddrIndexTip() (*chainhash.Hash, int64, error) { db.dbLock.Lock() defer db.dbLock.Unlock() if db.lastAddrIndexBlkIdx == -1 { - return &wire.ShaHash{}, -1, database.ErrAddrIndexDoesNotExist + return &chainhash.Hash{}, -1, database.ErrAddrIndexDoesNotExist } sha := db.lastAddrIndexBlkSha diff --git a/database/ldb/boundary_test.go b/database/ldb/boundary_test.go index f09d2bc2..190ca553 100644 --- a/database/ldb/boundary_test.go +++ b/database/ldb/boundary_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,8 +9,8 @@ import ( "os" "testing" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" ) // we need to test for an empty database and make certain it returns the proper @@ -30,7 +31,7 @@ func TestEmptyDB(t *testing.T) { defer os.RemoveAll(dbnamever) sha, height, err := db.NewestSha() - if !sha.IsEqual(&wire.ShaHash{}) { + if !sha.IsEqual(&chainhash.Hash{}) { t.Errorf("sha not zero hash") } if height != -1 { @@ -54,7 +55,7 @@ func TestEmptyDB(t *testing.T) { }() sha, height, err = db.NewestSha() - if !sha.IsEqual(&wire.ShaHash{}) { + if !sha.IsEqual(&chainhash.Hash{}) { t.Errorf("sha not zero hash") } if height != -1 { diff --git a/database/ldb/dbtest/dbtst.go b/database/ldb/dbtest/dbtst.go index 582168cd..a3bc1625 100644 --- a/database/ldb/dbtest/dbtst.go +++ b/database/ldb/dbtest/dbtst.go @@ -1,4 +1,3 @@ -// package main import ( diff --git a/database/ldb/doc.go b/database/ldb/doc.go index 63b9d358..d9706ef3 100644 --- a/database/ldb/doc.go +++ b/database/ldb/doc.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. diff --git a/database/ldb/dup_test.go b/database/ldb/dup_test.go index 7bf6ab92..b60172c4 100644 --- a/database/ldb/dup_test.go +++ b/database/ldb/dup_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -10,11 +11,24 @@ import ( "path/filepath" "testing" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + //"github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) +func existsInOwnBlockRegTree(block *wire.MsgBlock, hash chainhash.Hash) bool { + for _, tx := range block.Transactions { + txH := tx.TxSha() + if txH == hash { + return true + } + } + + return false +} + func Test_dupTx(t *testing.T) { // Ignore db remove errors since it means we didn't have an old one. 
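The `existsInOwnBlockRegTree` helper added above feeds the in-block spend tracking introduced in the next hunk of `Test_dupTx`: an input that references a transaction in the same block's regular tree skips the database lookup and is allowed once, while a second reference to the same outpoint is reported as a double spend. In outline (the function below is an illustrative condensation of that logic, not code from the patch):

```go
// Sketch of the per-block outpoint tracking in Test_dupTx: the first spend of
// an outpoint whose origin lives in the same block is tolerated, a repeat is
// a double spend.  seen plays the role of opSpentInBlock.
func checkInBlockSpend(seen map[wire.OutPoint]struct{}, block *wire.MsgBlock,
	in *wire.TxIn) (skipLookup bool, doubleSpend bool) {

	if !existsInOwnBlockRegTree(block, in.PreviousOutPoint.Hash) {
		return false, false // origin is in an earlier block; look it up in the db
	}
	if _, used := seen[in.PreviousOutPoint]; used {
		return true, true
	}
	seen[in.PreviousOutPoint] = struct{}{}
	return true, false
}
```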
@@ -35,7 +49,7 @@ func Test_dupTx(t *testing.T) { } }() - testdatafile := filepath.Join("testdata", "blocks1-256.bz2") + testdatafile := filepath.Join("../", "../blockchain/testdata", "blocks0to168.bz2") blocks, err := loadBlocks(t, testdatafile) if err != nil { t.Errorf("Unable to load blocks from test data for: %v", @@ -43,47 +57,99 @@ func Test_dupTx(t *testing.T) { return } - var lastSha *wire.ShaHash + var lastSha *chainhash.Hash // Populate with the fisrt 256 blocks, so we have blocks to 'mess with' err = nil out: for height := int64(0); height < int64(len(blocks)); height++ { block := blocks[height] + if height != 0 { + // except for NoVerify which does not allow lookups check inputs + mblock := block.MsgBlock() + //t.Errorf("%v", blockchain.DebugBlockString(block)) + parentBlock := blocks[height-1] + mParentBlock := parentBlock.MsgBlock() + var txneededList []*chainhash.Hash + opSpentInBlock := make(map[wire.OutPoint]struct{}) + if dcrutil.IsFlagSet16(dcrutil.BlockValid, mParentBlock.Header.VoteBits) { + for _, tx := range mParentBlock.Transactions { + for _, txin := range tx.TxIn { + if txin.PreviousOutPoint.Index == uint32(4294967295) { + continue + } - // except for NoVerify which does not allow lookups check inputs - mblock := block.MsgBlock() - var txneededList []*wire.ShaHash - for _, tx := range mblock.Transactions { - for _, txin := range tx.TxIn { - if txin.PreviousOutPoint.Index == uint32(4294967295) { - continue - } - origintxsha := &txin.PreviousOutPoint.Hash - txneededList = append(txneededList, origintxsha) + if existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) { + _, used := opSpentInBlock[txin.PreviousOutPoint] + if !used { + // Origin tx is in the block and so hasn't been + // added yet, continue + opSpentInBlock[txin.PreviousOutPoint] = struct{}{} + continue + } else { + t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint) + } + } - exists, err := db.ExistsTxSha(origintxsha) - if err != nil { - t.Errorf("ExistsTxSha: unexpected error %v ", err) - } - if !exists { - t.Errorf("referenced tx not found %v ", origintxsha) - } + origintxsha := &txin.PreviousOutPoint.Hash + txneededList = append(txneededList, origintxsha) + exists, err := db.ExistsTxSha(origintxsha) + if err != nil { + t.Errorf("ExistsTxSha: unexpected error %v ", err) + } + if !exists { + t.Errorf("referenced tx not found %v (height %v)", origintxsha, height) + } - _, err = db.FetchTxBySha(origintxsha) - if err != nil { - t.Errorf("referenced tx not found %v err %v ", origintxsha, err) + _, err = db.FetchTxBySha(origintxsha) + if err != nil { + t.Errorf("referenced tx not found %v err %v ", origintxsha, err) + } + } + } + } + for _, stx := range mblock.STransactions { + for _, txin := range stx.TxIn { + if txin.PreviousOutPoint.Index == uint32(4294967295) { + continue + } + if existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) { + _, used := opSpentInBlock[txin.PreviousOutPoint] + if !used { + // Origin tx is in the block and so hasn't been + // added yet, continue + opSpentInBlock[txin.PreviousOutPoint] = struct{}{} + continue + } else { + t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint) + } + } + + origintxsha := &txin.PreviousOutPoint.Hash + txneededList = append(txneededList, origintxsha) + + exists, err := db.ExistsTxSha(origintxsha) + if err != nil { + t.Errorf("ExistsTxSha: unexpected error %v ", err) + } + if !exists { + t.Errorf("referenced tx not found %v", 
origintxsha) + } + + _, err = db.FetchTxBySha(origintxsha) + if err != nil { + t.Errorf("referenced tx not found %v err %v ", origintxsha, err) + } + } + } + txlist := db.FetchUnSpentTxByShaList(txneededList) + for _, txe := range txlist { + if txe.Err != nil { + t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err) + break out } } } - txlist := db.FetchUnSpentTxByShaList(txneededList) - for _, txe := range txlist { - if txe.Err != nil { - t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err) - break out - } - } - newheight, err := db.InsertBlock(block) if err != nil { t.Errorf("failed to insert block %v err %v", height, err) @@ -116,15 +182,15 @@ out: var bh wire.BlockHeader - bh.Version = 2 + bh.Version = 0 bh.PrevBlock = *lastSha // Bits, Nonce are not filled in mblk := wire.NewMsgBlock(&bh) - hash, _ := wire.NewShaHashFromStr("df2b060fa2e5e9c8ed5eaf6a45c13753ec8c63282b2688322eba40cd98ea067a") + hash, _ := chainhash.NewHashFromStr("c23953c56cb2ef8e4698e3ed3b0fc4c837754d3cd16485192d893e35f32626b4") - po := wire.NewOutPoint(hash, 0) + po := wire.NewOutPoint(hash, 0, dcrutil.TxTreeRegular) txI := wire.NewTxIn(po, []byte("garbage")) txO := wire.NewTxOut(50000000, []byte("garbageout")) @@ -134,9 +200,9 @@ out: mblk.AddTransaction(&tx) - blk := btcutil.NewBlock(mblk) + blk := dcrutil.NewBlock(mblk) - fetchList := []*wire.ShaHash{hash} + fetchList := []*chainhash.Hash{hash} listReply := db.FetchUnSpentTxByShaList(fetchList) for _, lr := range listReply { if lr.Err != nil { @@ -154,7 +220,7 @@ out: listReply = db.FetchUnSpentTxByShaList(fetchList) for _, lr := range listReply { - if lr.Err != database.ErrTxShaMissing { + if lr.Err != nil && lr.Err != database.ErrTxShaMissing { t.Errorf("sha %v spent %v err %v\n", lr.Sha, lr.TxSpent, lr.Err) } @@ -176,8 +242,6 @@ out: } } - t.Logf("Dropping block") - err = db.DropAfterBlockBySha(lastSha) if err != nil { t.Errorf("failed to drop spending block %v", err) diff --git a/database/ldb/insertremove_test.go b/database/ldb/insertremove_test.go index ea635b91..2e87b02b 100644 --- a/database/ldb/insertremove_test.go +++ b/database/ldb/insertremove_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
@@ -10,20 +11,21 @@ import ( "path/filepath" "testing" - "github.com/btcsuite/btcd/database" - _ "github.com/btcsuite/btcd/database/ldb" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + _ "github.com/decred/dcrd/database/ldb" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) -var tstBlocks []*btcutil.Block +var tstBlocks []*dcrutil.Block -func loadblocks(t *testing.T) []*btcutil.Block { +func loadblocks(t *testing.T) []*dcrutil.Block { if len(tstBlocks) != 0 { return tstBlocks } - testdatafile := filepath.Join("..", "testdata", "blocks1-256.bz2") + testdatafile := filepath.Join("..", "/../blockchain/testdata", "blocks0to168.bz2") blocks, err := loadBlocks(t, testdatafile) if err != nil { t.Errorf("Unable to load blocks from test data: %v", err) @@ -34,7 +36,8 @@ func loadblocks(t *testing.T) []*btcutil.Block { } func TestUnspentInsert(t *testing.T) { - testUnspentInsert(t) + testUnspentInsertStakeTree(t) + testUnspentInsertRegTree(t) } // insert every block in the test chain @@ -42,7 +45,8 @@ func TestUnspentInsert(t *testing.T) { // block and verify that the the tx is spent/unspent // new tx should be fully unspent, referenced tx should have // the associated txout set to spent. -func testUnspentInsert(t *testing.T) { +// checks tx tree stake only +func testUnspentInsertStakeTree(t *testing.T) { // Ignore db remove errors since it means we didn't have an old one. dbname := fmt.Sprintf("tstdbuspnt1") dbnamever := dbname + ".ver" @@ -60,47 +64,83 @@ func testUnspentInsert(t *testing.T) { t.Errorf("Close: unexpected error: %v", err) } }() - blocks := loadblocks(t) endtest: - for height := int64(0); height < int64(len(blocks)); height++ { - + for height := int64(0); height < int64(len(blocks))-1; height++ { block := blocks[height] - // look up inputs to this tx - mblock := block.MsgBlock() - var txneededList []*wire.ShaHash - var txlookupList []*wire.ShaHash - var txOutList []*wire.ShaHash + + var txneededList []*chainhash.Hash + var txlookupList []*chainhash.Hash + var txOutList []*chainhash.Hash var txInList []*wire.OutPoint - for _, tx := range mblock.Transactions { + spentFromParent := make(map[wire.OutPoint]struct{}) + for _, tx := range block.MsgBlock().STransactions { for _, txin := range tx.TxIn { if txin.PreviousOutPoint.Index == uint32(4294967295) { continue } origintxsha := &txin.PreviousOutPoint.Hash - txInList = append(txInList, &txin.PreviousOutPoint) - txneededList = append(txneededList, origintxsha) - txlookupList = append(txlookupList, origintxsha) - exists, err := db.ExistsTxSha(origintxsha) if err != nil { t.Errorf("ExistsTxSha: unexpected error %v ", err) } if !exists { - t.Errorf("referenced tx not found %v ", origintxsha) + // Check and see if the outpoint references txtreeregular of + // the previous block. If it does, make sure nothing in tx + // treeregular spends it in flight. Then check make sure it's + // not currently spent for this block. If it isn't, mark it + // spent and skip lookup in the db below, since the db won't + // yet be able to add it as it's still to be inserted. + spentFromParentReg := false + parent := blocks[height-1] + parentValid := dcrutil.IsFlagSet16(dcrutil.BlockValid, block.MsgBlock().Header.VoteBits) + if parentValid { + for _, prtx := range parent.Transactions() { + // Check and make sure it's not being spent in this tx + // tree first by an in flight tx. Mark it spent if it + // is so it fails the check below. 
+ for _, prtxCheck := range parent.Transactions() { + for _, prTxIn := range prtxCheck.MsgTx().TxIn { + if prTxIn.PreviousOutPoint == txin.PreviousOutPoint { + spentFromParent[txin.PreviousOutPoint] = struct{}{} + } + } + } + + // If it is in the tree, make sure it's not already spent + // somewhere else and mark it spent. Set the flag below + // so we skip lookup. + if prtx.Sha().IsEqual(origintxsha) { + if _, spent := spentFromParent[txin.PreviousOutPoint]; !spent { + spentFromParent[txin.PreviousOutPoint] = struct{}{} + spentFromParentReg = true + } + } + } + } + + if !spentFromParentReg { + t.Errorf("referenced tx not found %v %v", origintxsha, height) + } else { + continue + } } + + txInList = append(txInList, &txin.PreviousOutPoint) + txneededList = append(txneededList, origintxsha) + txlookupList = append(txlookupList, origintxsha) } txshaname := tx.TxSha() txlookupList = append(txlookupList, &txshaname) txOutList = append(txOutList, &txshaname) } - txneededmap := map[wire.ShaHash]*database.TxListReply{} + txneededmap := map[chainhash.Hash]*database.TxListReply{} txlist := db.FetchUnSpentTxByShaList(txneededList) for _, txe := range txlist { if txe.Err != nil { - t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err) + t.Errorf("tx list fetch failed %v err %v", txe.Sha, txe.Err) break endtest } txneededmap[*txe.Sha] = txe @@ -121,12 +161,12 @@ endtest: t.Errorf("height mismatch expect %v returned %v", height, newheight) break endtest } - - txlookupmap := map[wire.ShaHash]*database.TxListReply{} + // only check transactions if current block is valid + txlookupmap := map[chainhash.Hash]*database.TxListReply{} txlist = db.FetchTxByShaList(txlookupList) for _, txe := range txlist { if txe.Err != nil { - t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err) + t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err, height) break endtest } txlookupmap[*txe.Sha] = txe @@ -141,7 +181,7 @@ endtest: itxe := txlookupmap[*txo] for i, spent := range itxe.TxSpent { if spent == true { - t.Errorf("freshly inserted tx %v already spent %v", txo, i) + t.Errorf("height: %v freshly inserted tx %v already spent %v", height, txo, i) } } @@ -157,7 +197,7 @@ endtest: break endtest } - txlookupmap = map[wire.ShaHash]*database.TxListReply{} + txlookupmap = map[chainhash.Hash]*database.TxListReply{} txlist = db.FetchUnSpentTxByShaList(txlookupList) for _, txe := range txlist { if txe.Err != nil { @@ -179,7 +219,275 @@ endtest: t.Errorf("failed to insert block %v err %v", height, err) break endtest } - txlookupmap = map[wire.ShaHash]*database.TxListReply{} + txlookupmap = map[chainhash.Hash]*database.TxListReply{} + txlist = db.FetchTxByShaList(txlookupList) + for _, txe := range txlist { + if txe.Err != nil { + t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err) + break endtest + } + txlookupmap[*txe.Sha] = txe + } + for _, spend := range txInList { + itxe := txlookupmap[spend.Hash] + if itxe.TxSpent[spend.Index] == false { + t.Errorf("txin %v:%v is unspent %v", spend.Hash, spend.Index, itxe.TxSpent) + } + } + } +} + +// getRegTreeOpsSpentBeforeThisOp returns all outpoints spent before this one +// in the block's tx tree regular. used for checking vs in flight tx. 
+func getRegTreeOpsSpentBeforeThisOp(block *dcrutil.Block, idx int, txinIdx int) map[wire.OutPoint]struct{} { + spentOps := make(map[wire.OutPoint]struct{}) + + thisTx := block.Transactions()[idx] + for i, txIn := range thisTx.MsgTx().TxIn { + if i < txinIdx { + spentOps[txIn.PreviousOutPoint] = struct{}{} + } + } + + for i, tx := range block.Transactions() { + if i < idx { + for _, txIn := range tx.MsgTx().TxIn { + spentOps[txIn.PreviousOutPoint] = struct{}{} + } + } + } + + return spentOps +} + +// unspendInflightTxTree returns all outpoints spent that reference internal +// transactions in a TxTreeRegular. +func unspendInflightTxTree(block *dcrutil.Block) map[wire.OutPoint]struct{} { + unspentOps := make(map[wire.OutPoint]struct{}) + allTxHashes := make(map[chainhash.Hash]struct{}) + for _, tx := range block.Transactions() { + h := tx.Sha() + allTxHashes[*h] = struct{}{} + } + + for _, tx := range block.Transactions() { + for _, txIn := range tx.MsgTx().TxIn { + if _, isLocal := allTxHashes[txIn.PreviousOutPoint.Hash]; isLocal { + unspentOps[txIn.PreviousOutPoint] = struct{}{} + } + } + } + + return unspentOps +} + +// unspendStakeTxTree returns all outpoints spent before this one +// in the block's tx tree stake. used for unspending the stake tx +// tree to evaluate tx tree regular of prev block. +func unspendStakeTxTree(block *dcrutil.Block) map[wire.OutPoint]struct{} { + unspentOps := make(map[wire.OutPoint]struct{}) + + for _, tx := range block.STransactions() { + for _, txIn := range tx.MsgTx().TxIn { + unspentOps[txIn.PreviousOutPoint] = struct{}{} + } + } + + return unspentOps +} + +// insert every block in the test chain +// after each insert, fetch all the tx affected by the latest +// block and verify that the the tx is spent/unspent +// new tx should be fully unspent, referenced tx should have +// the associated txout set to spent. +// checks tx tree regular only +func testUnspentInsertRegTree(t *testing.T) { + // Ignore db remove errors since it means we didn't have an old one. + dbname := fmt.Sprintf("tstdbuspnt1") + dbnamever := dbname + ".ver" + _ = os.RemoveAll(dbname) + _ = os.RemoveAll(dbnamever) + db, err := database.CreateDB("leveldb", dbname) + if err != nil { + t.Errorf("Failed to open test database %v", err) + return + } + defer os.RemoveAll(dbname) + defer os.RemoveAll(dbnamever) + defer func() { + if err := db.Close(); err != nil { + t.Errorf("Close: unexpected error: %v", err) + } + }() + blocks := loadblocks(t) +endtest: + for height := int64(0); height < int64(len(blocks))-1; height++ { + block := blocks[height] + + // jam in genesis block + if height == 0 { + _, err := db.InsertBlock(block) + if err != nil { + t.Errorf("failed to insert block %v err %v", height, err) + break endtest + } + continue + } + + var txneededList []*chainhash.Hash + var txlookupList []*chainhash.Hash + var txOutList []*chainhash.Hash + var txInList []*wire.OutPoint + parent := blocks[height-1] + unspentStakeOps := unspendStakeTxTree(block) + + // Check regular tree of parent and make sure it's ok + for txIdx, tx := range parent.MsgBlock().Transactions { + for txinIdx, txin := range tx.TxIn { + if txin.PreviousOutPoint.Index == uint32(4294967295) { + continue + } + + origintxsha := &txin.PreviousOutPoint.Hash + + exists, err := db.ExistsTxSha(origintxsha) + if err != nil { + t.Errorf("ExistsTxSha: unexpected error %v ", err) + } + if !exists { + // Check and see if something in flight spends it from this + // tx tree. 
We can skip looking for this transaction OP + // if that's the case. + spentFromParentReg := false + alreadySpentOps := getRegTreeOpsSpentBeforeThisOp(parent, + txIdx, txinIdx) + _, alreadySpent := alreadySpentOps[txin.PreviousOutPoint] + if !alreadySpent { + spentFromParentReg = true + } + + if !spentFromParentReg { + t.Errorf("referenced tx not found %v %v", origintxsha, + height) + } else { + continue + } + } + + txInList = append(txInList, &txin.PreviousOutPoint) + txneededList = append(txneededList, origintxsha) + txlookupList = append(txlookupList, origintxsha) + } + txshaname := tx.TxSha() + txlookupList = append(txlookupList, &txshaname) + txOutList = append(txOutList, &txshaname) + } + + txneededmap := map[chainhash.Hash]*database.TxListReply{} + txlist := db.FetchUnSpentTxByShaList(txneededList) + for _, txe := range txlist { + if txe.Err != nil { + t.Errorf("tx list fetch failed %v err %v", txe.Sha, txe.Err) + break endtest + } + txneededmap[*txe.Sha] = txe + } + for _, spend := range txInList { + itxe := txneededmap[spend.Hash] + if itxe.TxSpent[spend.Index] == true { + t.Errorf("txin %v:%v is already spent", spend.Hash, spend.Index) + } + } + + newheight, err := db.InsertBlock(block) + if err != nil { + t.Errorf("failed to insert block %v err %v", height, err) + break endtest + } + if newheight != height { + t.Errorf("height mismatch expect %v returned %v", height, newheight) + break endtest + } + // only check transactions if current block validates parent block + if !dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits, dcrutil.BlockValid) { + continue + } + + txlookupmap := map[chainhash.Hash]*database.TxListReply{} + txlist = db.FetchTxByShaList(txlookupList) + for _, txe := range txlist { + if txe.Err != nil { + t.Errorf("tx list fetch failed %v err %v (height %v)", txe.Sha, + txe.Err, height) + break endtest + } + txlookupmap[*txe.Sha] = txe + } + for _, spend := range txInList { + itxe := txlookupmap[spend.Hash] + if itxe.TxSpent[spend.Index] == false { + t.Errorf("txin %v:%v is unspent %v", spend.Hash, spend.Index, itxe.TxSpent) + } + } + + alreadySpentOps := unspendInflightTxTree(parent) + for _, txo := range txOutList { + itxe := txlookupmap[*txo] + for i, spent := range itxe.TxSpent { + if spent == true { + // If this was spent in flight, skip + thisOP := wire.OutPoint{*txo, uint32(i), dcrutil.TxTreeRegular} + _, alreadySpent := alreadySpentOps[thisOP] + if alreadySpent { + continue + } + + // If it was spent in the stake tree it's actually unspent too + _, wasSpentInStakeTree := unspentStakeOps[thisOP] + if wasSpentInStakeTree { + continue + } + + t.Errorf("height: %v freshly inserted tx %v already spent %v", height, txo, i) + } + } + + } + if len(txInList) == 0 { + continue + } + dropblock := blocks[height-1] + + err = db.DropAfterBlockBySha(dropblock.Sha()) + if err != nil { + t.Errorf("failed to drop block %v err %v", height, err) + break endtest + } + + txlookupmap = map[chainhash.Hash]*database.TxListReply{} + txlist = db.FetchUnSpentTxByShaList(txlookupList) + for _, txe := range txlist { + if txe.Err != nil { + if _, ok := txneededmap[*txe.Sha]; ok { + t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err) + break endtest + } + } + txlookupmap[*txe.Sha] = txe + } + for _, spend := range txInList { + itxe := txlookupmap[spend.Hash] + if itxe.TxSpent[spend.Index] == true { + t.Errorf("txin %v:%v is unspent %v", spend.Hash, spend.Index, itxe.TxSpent) + } + } + newheight, err = db.InsertBlock(block) + if err != nil { + t.Errorf("failed to insert block %v 
err %v", height, err) + break endtest + } + txlookupmap = map[chainhash.Hash]*database.TxListReply{} txlist = db.FetchTxByShaList(txlookupList) for _, txe := range txlist { if txe.Err != nil { diff --git a/database/ldb/internal_test.go b/database/ldb/internal_test.go index a4242c21..8a772d70 100644 --- a/database/ldb/internal_test.go +++ b/database/ldb/internal_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -9,44 +10,46 @@ import ( "testing" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/database" + "github.com/decred/dcrutil" + "github.com/btcsuite/golangcrypto/ripemd160" ) func TestAddrIndexKeySerialization(t *testing.T) { var hash160Bytes [ripemd160.Size]byte - var packedIndex [12]byte + var packedIndex [35]byte - fakeHash160 := btcutil.Hash160([]byte("testing")) + fakeHash160 := dcrutil.Hash160([]byte("testing")) copy(fakeHash160, hash160Bytes[:]) - fakeIndex := txAddrIndex{ - hash160: hash160Bytes, - blkHeight: 1, - txoffset: 5, - txlen: 360, + fakeIndex := database.TxAddrIndex{ + Hash160: hash160Bytes, + Height: 1, + TxOffset: 5, + TxLen: 360, } serializedKey := addrIndexToKey(&fakeIndex) - copy(packedIndex[:], serializedKey[23:35]) + copy(packedIndex[:], serializedKey[0:35]) unpackedIndex := unpackTxIndex(packedIndex) - if unpackedIndex.blkHeight != fakeIndex.blkHeight { + if unpackedIndex.Height != fakeIndex.Height { t.Errorf("Incorrect block height. Unpack addr index key"+ "serialization failed. Expected %d, received %d", - 1, unpackedIndex.blkHeight) + 1, unpackedIndex.Height) } - if unpackedIndex.txoffset != fakeIndex.txoffset { + if unpackedIndex.TxOffset != fakeIndex.TxOffset { t.Errorf("Incorrect tx offset. Unpack addr index key"+ "serialization failed. Expected %d, received %d", - 5, unpackedIndex.txoffset) + 5, unpackedIndex.TxOffset) } - if unpackedIndex.txlen != fakeIndex.txlen { + if unpackedIndex.TxLen != fakeIndex.TxLen { t.Errorf("Incorrect tx len. Unpack addr index key"+ "serialization failed. Expected %d, received %d", - 360, unpackedIndex.txlen) + 360, unpackedIndex.TxLen) } } diff --git a/database/ldb/leveldb.go b/database/ldb/leveldb.go index 44d24005..5b73306f 100644 --- a/database/ldb/leveldb.go +++ b/database/ldb/leveldb.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
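The 35-byte key asserted in TestAddrIndexKeySerialization above lines up with the new database.TxAddrIndex fields: a 3-byte "a+-" prefix (declared in tx.go further down), a 20-byte hash160, and three uint32 values for height, tx offset, and tx length. The snippet below only checks that arithmetic; the actual field order and byte encoding live in addrIndexToKey and unpackTxIndex, which this patch does not show.

```go
package main

import "fmt"

const (
	prefixLen  = 3  // "a+-" address index key prefix
	hash160Len = 20 // ripemd160.Size
	uint32Len  = 4  // height, tx offset, and tx length each take 4 bytes
)

func main() {
	// Assumed composition of the 35-byte address index key copied in
	// TestAddrIndexKeySerialization.
	keySize := prefixLen + hash160Len + 3*uint32Len
	fmt.Println(keySize) // 35
}
```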
@@ -11,12 +12,13 @@ import ( "strconv" "sync" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btclog" - "github.com/btcsuite/btcutil" "github.com/btcsuite/goleveldb/leveldb" "github.com/btcsuite/goleveldb/leveldb/opt" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) const ( @@ -28,7 +30,7 @@ const ( var log = btclog.Disabled type tTxInsertData struct { - txsha *wire.ShaHash + txsha *chainhash.Hash blockid int64 txoff int txlen int @@ -50,14 +52,14 @@ type LevelDb struct { nextBlock int64 lastBlkShaCached bool - lastBlkSha wire.ShaHash + lastBlkSha chainhash.Hash lastBlkIdx int64 - lastAddrIndexBlkSha wire.ShaHash + lastAddrIndexBlkSha chainhash.Hash lastAddrIndexBlkIdx int64 - txUpdateMap map[wire.ShaHash]*txUpdateObj - txSpentUpdateMap map[wire.ShaHash]*spentTxUpdate + txUpdateMap map[chainhash.Hash]*txUpdateObj + txSpentUpdateMap map[chainhash.Hash]*spentTxUpdate } var self = database.DriverDB{DbType: "leveldb", CreateDB: CreateDB, OpenDB: OpenDB} @@ -103,7 +105,7 @@ func OpenDB(args ...interface{}) (database.Db, error) { increment := int64(100000) ldb := db.(*LevelDb) - var lastSha *wire.ShaHash + var lastSha *chainhash.Hash // forward scan blockforward: for { @@ -119,7 +121,7 @@ blockforward: //no blocks in db, odd but ok. lastknownblock = -1 nextunknownblock = 0 - var emptysha wire.ShaHash + var emptysha chainhash.Hash lastSha = &emptysha } else { nextunknownblock = testblock @@ -144,18 +146,16 @@ blocknarrow: } } - log.Infof("Checking address index") - // Load the last block whose transactions have been indexed by address. if sha, idx, err := ldb.fetchAddrIndexTip(); err == nil { if err = ldb.checkAddrIndexVersion(); err == nil { ldb.lastAddrIndexBlkSha = *sha ldb.lastAddrIndexBlkIdx = idx - log.Infof("Address index good, continuing") + log.Infof("Address index synced and loaded to height %v", idx) } else { log.Infof("Address index in old, incompatible format, dropping...") ldb.deleteOldAddrIndex() - ldb.DeleteAddrIndex() + ldb.PurgeAddrIndex() log.Infof("Old, incompatible address index dropped and can now be rebuilt") } } else { @@ -178,8 +178,8 @@ func openDB(dbpath string, create bool) (pbdb database.Db, err error) { if err == nil { db.lDb = tlDb - db.txUpdateMap = map[wire.ShaHash]*txUpdateObj{} - db.txSpentUpdateMap = make(map[wire.ShaHash]*spentTxUpdate) + db.txUpdateMap = map[chainhash.Hash]*txUpdateObj{} + db.txSpentUpdateMap = make(map[chainhash.Hash]*spentTxUpdate) pbdb = &db } @@ -301,7 +301,7 @@ func (db *LevelDb) Close() error { // DropAfterBlockBySha will remove any blocks from the database after // the given block. 
-func (db *LevelDb) DropAfterBlockBySha(sha *wire.ShaHash) (rerr error) { +func (db *LevelDb) DropAfterBlockBySha(sha *chainhash.Hash) (rerr error) { db.dbLock.Lock() defer db.dbLock.Unlock() defer func() { @@ -322,28 +322,63 @@ func (db *LevelDb) DropAfterBlockBySha(sha *wire.ShaHash) (rerr error) { } for height := startheight; height > keepidx; height = height - 1 { - var blk *btcutil.Block + var blk *dcrutil.Block blksha, buf, err := db.getBlkByHeight(height) if err != nil { return err } - blk, err = btcutil.NewBlockFromBytes(buf) + + blk, err = dcrutil.NewBlockFromBytes(buf) if err != nil { return err } - for _, tx := range blk.MsgBlock().Transactions { + // Obtain previous block sha and buffer + var blkprev *dcrutil.Block + _, bufprev, errprev := db.getBlkByHeight(height - 1) // discard blkshaprev + if errprev != nil { + return errprev + } + + // Do the same thing for the parent block + blkprev, errprev = dcrutil.NewBlockFromBytes(bufprev) + if errprev != nil { + return errprev + } + + // Unspend the stake tx in the current block + for _, tx := range blk.MsgBlock().STransactions { err = db.unSpend(tx) if err != nil { return err } } // rather than iterate the list of tx backward, do it twice. - for _, tx := range blk.Transactions() { + for _, tx := range blk.STransactions() { var txUo txUpdateObj txUo.delete = true db.txUpdateMap[*tx.Sha()] = &txUo } + + // Check to see if the regular txs of the parent were even included; if + // they are, unspend all of these regular tx too + votebits := blk.MsgBlock().Header.VoteBits + if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && height != 0 { + // Unspend the regular tx in the current block + for _, tx := range blkprev.MsgBlock().Transactions { + err = db.unSpend(tx) + if err != nil { + return err + } + } + // rather than iterate the list of tx backward, do it twice. + for _, tx := range blkprev.Transactions() { + var txUo txUpdateObj + txUo.delete = true + db.txUpdateMap[*tx.Sha()] = &txUo + } + } + db.lBatch().Delete(shaBlkToKey(blksha)) db.lBatch().Delete(int64ToKey(height)) } @@ -361,7 +396,27 @@ func (db *LevelDb) DropAfterBlockBySha(sha *wire.ShaHash) (rerr error) { // database. The first block inserted into the database will be treated as the // genesis block. Every subsequent block insert requires the referenced parent // block to already exist. -func (db *LevelDb) InsertBlock(block *btcutil.Block) (height int64, rerr error) { +func (db *LevelDb) InsertBlock(block *dcrutil.Block) (height int64, rerr error) { + // Be careful with this function on syncs. It contains decred changes. + + // Obtain the previous block first so long as it's not the genesis block + var blockPrev *dcrutil.Block = nil + + // Decred: WARNING. This function assumes that all block insertion calls have + // dcrutil.blocks passed to them with block.blockHeight set correctly. However, + // loading the genesis block in btcd didn't do this (via block manager); pre- + // production it should be established that all calls to this function pass + // blocks with block.blockHeight set correctly. 
+ if block.Height() != 0 { + var errBlockPrev error + blockPrev, errBlockPrev = db.FetchBlockBySha(&block.MsgBlock().Header.PrevBlock) + if errBlockPrev != nil { + blockSha := block.Sha() + log.Warnf("Failed to fetch parent block of block %v", blockSha) + return 0, errBlockPrev + } + } + db.dbLock.Lock() defer db.dbLock.Unlock() defer func() { @@ -379,9 +434,9 @@ func (db *LevelDb) InsertBlock(block *btcutil.Block) (height int64, rerr error) log.Warnf("Failed to obtain raw block sha %v", blocksha) return 0, err } - txloc, err := block.TxLoc() + _, sTxLoc, err := block.TxLoc() if err != nil { - log.Warnf("Failed to obtain raw block sha %v", blocksha) + log.Warnf("Failed to obtain raw block sha %v, stxloc %v", blocksha, sTxLoc) return 0, err } @@ -394,81 +449,94 @@ func (db *LevelDb) InsertBlock(block *btcutil.Block) (height int64, rerr error) return 0, err } - // At least two blocks in the long past were generated by faulty - // miners, the sha of the transaction exists in a previous block, - // detect this condition and 'accept' the block. - for txidx, tx := range mblock.Transactions { - txsha, err := block.TxSha(txidx) + // Get data necessary to process regular tx tree of parent block if it's not + // the genesis block. + var mBlockPrev *wire.MsgBlock = nil + var txLoc []wire.TxLoc + + if blockPrev != nil { + blockShaPrev := blockPrev.Sha() + + mBlockPrev = blockPrev.MsgBlock() + + txLoc, _, err = blockPrev.TxLoc() if err != nil { - log.Warnf("failed to compute tx name block %v idx %v err %v", blocksha, txidx, err) - return 0, err - } - spentbuflen := (len(tx.TxOut) + 7) / 8 - spentbuf := make([]byte, spentbuflen, spentbuflen) - if len(tx.TxOut)%8 != 0 { - for i := uint(len(tx.TxOut) % 8); i < 8; i++ { - spentbuf[spentbuflen-1] |= (byte(1) << i) - } - } - - err = db.insertTx(txsha, newheight, txloc[txidx].TxStart, txloc[txidx].TxLen, spentbuf) - if err != nil { - log.Warnf("block %v idx %v failed to insert tx %v %v err %v", blocksha, newheight, &txsha, txidx, err) - return 0, err - } - - // Some old blocks contain duplicate transactions - // Attempt to cleanly bypass this problem by marking the - // first as fully spent. 
- // http://blockexplorer.com/b/91812 dup in 91842 - // http://blockexplorer.com/b/91722 dup in 91880 - if newheight == 91812 { - dupsha, err := wire.NewShaHashFromStr("d5d27987d2a3dfc724e359870c6644b40e497bdc0589a033220fe15429d88599") - if err != nil { - panic("invalid sha string in source") - } - if txsha.IsEqual(dupsha) { - // marking TxOut[0] as spent - po := wire.NewOutPoint(dupsha, 0) - txI := wire.NewTxIn(po, []byte("garbage")) - - var spendtx wire.MsgTx - spendtx.AddTxIn(txI) - err = db.doSpend(&spendtx) - if err != nil { - log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, &txsha, txidx, err) - } - } - } - if newheight == 91722 { - dupsha, err := wire.NewShaHashFromStr("e3bf3d07d4b0375638d5f1db5255fe07ba2c4cb067cd81b84ee974b6585fb468") - if err != nil { - panic("invalid sha string in source") - } - if txsha.IsEqual(dupsha) { - // marking TxOut[0] as spent - po := wire.NewOutPoint(dupsha, 0) - txI := wire.NewTxIn(po, []byte("garbage")) - - var spendtx wire.MsgTx - spendtx.AddTxIn(txI) - err = db.doSpend(&spendtx) - if err != nil { - log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, &txsha, txidx, err) - } - } - } - - err = db.doSpend(tx) - if err != nil { - log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, txsha, txidx, err) + log.Warnf("Failed to obtain raw block sha %v, txloc %v", blockShaPrev, txLoc) return 0, err } } + + // Insert the regular tx of the parent block into the tx database if the vote + // bits enable it, and if it's not the genesis block. + votebits := mblock.Header.VoteBits + if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil { + for txidx, tx := range mBlockPrev.Transactions { + txsha, err := blockPrev.TxSha(txidx) + + if err != nil { + log.Warnf("failed to compute tx name block %v idx %v err %v", blocksha, txidx, err) + return 0, err + } + spentbuflen := (len(tx.TxOut) + 7) / 8 + spentbuf := make([]byte, spentbuflen, spentbuflen) + if len(tx.TxOut)%8 != 0 { + for i := uint(len(tx.TxOut) % 8); i < 8; i++ { + spentbuf[spentbuflen-1] |= (byte(1) << i) + } + } + + // newheight-1 instead of newheight below, as the tx is actually found + // in the parent. + //fmt.Printf("insert tx %v into db at height %v\n", txsha, newheight) + err = db.insertTx(txsha, newheight-1, uint32(txidx), txLoc[txidx].TxStart, txLoc[txidx].TxLen, spentbuf) + if err != nil { + log.Warnf("block %v idx %v failed to insert tx %v %v err %v", blocksha, newheight-1, &txsha, txidx, err) + return 0, err + } + + err = db.doSpend(tx) + if err != nil { + log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, txsha, txidx, err) + return 0, err + } + } + } + + // Insert the stake tx of the current block into the tx database. 
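The gate above is the heart of the Decred change to InsertBlock: a block's regular transaction tree is only committed once a child block's vote bits approve it, which is why the parent's transactions are inserted at newheight-1 when the flag is set. A minimal stand-alone version of that flag test is sketched below, assuming BlockValid is the low bit of the 16-bit vote bits field and that IsFlagSet16 is a bitwise containment check; the authoritative definitions live in dcrutil and are only referenced by this patch.

```go
package main

import "fmt"

// blockValid is assumed here to be the low bit of the 16-bit vote bits
// field; the real constant is dcrutil.BlockValid.
const blockValid uint16 = 1 << 0

// isFlagSet16 reports whether every bit in flag is set in voteBits,
// mirroring how dcrutil.IsFlagSet16 is used in these hunks.
func isFlagSet16(voteBits, flag uint16) bool {
	return voteBits&flag == flag
}

// commitParentRegularTree reports whether the parent block's regular tx
// tree should be written when inserting the child block.
func commitParentRegularTree(childVoteBits uint16, haveParent bool) bool {
	return haveParent && isFlagSet16(childVoteBits, blockValid)
}

func main() {
	fmt.Println(commitParentRegularTree(0x0001, true))  // true: parent approved
	fmt.Println(commitParentRegularTree(0x0000, true))  // false: parent disapproved
	fmt.Println(commitParentRegularTree(0x0001, false)) // false: genesis has no parent
}
```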
+ if len(mblock.STransactions) != 0 { + for txidx, tx := range mblock.STransactions { + txsha, err := block.STxSha(txidx) + + if err != nil { + log.Warnf("failed to compute stake tx name block %v idx %v err %v", blocksha, txidx, err) + return 0, err + } + spentbuflen := (len(tx.TxOut) + 7) / 8 + spentbuf := make([]byte, spentbuflen, spentbuflen) + if len(tx.TxOut)%8 != 0 { + for i := uint(len(tx.TxOut) % 8); i < 8; i++ { + spentbuf[spentbuflen-1] |= (byte(1) << i) + } + } + + err = db.insertTx(txsha, newheight, uint32(txidx), sTxLoc[txidx].TxStart, sTxLoc[txidx].TxLen, spentbuf) + if err != nil { + log.Warnf("block %v idx %v failed to insert stake tx %v %v err %v", blocksha, newheight, &txsha, txidx, err) + return 0, err + } + + err = db.doSpend(tx) + if err != nil { + log.Warnf("block %v idx %v failed to spend stx %v %v err %v", blocksha, newheight, txsha, txidx, err) + return 0, err + } + } + } + return newheight, nil } -// doSpend iterates all TxIn in a bitcoin transaction marking each associated +// doSpend iterates all TxIn in a decred transaction marking each associated // TxOut as spent. func (db *LevelDb) doSpend(tx *wire.MsgTx) error { for txinidx := range tx.TxIn { @@ -481,8 +549,6 @@ func (db *LevelDb) doSpend(tx *wire.MsgTx) error { continue } - //log.Infof("spending %v %v", &inTxSha, inTxidx) - err := db.setSpentData(&inTxSha, inTxidx) if err != nil { return err @@ -491,7 +557,7 @@ func (db *LevelDb) doSpend(tx *wire.MsgTx) error { return nil } -// unSpend iterates all TxIn in a bitcoin transaction marking each associated +// unSpend iterates all TxIn in a decred transaction marking each associated // TxOut as unspent. func (db *LevelDb) unSpend(tx *wire.MsgTx) error { for txinidx := range tx.TxIn { @@ -512,25 +578,39 @@ func (db *LevelDb) unSpend(tx *wire.MsgTx) error { return nil } -func (db *LevelDb) setSpentData(sha *wire.ShaHash, idx uint32) error { +func (db *LevelDb) setSpentData(sha *chainhash.Hash, idx uint32) error { return db.setclearSpentData(sha, idx, true) } -func (db *LevelDb) clearSpentData(sha *wire.ShaHash, idx uint32) error { +func (db *LevelDb) clearSpentData(sha *chainhash.Hash, idx uint32) error { return db.setclearSpentData(sha, idx, false) } -func (db *LevelDb) setclearSpentData(txsha *wire.ShaHash, idx uint32, set bool) error { +func (db *LevelDb) setclearSpentData(txsha *chainhash.Hash, idx uint32, set bool) error { var txUo *txUpdateObj var ok bool if txUo, ok = db.txUpdateMap[*txsha]; !ok { // not cached, load from db var txU txUpdateObj - blkHeight, txOff, txLen, spentData, err := db.getTxData(txsha) + blkHeight, blkIndex, txOff, txLen, spentData, err := db.getTxData(txsha) if err != nil { // setting a fully spent tx is an error. if set == true { + log.Warnf("setclearSpentData attempted to set fully spent tx "+ + "%v %v %v", + txsha, + idx, + set) + + // if we are clearing a tx and it wasn't found + // in the tx table, it could be in the fully spent + // (duplicates) table. + _, err := db.getTxFullySpent(txsha) + if err != nil { + log.Warnf("getTxFullySpent couldn't find the tx either: %v", + err.Error()) + } return err } // if we are clearing a tx and it wasn't found @@ -538,6 +618,12 @@ func (db *LevelDb) setclearSpentData(txsha *wire.ShaHash, idx uint32, set bool) // (duplicates) table. 
spentTxList, err := db.getTxFullySpent(txsha) if err != nil { + log.Warnf("encountered setclearSpentData error for tx hash %v "+ + "idx %v set %v: getTxFullySpent returned %v", + txsha, + idx, + set, + err.Error()) return err } @@ -555,6 +641,7 @@ func (db *LevelDb) setclearSpentData(txsha *wire.ShaHash, idx uint32, set bool) // Create 'new' Tx update data. blkHeight = sTx.blkHeight + blkIndex = sTx.blkIndex txOff = sTx.txoff txLen = sTx.txlen spentbuflen := (sTx.numTxO + 7) / 8 @@ -566,6 +653,7 @@ func (db *LevelDb) setclearSpentData(txsha *wire.ShaHash, idx uint32, set bool) txU.txSha = txsha txU.blkHeight = blkHeight + txU.blkIndex = blkIndex txU.txoff = txOff txU.txlen = txLen txU.spentData = spentData @@ -608,6 +696,7 @@ func (db *LevelDb) setclearSpentData(txsha *wire.ShaHash, idx uint32, set bool) // Fill in spentTx var sTx spentTx sTx.blkHeight = txUo.blkHeight + sTx.blkIndex = txUo.blkIndex sTx.txoff = txUo.txoff sTx.txlen = txUo.txlen // XXX -- there is no way to comput the real TxOut @@ -636,7 +725,7 @@ func int64ToKey(keyint int64) []byte { return []byte(key) } -func shaBlkToKey(sha *wire.ShaHash) []byte { +func shaBlkToKey(sha *chainhash.Hash) []byte { return sha[:] } @@ -645,14 +734,14 @@ func shaBlkToKey(sha *wire.ShaHash) []byte { var recordSuffixTx = []byte{'t', 'x'} var recordSuffixSpentTx = []byte{'s', 'x'} -func shaTxToKey(sha *wire.ShaHash) []byte { +func shaTxToKey(sha *chainhash.Hash) []byte { key := make([]byte, len(sha)+len(recordSuffixTx)) copy(key, sha[:]) copy(key[len(sha):], recordSuffixTx) return key } -func shaSpentTxToKey(sha *wire.ShaHash) []byte { +func shaSpentTxToKey(sha *chainhash.Hash) []byte { key := make([]byte, len(sha)+len(recordSuffixSpentTx)) copy(key, sha[:]) copy(key[len(sha):], recordSuffixSpentTx) @@ -679,10 +768,10 @@ func (db *LevelDb) processBatches() error { for txSha, txU := range db.txUpdateMap { key := shaTxToKey(&txSha) if txU.delete { - //log.Tracef("deleting tx %v", txSha) + log.Tracef("deleting tx %v", txSha) db.lbatch.Delete(key) } else { - //log.Tracef("inserting tx %v", txSha) + log.Tracef("inserting tx %v", txSha) txdat := db.formatTx(txU) db.lbatch.Put(key, txdat) } @@ -690,10 +779,10 @@ func (db *LevelDb) processBatches() error { for txSha, txSu := range db.txSpentUpdateMap { key := shaSpentTxToKey(&txSha) if txSu.delete { - //log.Tracef("deleting tx %v", txSha) + log.Tracef("deleting tx %v", txSha) db.lbatch.Delete(key) } else { - //log.Tracef("inserting tx %v", txSha) + log.Tracef("inserting tx %v", txSha) txdat := db.formatTxFullySpent(txSu.txl) db.lbatch.Put(key, txdat) } @@ -704,8 +793,8 @@ func (db *LevelDb) processBatches() error { log.Tracef("batch failed %v\n", err) return err } - db.txUpdateMap = map[wire.ShaHash]*txUpdateObj{} - db.txSpentUpdateMap = make(map[wire.ShaHash]*spentTxUpdate) + db.txUpdateMap = map[chainhash.Hash]*txUpdateObj{} + db.txSpentUpdateMap = make(map[chainhash.Hash]*spentTxUpdate) } return nil diff --git a/database/ldb/operational_test.go b/database/ldb/operational_test.go index 6767dc57..0dfbe068 100644 --- a/database/ldb/operational_test.go +++ b/database/ldb/operational_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
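The three key shapes touched above determine how records coexist in the single LevelDB namespace: a block is keyed by its raw 32-byte hash, a live transaction by hash plus the "tx" suffix, and a fully spent transaction by hash plus the "sx" suffix. Below is a dependency-free sketch of that scheme using a byte-slice hash; it mirrors shaBlkToKey, shaTxToKey, and shaSpentTxToKey but is not the patch's code.

```go
package main

import "fmt"

var (
	suffixTx      = []byte("tx") // live transaction records
	suffixSpentTx = []byte("sx") // fully spent transaction records
)

// blockKey keys a block record by its raw hash, as shaBlkToKey does.
func blockKey(hash []byte) []byte {
	return append([]byte(nil), hash...)
}

// txKey appends the record suffix to the hash so transaction records
// cannot collide with block records sharing the same 32-byte prefix.
func txKey(hash, suffix []byte) []byte {
	key := make([]byte, len(hash)+len(suffix))
	copy(key, hash)
	copy(key[len(hash):], suffix)
	return key
}

func main() {
	hash := make([]byte, 32)
	fmt.Println(len(blockKey(hash)))             // 32
	fmt.Println(len(txKey(hash, suffixTx)))      // 34
	fmt.Println(len(txKey(hash, suffixSpentTx))) // 34
}
```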
@@ -7,19 +8,20 @@ package ldb_test import ( "bytes" "compress/bzip2" - "encoding/binary" - "io" + "encoding/gob" "os" "path/filepath" - "strings" "testing" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" + "github.com/btcsuite/golangcrypto/ripemd160" + // "github.com/davecgh/go-spew/spew" ) var network = wire.MainNet @@ -29,7 +31,7 @@ var network = wire.MainNet // consistency across tests. type testDb struct { db database.Db - blocks []*btcutil.Block + blocks []*dcrutil.Block dbName string dbNameVer string cleanUpFunc func() @@ -45,7 +47,7 @@ func setUpTestDb(t *testing.T, dbname string) (*testDb, error) { return nil, err } - testdatafile := filepath.Join("..", "testdata", "blocks1-256.bz2") + testdatafile := filepath.Join("..", "/../blockchain/testdata", "blocks0to168.bz2") blocks, err := loadBlocks(t, testdatafile) if err != nil { return nil, err @@ -72,14 +74,14 @@ func TestOperational(t *testing.T) { // testAddrIndexOperations ensures that all normal operations concerning // the optional address index function correctly. -func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *btcutil.Block, newestSha *wire.ShaHash, newestBlockIdx int64) { +func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *dcrutil.Block, newestSha *chainhash.Hash, newestBlockIdx int64) { // Metadata about the current addr index state should be unset. sha, height, err := db.FetchAddrIndexTip() if err != database.ErrAddrIndexDoesNotExist { t.Fatalf("Address index metadata shouldn't be in db, hasn't been built up yet.") } - var zeroHash wire.ShaHash + var zeroHash chainhash.Hash if !sha.IsEqual(&zeroHash) { t.Fatalf("AddrIndexTip wrong hash got: %s, want %s", sha, &zeroHash) @@ -90,7 +92,7 @@ func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *btcutil. } // Test enforcement of constraints for "limit" and "skip" - var fakeAddr btcutil.Address + var fakeAddr dcrutil.Address _, err = db.FetchTxsForAddr(fakeAddr, -1, 0) if err == nil { t.Fatalf("Negative value for skip passed, should return an error") @@ -102,7 +104,7 @@ func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *btcutil. } // Simple test to index outputs(s) of the first tx. - testIndex := make(database.BlockAddrIndex) + testIndex := make(database.BlockAddrIndex, database.AddrIndexKeySize) testTx, err := newestBlock.Tx(0) if err != nil { t.Fatalf("Block has no transactions, unable to test addr "+ @@ -110,19 +112,26 @@ func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *btcutil. } // Extract the dest addr from the tx. - _, testAddrs, _, err := txscript.ExtractPkScriptAddrs(testTx.MsgTx().TxOut[0].PkScript, &chaincfg.MainNetParams) + _, testAddrs, _, err := txscript.ExtractPkScriptAddrs(testTx.MsgTx().TxOut[0].Version, testTx.MsgTx().TxOut[0].PkScript, &chaincfg.MainNetParams) if err != nil { t.Fatalf("Unable to decode tx output, err %v", err) } // Extract the hash160 from the output script. 
var hash160Bytes [ripemd160.Size]byte - testHash160 := testAddrs[0].(*btcutil.AddressPubKey).AddressPubKeyHash().ScriptAddress() + testHash160 := testAddrs[0].(*dcrutil.AddressScriptHash).Hash160() copy(hash160Bytes[:], testHash160[:]) // Create a fake index. - blktxLoc, _ := newestBlock.TxLoc() - testIndex[hash160Bytes] = []*wire.TxLoc{&blktxLoc[0]} + blktxLoc, _, _ := newestBlock.TxLoc() + testIndex = []*database.TxAddrIndex{ + &database.TxAddrIndex{ + hash160Bytes, + uint32(newestBlockIdx), + uint32(blktxLoc[0].TxStart), + uint32(blktxLoc[0].TxLen), + }, + } // Insert our test addr index into the DB. err = db.UpdateAddrIndexForBlock(newestSha, newestBlockIdx, testIndex) @@ -165,7 +174,7 @@ func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *btcutil. assertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx) // Delete the entire index. - err = db.DeleteAddrIndex() + err = db.PurgeAddrIndex() if err != nil { t.Fatalf("Couldn't delete address index, err %v", err) } @@ -188,7 +197,7 @@ func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *btcutil. } -func assertAddrIndexTipIsUpdated(db database.Db, t *testing.T, newestSha *wire.ShaHash, newestBlockIdx int64) { +func assertAddrIndexTipIsUpdated(db database.Db, t *testing.T, newestSha *chainhash.Hash, newestBlockIdx int64) { // Safe to ignore error, since height will be < 0 in "error" case. sha, height, _ := db.FetchAddrIndexTip() if newestBlockIdx != height { @@ -217,38 +226,92 @@ func testOperationalMode(t *testing.T) { out: for height := int64(0); height < int64(len(testDb.blocks)); height++ { block := testDb.blocks[height] - mblock := block.MsgBlock() - var txneededList []*wire.ShaHash - for _, tx := range mblock.Transactions { - for _, txin := range tx.TxIn { - if txin.PreviousOutPoint.Index == uint32(4294967295) { - continue - } - origintxsha := &txin.PreviousOutPoint.Hash - txneededList = append(txneededList, origintxsha) + if height != 0 { + // except for NoVerify which does not allow lookups check inputs + mblock := block.MsgBlock() + //t.Errorf("%v", blockchain.DebugBlockString(block)) + parentBlock := testDb.blocks[height-1] + mParentBlock := parentBlock.MsgBlock() + var txneededList []*chainhash.Hash + opSpentInBlock := make(map[wire.OutPoint]struct{}) + if dcrutil.IsFlagSet16(dcrutil.BlockValid, mParentBlock.Header.VoteBits) { + for _, tx := range mParentBlock.Transactions { + for _, txin := range tx.TxIn { + if txin.PreviousOutPoint.Index == uint32(4294967295) { + continue + } - exists, err := testDb.db.ExistsTxSha(origintxsha) - if err != nil { - t.Errorf("ExistsTxSha: unexpected error %v ", err) - } - if !exists { - t.Errorf("referenced tx not found %v ", origintxsha) - } + if existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) { + _, used := opSpentInBlock[txin.PreviousOutPoint] + if !used { + // Origin tx is in the block and so hasn't been + // added yet, continue + opSpentInBlock[txin.PreviousOutPoint] = struct{}{} + continue + } else { + t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint) + } + } - _, err = testDb.db.FetchTxBySha(origintxsha) - if err != nil { - t.Errorf("referenced tx not found %v err %v ", origintxsha, err) + origintxsha := &txin.PreviousOutPoint.Hash + txneededList = append(txneededList, origintxsha) + exists, err := testDb.db.ExistsTxSha(origintxsha) + if err != nil { + t.Errorf("ExistsTxSha: unexpected error %v ", err) + } + if !exists { + t.Errorf("referenced tx not found %v (height %v)", 
origintxsha, height) + } + + _, err = testDb.db.FetchTxBySha(origintxsha) + if err != nil { + t.Errorf("referenced tx not found %v err %v ", origintxsha, err) + } + } + } + } + for _, stx := range mblock.STransactions { + for _, txin := range stx.TxIn { + if txin.PreviousOutPoint.Index == uint32(4294967295) { + continue + } + if existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) { + _, used := opSpentInBlock[txin.PreviousOutPoint] + if !used { + // Origin tx is in the block and so hasn't been + // added yet, continue + opSpentInBlock[txin.PreviousOutPoint] = struct{}{} + continue + } else { + t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint) + } + } + + origintxsha := &txin.PreviousOutPoint.Hash + txneededList = append(txneededList, origintxsha) + + exists, err := testDb.db.ExistsTxSha(origintxsha) + if err != nil { + t.Errorf("ExistsTxSha: unexpected error %v ", err) + } + if !exists { + t.Errorf("referenced tx not found %v", origintxsha) + } + + _, err = testDb.db.FetchTxBySha(origintxsha) + if err != nil { + t.Errorf("referenced tx not found %v err %v ", origintxsha, err) + } + } + } + txlist := testDb.db.FetchUnSpentTxByShaList(txneededList) + for _, txe := range txlist { + if txe.Err != nil { + t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err) + break out } } } - txlist := testDb.db.FetchUnSpentTxByShaList(txneededList) - for _, txe := range txlist { - if txe.Err != nil { - t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err) - break out - } - } - newheight, err := testDb.db.InsertBlock(block) if err != nil { t.Errorf("failed to insert block %v err %v", height, err) @@ -308,11 +371,9 @@ func testBackout(t *testing.T) { err = nil for height := int64(0); height < int64(len(testDb.blocks)); height++ { if height == 100 { - t.Logf("Syncing at block height 100") testDb.db.Sync() } if height == 120 { - t.Logf("Simulating unexpected application quit") // Simulate unexpected application quit testDb.db.RollbackClose() break @@ -365,7 +426,8 @@ func testBackout(t *testing.T) { return } - block := testDb.blocks[119] + // pick block 118 since tx for block 119 wont be inserted until block 120 is seen to be valid + block := testDb.blocks[118] mblock := block.MsgBlock() txsha := mblock.Transactions[0].TxSha() exists, err := testDb.db.ExistsTxSha(&txsha) @@ -383,83 +445,47 @@ func testBackout(t *testing.T) { } } -var savedblocks []*btcutil.Block - -func loadBlocks(t *testing.T, file string) (blocks []*btcutil.Block, err error) { - if len(savedblocks) != 0 { - blocks = savedblocks - return - } - testdatafile := filepath.Join("..", "testdata", "blocks1-256.bz2") - var dr io.Reader - var fi io.ReadCloser - fi, err = os.Open(testdatafile) +func loadBlocks(t *testing.T, file string) (blocks []*dcrutil.Block, err error) { + fi, err := os.Open(file) if err != nil { - t.Errorf("failed to open file %v, err %v", testdatafile, err) - return + t.Errorf("failed to open file %v, err %v", file, err) + return nil, err } - if strings.HasSuffix(testdatafile, ".bz2") { - z := bzip2.NewReader(fi) - dr = z - } else { - dr = fi + bcStream := bzip2.NewReader(fi) + defer fi.Close() + + // Create a buffer of the read file + bcBuf := new(bytes.Buffer) + bcBuf.ReadFrom(bcStream) + + // Create decoder from the buffer and a map to store the data + bcDecoder := gob.NewDecoder(bcBuf) + blockchain := make(map[int64][]byte) + + // Decode the blockchain into the map + if err := bcDecoder.Decode(&blockchain); err != nil { + t.Errorf("error decoding 
test blockchain") } - - defer func() { - if err := fi.Close(); err != nil { - t.Errorf("failed to close file %v %v", testdatafile, err) - } - }() - - // Set the first block as the genesis block. - genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) - blocks = append(blocks, genesis) - - var block *btcutil.Block - err = nil - for height := int64(1); err == nil; height++ { - var rintbuf uint32 - err = binary.Read(dr, binary.LittleEndian, &rintbuf) - if err == io.EOF { - // hit end of file at expected offset: no warning - height-- - err = nil - break - } - if err != nil { - t.Errorf("failed to load network type, err %v", err) - break - } - if rintbuf != uint32(network) { - t.Errorf("Block doesn't match network: %v expects %v", - rintbuf, network) - break - } - err = binary.Read(dr, binary.LittleEndian, &rintbuf) - blocklen := rintbuf - - rbytes := make([]byte, blocklen) - - // read block - dr.Read(rbytes) - - block, err = btcutil.NewBlockFromBytes(rbytes) + blocks = make([]*dcrutil.Block, 0, len(blockchain)) + for height := int64(1); height < int64(len(blockchain)); height++ { + block, err := dcrutil.NewBlockFromBytes(blockchain[height]) if err != nil { t.Errorf("failed to parse block %v", height) - return + return nil, err } + block.SetHeight(height - 1) blocks = append(blocks, block) } - savedblocks = blocks + return } -func testFetchHeightRange(t *testing.T, db database.Db, blocks []*btcutil.Block) { +func testFetchHeightRange(t *testing.T, db database.Db, blocks []*dcrutil.Block) { var testincrement int64 = 50 var testcnt int64 = 100 - shanames := make([]*wire.ShaHash, len(blocks)) + shanames := make([]*chainhash.Hash, len(blocks)) nBlocks := int64(len(blocks)) @@ -507,11 +533,14 @@ func TestLimitAndSkipFetchTxsForAddr(t *testing.T) { return } defer testDb.cleanUpFunc() - + _, err = testDb.db.InsertBlock(testDb.blocks[0]) + if err != nil { + t.Fatalf("failed to insert initial block") + } // Insert a block with some fake test transactions. The block will have // 10 copies of a fake transaction involving same address. - addrString := "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa" - targetAddr, err := btcutil.DecodeAddress(addrString, &chaincfg.MainNetParams) + addrString := "DsZEAobx6qJ7K2qaHZBA2vBn66Nor8KYAKk" + targetAddr, err := dcrutil.DecodeAddress(addrString, &chaincfg.MainNetParams) if err != nil { t.Fatalf("Unable to decode test address: %v", err) } @@ -520,30 +549,38 @@ func TestLimitAndSkipFetchTxsForAddr(t *testing.T) { t.Fatalf("Unable make test pkScript %v", err) } fakeTxOut := wire.NewTxOut(10, outputScript) - var emptyHash wire.ShaHash - fakeHeader := wire.NewBlockHeader(&emptyHash, &emptyHash, 1, 1) + var emptyHash chainhash.Hash + fakeHeader := wire.NewBlockHeader(0, &emptyHash, &emptyHash, &emptyHash, 1, [6]byte{}, 1, 1, 1, 1, 1, 1, 1, 1, 1, [36]byte{}) msgBlock := wire.NewMsgBlock(fakeHeader) for i := 0; i < 10; i++ { mtx := wire.NewMsgTx() mtx.AddTxOut(fakeTxOut) msgBlock.AddTransaction(mtx) } - + lastBlock := testDb.blocks[0] + msgBlock.Header.PrevBlock = *lastBlock.Sha() // Insert the test block into the DB. - testBlock := btcutil.NewBlock(msgBlock) + testBlock := dcrutil.NewBlock(msgBlock) newheight, err := testDb.db.InsertBlock(testBlock) if err != nil { t.Fatalf("Unable to insert block into db: %v", err) } // Create and insert an address index for out test addr. 
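The reworked loadBlocks above drops the old little-endian framed format in favor of a bzip2-compressed gob encoding of a map from block height to serialized block bytes. A minimal reader for that container, with dcrutil.NewBlockFromBytes left out so it runs without dcrd packages, might look like the sketch below; the path in main is hypothetical, the tests use ../../blockchain/testdata/blocks0to168.bz2.

```go
package main

import (
	"bytes"
	"compress/bzip2"
	"encoding/gob"
	"fmt"
	"os"
)

// readBlockFile decodes a bzip2-compressed, gob-encoded map of
// block height -> serialized block bytes, the format the updated
// loadBlocks expects.
func readBlockFile(path string) (map[int64][]byte, error) {
	fi, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer fi.Close()

	// Decompress the whole stream into memory, then gob-decode it.
	buf := new(bytes.Buffer)
	if _, err := buf.ReadFrom(bzip2.NewReader(fi)); err != nil {
		return nil, err
	}
	chain := make(map[int64][]byte)
	if err := gob.NewDecoder(buf).Decode(&chain); err != nil {
		return nil, err
	}
	return chain, nil
}

func main() {
	chain, err := readBlockFile("blocks0to168.bz2")
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}
	// In the tests each entry is handed to dcrutil.NewBlockFromBytes;
	// here we only report how many serialized blocks were found.
	fmt.Println("blocks decoded:", len(chain))
}
```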
- txLoc, _ := testBlock.TxLoc() - index := make(database.BlockAddrIndex) + txLoc, _, _ := testBlock.TxLoc() + index := make(database.BlockAddrIndex, len(txLoc)) for i := range testBlock.Transactions() { var hash160 [ripemd160.Size]byte scriptAddr := targetAddr.ScriptAddress() copy(hash160[:], scriptAddr[:]) - index[hash160] = append(index[hash160], &txLoc[i]) + txAddrIndex := &database.TxAddrIndex{ + hash160, + uint32(newheight), + uint32(txLoc[i].TxStart), + uint32(txLoc[i].TxLen), + } + + index[i] = txAddrIndex } blkSha := testBlock.Sha() err = testDb.db.UpdateAddrIndexForBlock(blkSha, newheight, index) diff --git a/database/ldb/tx.go b/database/ldb/tx.go index b0f5586f..3b396e0a 100644 --- a/database/ldb/tx.go +++ b/database/ldb/tx.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,13 +9,16 @@ import ( "bytes" "encoding/binary" "errors" + "fmt" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" "github.com/btcsuite/golangcrypto/ripemd160" "github.com/btcsuite/goleveldb/leveldb" "github.com/btcsuite/goleveldb/leveldb/util" + + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) const ( @@ -45,8 +49,9 @@ var addrIndexKeyPrefix = []byte("a+-") var addrIndexVersionKey = []byte("addrindexversion") type txUpdateObj struct { - txSha *wire.ShaHash + txSha *chainhash.Hash blkHeight int64 + blkIndex uint32 txoff int txlen int ntxout int @@ -56,6 +61,7 @@ type txUpdateObj struct { type spentTx struct { blkHeight int64 + blkIndex uint32 txoff int txlen int numTxO int @@ -66,28 +72,22 @@ type spentTxUpdate struct { delete bool } -type txAddrIndex struct { - hash160 [ripemd160.Size]byte - blkHeight int64 - txoffset int - txlen int -} - // InsertTx inserts a tx hash and its associated data into the database. -func (db *LevelDb) InsertTx(txsha *wire.ShaHash, height int64, txoff int, txlen int, spentbuf []byte) (err error) { +func (db *LevelDb) InsertTx(txsha *chainhash.Hash, height int64, idx uint32, txoff int, txlen int, spentbuf []byte) (err error) { db.dbLock.Lock() defer db.dbLock.Unlock() - return db.insertTx(txsha, height, txoff, txlen, spentbuf) + return db.insertTx(txsha, height, idx, txoff, txlen, spentbuf) } // insertTx inserts a tx hash and its associated data into the database. // Must be called with db lock held. 
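With database.BlockAddrIndex now a slice of *database.TxAddrIndex entries rather than a map keyed by hash160, building the per-block index is just collecting one entry per transaction location, as the test above does before calling UpdateAddrIndexForBlock. A stand-in sketch of that collection step, with plain structs instead of the dcrd types:

```go
package main

import "fmt"

// txLoc mirrors wire.TxLoc: where a serialized tx starts inside the raw
// block and how many bytes it occupies.
type txLoc struct {
	txStart int
	txLen   int
}

// txAddrIndexEntry is a stand-in for database.TxAddrIndex.
type txAddrIndexEntry struct {
	hash160  [20]byte
	height   uint32
	txOffset uint32
	txLen    uint32
}

// buildBlockAddrIndex collects one index entry per transaction paying to
// addr, mirroring how the test fills its slice for one block.
func buildBlockAddrIndex(addr [20]byte, height uint32, locs []txLoc) []*txAddrIndexEntry {
	index := make([]*txAddrIndexEntry, 0, len(locs))
	for _, loc := range locs {
		index = append(index, &txAddrIndexEntry{
			hash160:  addr,
			height:   height,
			txOffset: uint32(loc.txStart),
			txLen:    uint32(loc.txLen),
		})
	}
	return index
}

func main() {
	var addr [20]byte
	locs := []txLoc{{txStart: 81, txLen: 200}, {txStart: 281, txLen: 190}}
	index := buildBlockAddrIndex(addr, 1, locs)
	fmt.Println(len(index), index[0].txOffset, index[1].txOffset) // 2 81 281
}
```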
-func (db *LevelDb) insertTx(txSha *wire.ShaHash, height int64, txoff int, txlen int, spentbuf []byte) (err error) { +func (db *LevelDb) insertTx(txSha *chainhash.Hash, height int64, idx uint32, txoff int, txlen int, spentbuf []byte) (err error) { var txU txUpdateObj txU.txSha = txSha txU.blkHeight = height + txU.blkIndex = idx txU.txoff = txoff txU.txlen = txlen txU.spentData = spentbuf @@ -104,33 +104,36 @@ func (db *LevelDb) formatTx(txu *txUpdateObj) []byte { txLen := uint32(txu.txlen) spentbuf := txu.spentData - txW := make([]byte, 16+len(spentbuf)) + txW := make([]byte, 20+len(spentbuf)) binary.LittleEndian.PutUint64(txW[0:8], blkHeight) - binary.LittleEndian.PutUint32(txW[8:12], txOff) - binary.LittleEndian.PutUint32(txW[12:16], txLen) - copy(txW[16:], spentbuf) + binary.LittleEndian.PutUint32(txW[8:12], txu.blkIndex) + binary.LittleEndian.PutUint32(txW[12:16], txOff) + binary.LittleEndian.PutUint32(txW[16:20], txLen) + + copy(txW[20:], spentbuf) return txW[:] } -func (db *LevelDb) getTxData(txsha *wire.ShaHash) (int64, int, int, []byte, error) { +func (db *LevelDb) getTxData(txsha *chainhash.Hash) (int64, uint32, int, int, []byte, error) { key := shaTxToKey(txsha) buf, err := db.lDb.Get(key, db.ro) if err != nil { - return 0, 0, 0, nil, err + return 0, 0, 0, 0, nil, err } blkHeight := binary.LittleEndian.Uint64(buf[0:8]) - txOff := binary.LittleEndian.Uint32(buf[8:12]) - txLen := binary.LittleEndian.Uint32(buf[12:16]) + blkIndex := binary.LittleEndian.Uint32(buf[8:12]) + txOff := binary.LittleEndian.Uint32(buf[12:16]) + txLen := binary.LittleEndian.Uint32(buf[16:20]) - spentBuf := make([]byte, len(buf)-16) - copy(spentBuf, buf[16:]) + spentBuf := make([]byte, len(buf)-20) + copy(spentBuf, buf[20:]) - return int64(blkHeight), int(txOff), int(txLen), spentBuf, nil + return int64(blkHeight), blkIndex, int(txOff), int(txLen), spentBuf, nil } -func (db *LevelDb) getTxFullySpent(txsha *wire.ShaHash) ([]*spentTx, error) { +func (db *LevelDb) getTxFullySpent(txsha *chainhash.Hash) ([]*spentTx, error) { var badTxList, spentTxList []*spentTx @@ -141,19 +144,21 @@ func (db *LevelDb) getTxFullySpent(txsha *wire.ShaHash) ([]*spentTx, error) { } else if err != nil { return badTxList, err } - txListLen := len(buf) / 20 + txListLen := len(buf) / 24 spentTxList = make([]*spentTx, txListLen, txListLen) for i := range spentTxList { - offset := i * 20 + offset := i * 24 blkHeight := binary.LittleEndian.Uint64(buf[offset : offset+8]) - txOff := binary.LittleEndian.Uint32(buf[offset+8 : offset+12]) - txLen := binary.LittleEndian.Uint32(buf[offset+12 : offset+16]) - numTxO := binary.LittleEndian.Uint32(buf[offset+16 : offset+20]) + blkIndex := binary.LittleEndian.Uint32(buf[offset+8 : offset+12]) + txOff := binary.LittleEndian.Uint32(buf[offset+12 : offset+16]) + txLen := binary.LittleEndian.Uint32(buf[offset+16 : offset+20]) + numTxO := binary.LittleEndian.Uint32(buf[offset+20 : offset+24]) sTx := spentTx{ blkHeight: int64(blkHeight), + blkIndex: blkIndex, txoff: int(txOff), txlen: int(txLen), numTxO: int(numTxO), @@ -166,26 +171,28 @@ func (db *LevelDb) getTxFullySpent(txsha *wire.ShaHash) ([]*spentTx, error) { } func (db *LevelDb) formatTxFullySpent(sTxList []*spentTx) []byte { - txW := make([]byte, 20*len(sTxList)) + txW := make([]byte, 24*len(sTxList)) for i, sTx := range sTxList { blkHeight := uint64(sTx.blkHeight) + blkIndex := sTx.blkIndex txOff := uint32(sTx.txoff) txLen := uint32(sTx.txlen) numTxO := uint32(sTx.numTxO) - offset := i * 20 + offset := i * 24 
binary.LittleEndian.PutUint64(txW[offset:offset+8], blkHeight) - binary.LittleEndian.PutUint32(txW[offset+8:offset+12], txOff) - binary.LittleEndian.PutUint32(txW[offset+12:offset+16], txLen) - binary.LittleEndian.PutUint32(txW[offset+16:offset+20], numTxO) + binary.LittleEndian.PutUint32(txW[offset+8:offset+12], blkIndex) + binary.LittleEndian.PutUint32(txW[offset+12:offset+16], txOff) + binary.LittleEndian.PutUint32(txW[offset+16:offset+20], txLen) + binary.LittleEndian.PutUint32(txW[offset+20:offset+24], numTxO) } return txW } // ExistsTxSha returns if the given tx sha exists in the database -func (db *LevelDb) ExistsTxSha(txsha *wire.ShaHash) (bool, error) { +func (db *LevelDb) ExistsTxSha(txsha *chainhash.Hash) (bool, error) { db.dbLock.Lock() defer db.dbLock.Unlock() @@ -194,14 +201,14 @@ func (db *LevelDb) ExistsTxSha(txsha *wire.ShaHash) (bool, error) { // existsTxSha returns if the given tx sha exists in the database.o // Must be called with the db lock held. -func (db *LevelDb) existsTxSha(txSha *wire.ShaHash) (bool, error) { +func (db *LevelDb) existsTxSha(txSha *chainhash.Hash) (bool, error) { key := shaTxToKey(txSha) return db.lDb.Has(key, db.ro) } // FetchTxByShaList returns the most recent tx of the name fully spent or not -func (db *LevelDb) FetchTxByShaList(txShaList []*wire.ShaHash) []*database.TxListReply { +func (db *LevelDb) FetchTxByShaList(txShaList []*chainhash.Hash) []*database.TxListReply { db.dbLock.Lock() defer db.dbLock.Unlock() @@ -209,7 +216,7 @@ func (db *LevelDb) FetchTxByShaList(txShaList []*wire.ShaHash) []*database.TxLis // to FetchUnSpentTxByShaList replies := make([]*database.TxListReply, len(txShaList)) for i, txsha := range txShaList { - tx, blockSha, height, txspent, err := db.fetchTxDataBySha(txsha) + tx, blockSha, height, blkIdx, txspent, err := db.fetchTxDataBySha(txsha) btxspent := []bool{} if err == nil { btxspent = make([]bool, len(tx.TxOut), len(tx.TxOut)) @@ -227,6 +234,8 @@ func (db *LevelDb) FetchTxByShaList(txShaList []*wire.ShaHash) []*database.TxLis if fSerr == nil && len(sTxList) != 0 { idx := len(sTxList) - 1 stx := sTxList[idx] + height = stx.blkHeight + blkIdx = stx.blkIndex tx, blockSha, _, _, err = db.fetchTxDataByLoc( stx.blkHeight, stx.txoff, stx.txlen, []byte{}) @@ -238,7 +247,7 @@ func (db *LevelDb) FetchTxByShaList(txShaList []*wire.ShaHash) []*database.TxLis } } } - txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blockSha, Height: height, TxSpent: btxspent, Err: err} + txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blockSha, Height: height, Index: blkIdx, TxSpent: btxspent, Err: err} replies[i] = &txlre } return replies @@ -246,13 +255,13 @@ func (db *LevelDb) FetchTxByShaList(txShaList []*wire.ShaHash) []*database.TxLis // FetchUnSpentTxByShaList given a array of ShaHash, look up the transactions // and return them in a TxListReply array. 
-func (db *LevelDb) FetchUnSpentTxByShaList(txShaList []*wire.ShaHash) []*database.TxListReply { +func (db *LevelDb) FetchUnSpentTxByShaList(txShaList []*chainhash.Hash) []*database.TxListReply { db.dbLock.Lock() defer db.dbLock.Unlock() replies := make([]*database.TxListReply, len(txShaList)) for i, txsha := range txShaList { - tx, blockSha, height, txspent, err := db.fetchTxDataBySha(txsha) + tx, blockSha, height, blkIdx, txspent, err := db.fetchTxDataBySha(txsha) btxspent := []bool{} if err == nil { btxspent = make([]bool, len(tx.TxOut), len(tx.TxOut)) @@ -262,32 +271,35 @@ func (db *LevelDb) FetchUnSpentTxByShaList(txShaList []*wire.ShaHash) []*databas btxspent[idx] = (txspent[byteidx] & (byte(1) << byteoff)) != 0 } } - txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blockSha, Height: height, TxSpent: btxspent, Err: err} + txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blockSha, Height: height, Index: blkIdx, TxSpent: btxspent, Err: err} replies[i] = &txlre } return replies } // fetchTxDataBySha returns several pieces of data regarding the given sha. -func (db *LevelDb) fetchTxDataBySha(txsha *wire.ShaHash) (rtx *wire.MsgTx, rblksha *wire.ShaHash, rheight int64, rtxspent []byte, err error) { +func (db *LevelDb) fetchTxDataBySha(txsha *chainhash.Hash) (rtx *wire.MsgTx, rblksha *chainhash.Hash, rheight int64, ridx uint32, rtxspent []byte, err error) { var blkHeight int64 + var blkIndex uint32 var txspent []byte var txOff, txLen int - blkHeight, txOff, txLen, txspent, err = db.getTxData(txsha) + blkHeight, blkIndex, txOff, txLen, txspent, err = db.getTxData(txsha) if err != nil { if err == leveldb.ErrNotFound { err = database.ErrTxShaMissing } return } - return db.fetchTxDataByLoc(blkHeight, txOff, txLen, txspent) + mtx, hash, _, _, err := db.fetchTxDataByLoc(blkHeight, txOff, txLen, txspent) + + return mtx, hash, blkHeight, blkIndex, txspent, err } // fetchTxDataByLoc returns several pieces of data regarding the given tx // located by the block/offset/size location -func (db *LevelDb) fetchTxDataByLoc(blkHeight int64, txOff int, txLen int, txspent []byte) (rtx *wire.MsgTx, rblksha *wire.ShaHash, rheight int64, rtxspent []byte, err error) { - var blksha *wire.ShaHash +func (db *LevelDb) fetchTxDataByLoc(blkHeight int64, txOff int, txLen int, txspent []byte) (rtx *wire.MsgTx, rblksha *chainhash.Hash, rheight int64, rtxspent []byte, err error) { + var blksha *chainhash.Hash var blkbuf []byte blksha, blkbuf, err = db.getBlkByHeight(blkHeight) @@ -298,11 +310,10 @@ func (db *LevelDb) fetchTxDataByLoc(blkHeight int64, txOff int, txLen int, txspe return } - //log.Trace("transaction %v is at block %v %v txoff %v, txlen %v\n", - // txsha, blksha, blkHeight, txOff, txLen) - if len(blkbuf) < txOff+txLen { - err = database.ErrTxShaMissing + log.Warnf("block buffer overrun while looking for tx: "+ + "block %v %v txoff %v txlen %v", blkHeight, blksha, txOff, txLen) + err = database.ErrDbInconsistency return } rbuf := bytes.NewReader(blkbuf[txOff : txOff+txLen]) @@ -312,6 +323,7 @@ func (db *LevelDb) fetchTxDataByLoc(blkHeight int64, txOff int, txLen int, txspe if err != nil { log.Warnf("unable to decode tx block %v %v txoff %v txlen %v", blkHeight, blksha, txOff, txLen) + err = database.ErrDbInconsistency return } @@ -319,14 +331,14 @@ func (db *LevelDb) fetchTxDataByLoc(blkHeight int64, txOff int, txLen int, txspe } // FetchTxBySha returns some data for the given Tx Sha. 
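+// Each reply also carries the transaction's position within its block (the
+// Index field) alongside its block height and per-output spent flags.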
-func (db *LevelDb) FetchTxBySha(txsha *wire.ShaHash) ([]*database.TxListReply, error) { +func (db *LevelDb) FetchTxBySha(txsha *chainhash.Hash) ([]*database.TxListReply, error) { db.dbLock.Lock() defer db.dbLock.Unlock() replylen := 0 replycnt := 0 - tx, blksha, height, txspent, txerr := db.fetchTxDataBySha(txsha) + tx, blksha, height, blkIdx, txspent, txerr := db.fetchTxDataBySha(txsha) if txerr == nil { replylen++ } else { @@ -361,7 +373,7 @@ func (db *LevelDb) FetchTxBySha(txsha *wire.ShaHash) ([]*database.TxListReply, e for i := range btxspent { btxspent[i] = true } - txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blksha, Height: stx.blkHeight, TxSpent: btxspent, Err: nil} + txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blksha, Height: stx.blkHeight, Index: stx.blkIndex, TxSpent: btxspent, Err: nil} replies[replycnt] = &txlre replycnt++ } @@ -373,7 +385,7 @@ func (db *LevelDb) FetchTxBySha(txsha *wire.ShaHash) ([]*database.TxListReply, e byteoff := uint(idx % 8) btxspent[idx] = (txspent[byteidx] & (byte(1) << byteoff)) != 0 } - txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blksha, Height: height, TxSpent: btxspent, Err: nil} + txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blksha, Height: height, Index: blkIdx, TxSpent: btxspent, Err: nil} replies[replycnt] = &txlre replycnt++ } @@ -385,25 +397,28 @@ func (db *LevelDb) FetchTxBySha(txsha *wire.ShaHash) ([]*database.TxListReply, e // in order to ensure that the transactions are sorted in the index. // This gives us the ability to use the index in more client-side // applications that are order-dependent (specifically by dependency). -func addrIndexToKey(index *txAddrIndex) []byte { +func addrIndexToKey(index *database.TxAddrIndex) []byte { record := make([]byte, addrIndexKeyLength, addrIndexKeyLength) copy(record[0:3], addrIndexKeyPrefix) - copy(record[3:23], index.hash160[:]) + copy(record[3:23], index.Hash160[:]) // The index itself. - binary.BigEndian.PutUint32(record[23:27], uint32(index.blkHeight)) - binary.BigEndian.PutUint32(record[27:31], uint32(index.txoffset)) - binary.BigEndian.PutUint32(record[31:35], uint32(index.txlen)) + binary.BigEndian.PutUint32(record[23:27], uint32(index.Height)) + binary.BigEndian.PutUint32(record[27:31], uint32(index.TxOffset)) + binary.BigEndian.PutUint32(record[31:35], uint32(index.TxLen)) return record } // unpackTxIndex deserializes the raw bytes of a address tx index. -func unpackTxIndex(rawIndex [12]byte) *txAddrIndex { - return &txAddrIndex{ - blkHeight: int64(binary.BigEndian.Uint32(rawIndex[0:4])), - txoffset: int(binary.BigEndian.Uint32(rawIndex[4:8])), - txlen: int(binary.BigEndian.Uint32(rawIndex[8:12])), +func unpackTxIndex(rawIndex [database.AddrIndexKeySize]byte) *database.TxAddrIndex { + var addr [ripemd160.Size]byte + copy(addr[:], rawIndex[3:23]) + return &database.TxAddrIndex{ + addr, + binary.BigEndian.Uint32(rawIndex[23:27]), + binary.BigEndian.Uint32(rawIndex[27:31]), + binary.BigEndian.Uint32(rawIndex[31:35]), } } @@ -429,7 +444,7 @@ func bytesPrefix(prefix []byte) *util.Range { // should be the max number of transactions to be returned. Additionally, if the // caller wishes to seek forward in the results some amount, the 'seek' // represents how many results to skip. 
-func (db *LevelDb) FetchTxsForAddr(addr btcutil.Address, skip int, +func (db *LevelDb) FetchTxsForAddr(addr dcrutil.Address, skip int, limit int) ([]*database.TxListReply, error) { db.dbLock.Lock() defer db.dbLock.Unlock() @@ -438,20 +453,26 @@ func (db *LevelDb) FetchTxsForAddr(addr btcutil.Address, skip int, if skip < 0 { return nil, errors.New("offset for skip must be positive") } - if limit < 0 { + if limit < 1 { return nil, errors.New("value for limit must be positive") } // Parse address type, bailing on an unknown type. var addrKey []byte switch addr := addr.(type) { - case *btcutil.AddressPubKeyHash: + case *dcrutil.AddressPubKeyHash: hash160 := addr.Hash160() addrKey = hash160[:] - case *btcutil.AddressScriptHash: + case *dcrutil.AddressScriptHash: hash160 := addr.Hash160() addrKey = hash160[:] - case *btcutil.AddressPubKey: + case *dcrutil.AddressSecpPubKey: + hash160 := addr.AddressPubKeyHash().Hash160() + addrKey = hash160[:] + case *dcrutil.AddressEdwardsPubKey: + hash160 := addr.AddressPubKeyHash().Hash160() + addrKey = hash160[:] + case *dcrutil.AddressSecSchnorrPubKey: hash160 := addr.AddressPubKeyHash().Hash160() addrKey = hash160[:] default: @@ -464,25 +485,39 @@ func (db *LevelDb) FetchTxsForAddr(addr btcutil.Address, skip int, copy(addrPrefix[3:23], addrKey) iter := db.lDb.NewIterator(bytesPrefix(addrPrefix), nil) - for skip != 0 && iter.Next() { + for skip != 0 { + iter.Next() skip-- } // Iterate through all address indexes that match the targeted prefix. var replies []*database.TxListReply - var rawIndex [12]byte - for iter.Next() && limit != 0 { - copy(rawIndex[:], iter.Key()[23:35]) + var rawIndex [database.AddrIndexKeySize]byte + for iter.Next() { + if limit == 0 { + break + } + + copy(rawIndex[:], iter.Key()) addrIndex := unpackTxIndex(rawIndex) - tx, blkSha, blkHeight, _, err := db.fetchTxDataByLoc(addrIndex.blkHeight, - addrIndex.txoffset, addrIndex.txlen, []byte{}) + tx, blkSha, blkHeight, _, err := db.fetchTxDataByLoc( + int64(addrIndex.Height), + int(addrIndex.TxOffset), + int(addrIndex.TxLen), + []byte{}) if err != nil { - // Eat a possible error due to a potential re-org. + log.Warnf("tx listed in addrindex record not found, height: %v"+ + " offset: %v, len: %v", addrIndex.Height, addrIndex.TxOffset, + addrIndex.TxLen) + limit-- continue } - txSha := tx.TxSha() + var txSha chainhash.Hash + if tx != nil { + txSha = tx.TxSha() + } txReply := &database.TxListReply{Sha: &txSha, Tx: tx, BlkSha: blkSha, Height: blkHeight, TxSpent: []bool{}, Err: err} @@ -511,7 +546,8 @@ func (db *LevelDb) FetchTxsForAddr(addr btcutil.Address, skip int, // append-only list for the stored value. However, this add unnecessary // overhead when storing and retrieving since the entire list must // be fetched each time. -func (db *LevelDb) UpdateAddrIndexForBlock(blkSha *wire.ShaHash, blkHeight int64, addrIndex database.BlockAddrIndex) error { +func (db *LevelDb) UpdateAddrIndexForBlock(blkSha *chainhash.Hash, + blkHeight int64, addrIndexes database.BlockAddrIndex) error { db.dbLock.Lock() defer db.dbLock.Unlock() @@ -521,18 +557,10 @@ func (db *LevelDb) UpdateAddrIndexForBlock(blkSha *wire.ShaHash, blkHeight int64 // Write all data for the new address indexes in a single batch // transaction. - for addrKey, indexes := range addrIndex { - for _, txLoc := range indexes { - index := &txAddrIndex{ - hash160: addrKey, - blkHeight: blkHeight, - txoffset: txLoc.TxStart, - txlen: txLoc.TxLen, - } - // The index is stored purely in the key. 
- packedIndex := addrIndexToKey(index) - batch.Put(packedIndex, blankData) - } + for _, addrIndex := range addrIndexes { + // The index is stored purely in the key. + packedIndex := addrIndexToKey(addrIndex) + batch.Put(packedIndex, blankData) } // Update tip of addrindex. @@ -557,9 +585,70 @@ func (db *LevelDb) UpdateAddrIndexForBlock(blkSha *wire.ShaHash, blkHeight int64 return nil } -// DeleteAddrIndex deletes the entire addrindex stored within the DB. +func (db *LevelDb) DropAddrIndexForBlock(blkSha *chainhash.Hash, + blkHeight int64, addrIndexes database.BlockAddrIndex) (rerr error) { + db.dbLock.Lock() + defer db.dbLock.Unlock() + defer db.lbatch.Reset() + + batch := db.lBatch() + + defer func() { + if rerr == nil { + rerr = db.lDb.Write(batch, db.wo) + } else { + batch.Reset() + } + }() + + tipIdx := db.lastAddrIndexBlkIdx + tipHash := db.lastAddrIndexBlkSha + + if tipIdx != blkHeight || !tipHash.IsEqual(blkSha) { + return fmt.Errorf("expected to receive a removal of hash %v, height %v"+ + ", but instead received hash %v, height %v", + tipHash, tipIdx, blkSha, blkHeight) + } + + // Write all data for the new address indexes in a single batch + // transaction. + for _, addrIndex := range addrIndexes { + // The index is stored purely in the key. + packedIndex := addrIndexToKey(addrIndex) + batch.Delete(packedIndex) + } + + parentHash, _, err := db.getBlkByHeight(blkHeight - 1) + if err != nil { + return err + } + phb := *parentHash + + // Update tip of addrindex. + newIndexTip := make([]byte, 40, 40) + copy(newIndexTip[0:32], phb[:]) + binary.LittleEndian.PutUint64(newIndexTip[32:40], uint64(blkHeight-1)) + batch.Put(addrIndexMetaDataKey, newIndexTip) + + // Ensure we're writing an address index version + newIndexVersion := make([]byte, 2, 2) + binary.LittleEndian.PutUint16(newIndexVersion[0:2], + uint16(addrIndexCurrentVersion)) + batch.Put(addrIndexVersionKey, newIndexVersion) + + if err := db.lDb.Write(batch, db.wo); err != nil { + return err + } + + db.lastAddrIndexBlkIdx = blkHeight - 1 + db.lastAddrIndexBlkSha = phb + + return nil +} + +// PurgeAddrIndex deletes the entire addrindex stored within the DB. // It also resets the cached in-memory metadata about the addr index. -func (db *LevelDb) DeleteAddrIndex() error { +func (db *LevelDb) PurgeAddrIndex() error { db.dbLock.Lock() defer db.dbLock.Unlock() @@ -601,7 +690,7 @@ func (db *LevelDb) DeleteAddrIndex() error { } db.lastAddrIndexBlkIdx = -1 - db.lastAddrIndexBlkSha = wire.ShaHash{} + db.lastAddrIndexBlkSha = chainhash.Hash{} return nil } @@ -654,7 +743,7 @@ func (db *LevelDb) deleteOldAddrIndex() error { } db.lastAddrIndexBlkIdx = -1 - db.lastAddrIndexBlkSha = wire.ShaHash{} + db.lastAddrIndexBlkSha = chainhash.Hash{} return nil } diff --git a/database/log.go b/database/log.go index f9bed06c..1411e3d7 100644 --- a/database/log.go +++ b/database/log.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. diff --git a/database/memdb/doc.go b/database/memdb/doc.go index a2a28543..b5d786b6 100644 --- a/database/memdb/doc.go +++ b/database/memdb/doc.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
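(Aside, not part of the patch: the address index above stores everything in the leveldb key itself. A minimal standalone sketch of that layout, using packAddrIndexKey as a made-up name and assuming the key length constant works out to the 35 bytes implied by the slicing in addrIndexToKey and unpackTxIndex:)

package main

import (
	"encoding/binary"
	"fmt"
)

// packAddrIndexKey mirrors the layout used by addrIndexToKey: a 3-byte
// "a+-" table prefix, the 20-byte hash160 of the address, then big-endian
// block height, tx offset, and tx length.
func packAddrIndexKey(hash160 [20]byte, height, offset, length uint32) [35]byte {
	var key [35]byte
	copy(key[0:3], "a+-")
	copy(key[3:23], hash160[:])
	binary.BigEndian.PutUint32(key[23:27], height)
	binary.BigEndian.PutUint32(key[27:31], offset)
	binary.BigEndian.PutUint32(key[31:35], length)
	return key
}

func main() {
	var hash160 [20]byte // normally ripemd160(sha256(...)) of the address script
	fmt.Printf("%x\n", packAddrIndexKey(hash160, 179, 1024, 300))
}

Because height, offset, and length are stored big-endian, iterating every key with the "a+-" plus hash160 prefix yields an address's transactions in chain order, which is what FetchTxsForAddr's skip/limit walk relies on.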
diff --git a/database/memdb/driver.go b/database/memdb/driver.go index 30d654cd..021b71c2 100644 --- a/database/memdb/driver.go +++ b/database/memdb/driver.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -7,8 +8,8 @@ package memdb import ( "fmt" - "github.com/btcsuite/btcd/database" "github.com/btcsuite/btclog" + "github.com/decred/dcrd/database" ) var log = btclog.Disabled diff --git a/database/memdb/memdb.go b/database/memdb/memdb.go index 98deab29..bf4c3b31 100644 --- a/database/memdb/memdb.go +++ b/database/memdb/memdb.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -10,9 +11,10 @@ import ( "math" "sync" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) // Errors that the various database functions may return. @@ -21,17 +23,13 @@ var ( ) var ( - zeroHash = wire.ShaHash{} - - // The following two hashes are ones that must be specially handled. - // See the comments where they're used for more details. - dupTxHash91842 = newShaHashFromStr("d5d27987d2a3dfc724e359870c6644b40e497bdc0589a033220fe15429d88599") - dupTxHash91880 = newShaHashFromStr("e3bf3d07d4b0375638d5f1db5255fe07ba2c4cb067cd81b84ee974b6585fb468") + zeroHash = chainhash.Hash{} ) // tTxInsertData holds information about the location and spent status of // a transaction. type tTxInsertData struct { + tree int8 blockHeight int64 offset int spentBuf []bool @@ -41,8 +39,8 @@ type tTxInsertData struct { // wire.ShaHash. It only differs from the one available in wire in that it // ignores the error since it will only (and must only) be called with // hard-coded, and therefore known good, hashes. -func newShaHashFromStr(hexStr string) *wire.ShaHash { - sha, _ := wire.NewShaHashFromStr(hexStr) +func newShaHashFromStr(hexStr string) *chainhash.Hash { + sha, _ := chainhash.NewHashFromStr(hexStr) return sha } @@ -80,17 +78,17 @@ type MemDb struct { // Embed a mutex for safe concurrent access. sync.Mutex - // blocks holds all of the bitcoin blocks that will be in the memory + // blocks holds all of the decred blocks that will be in the memory // database. blocks []*wire.MsgBlock // blocksBySha keeps track of block heights by hash. The height can // be used as an index into the blocks slice. - blocksBySha map[wire.ShaHash]int64 + blocksBySha map[chainhash.Hash]int64 // txns holds information about transactions such as which their // block height and spent status of all their outputs. - txns map[wire.ShaHash][]*tTxInsertData + txns map[chainhash.Hash][]*tTxInsertData // closed indicates whether or not the database has been closed and is // therefore invalidated. @@ -98,7 +96,7 @@ type MemDb struct { } // removeTx removes the passed transaction including unspending it. -func (db *MemDb) removeTx(msgTx *wire.MsgTx, txHash *wire.ShaHash) { +func (db *MemDb) removeTx(msgTx *wire.MsgTx, txHash *chainhash.Hash) { // Undo all of the spends for the transaction. for _, txIn := range msgTx.TxIn { if isCoinbaseInput(txIn) { @@ -157,7 +155,7 @@ func (db *MemDb) Close() error { // block. 
This is different than a simple truncate since the spend information // for each block must also be unwound. This is part of the database.Db interface // implementation. -func (db *MemDb) DropAfterBlockBySha(sha *wire.ShaHash) error { +func (db *MemDb) DropAfterBlockBySha(sha *chainhash.Hash) error { db.Lock() defer db.Unlock() @@ -179,25 +177,35 @@ func (db *MemDb) DropAfterBlockBySha(sha *wire.ShaHash) error { // remove the block. endHeight := int64(len(db.blocks) - 1) for i := endHeight; i > height; i-- { - // Unspend and remove each transaction in reverse order because - // later transactions in a block can reference earlier ones. - transactions := db.blocks[i].Transactions - for j := len(transactions) - 1; j >= 0; j-- { - tx := transactions[j] - txHash := tx.TxSha() - db.removeTx(tx, &txHash) + + blk := db.blocks[i] + blkprev := db.blocks[i-1] + // Unspend the stake tx in the current block + for _, tx := range blk.STransactions { + txSha := tx.TxSha() + db.removeTx(tx, &txSha) + } + + // Check to see if the regular txs of the parent were even included; if + // they are, unspend all of these regular tx too + votebits := blk.Header.VoteBits + if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && height != 0 { + // Unspend the regular tx in the previous block + for _, tx := range blkprev.Transactions { + txSha := tx.TxSha() + db.removeTx(tx, &txSha) + } } db.blocks[i] = nil db.blocks = db.blocks[:i] } - return nil } // ExistsSha returns whether or not the given block hash is present in the // database. This is part of the database.Db interface implementation. -func (db *MemDb) ExistsSha(sha *wire.ShaHash) (bool, error) { +func (db *MemDb) ExistsSha(sha *chainhash.Hash) (bool, error) { db.Lock() defer db.Unlock() @@ -212,22 +220,24 @@ func (db *MemDb) ExistsSha(sha *wire.ShaHash) (bool, error) { return false, nil } -// FetchBlockBySha returns a btcutil.Block. The implementation may cache the +// FetchBlockBySha returns a dcrutil.Block. The implementation may cache the // underlying data if desired. This is part of the database.Db interface // implementation. // // This implementation does not use any additional cache since the entire // database is already in memory. -func (db *MemDb) FetchBlockBySha(sha *wire.ShaHash) (*btcutil.Block, error) { +func (db *MemDb) FetchBlockBySha(sha *chainhash.Hash) (*dcrutil.Block, error) { db.Lock() defer db.Unlock() + return db.fetchBlockBySha(sha) +} +func (db *MemDb) fetchBlockBySha(sha *chainhash.Hash) (*dcrutil.Block, error) { if db.closed { return nil, ErrDbClosed } - if blockHeight, exists := db.blocksBySha[*sha]; exists { - block := btcutil.NewBlock(db.blocks[int(blockHeight)]) + block := dcrutil.NewBlock(db.blocks[int(blockHeight)]) block.SetHeight(blockHeight) return block, nil } @@ -237,7 +247,7 @@ func (db *MemDb) FetchBlockBySha(sha *wire.ShaHash) (*btcutil.Block, error) { // FetchBlockHeightBySha returns the block height for the given hash. This is // part of the database.Db interface implementation. -func (db *MemDb) FetchBlockHeightBySha(sha *wire.ShaHash) (int64, error) { +func (db *MemDb) FetchBlockHeightBySha(sha *chainhash.Hash) (int64, error) { db.Lock() defer db.Unlock() @@ -258,7 +268,7 @@ func (db *MemDb) FetchBlockHeightBySha(sha *wire.ShaHash) (int64, error) { // // This implementation does not use any additional cache since the entire // database is already in memory. 
-func (db *MemDb) FetchBlockHeaderBySha(sha *wire.ShaHash) (*wire.BlockHeader, error) { +func (db *MemDb) FetchBlockHeaderBySha(sha *chainhash.Hash) (*wire.BlockHeader, error) { db.Lock() defer db.Unlock() @@ -275,7 +285,7 @@ func (db *MemDb) FetchBlockHeaderBySha(sha *wire.ShaHash) (*wire.BlockHeader, er // FetchBlockShaByHeight returns a block hash based on its height in the block // chain. This is part of the database.Db interface implementation. -func (db *MemDb) FetchBlockShaByHeight(height int64) (*wire.ShaHash, error) { +func (db *MemDb) FetchBlockShaByHeight(height int64) (*chainhash.Hash, error) { db.Lock() defer db.Unlock() @@ -299,7 +309,7 @@ func (db *MemDb) FetchBlockShaByHeight(height int64) (*wire.ShaHash, error) { // Fetch is inclusive of the start height and exclusive of the ending height. // To fetch all hashes from the start height until no more are present, use the // special id `AllShas'. This is part of the database.Db interface implementation. -func (db *MemDb) FetchHeightRange(startHeight, endHeight int64) ([]wire.ShaHash, error) { +func (db *MemDb) FetchHeightRange(startHeight, endHeight int64) ([]chainhash.Hash, error) { db.Lock() defer db.Unlock() @@ -326,7 +336,7 @@ func (db *MemDb) FetchHeightRange(startHeight, endHeight int64) ([]wire.ShaHash, // Fetch as many as are availalbe within the specified range. lastBlockIndex := int64(len(db.blocks) - 1) - hashList := make([]wire.ShaHash, 0, endHeight-startHeight) + hashList := make([]chainhash.Hash, 0, endHeight-startHeight) for i := startHeight; i < endHeight; i++ { if i > lastBlockIndex { break @@ -343,7 +353,7 @@ func (db *MemDb) FetchHeightRange(startHeight, endHeight int64) ([]wire.ShaHash, // ExistsTxSha returns whether or not the given transaction hash is present in // the database and is not fully spent. This is part of the database.Db interface // implementation. -func (db *MemDb) ExistsTxSha(sha *wire.ShaHash) (bool, error) { +func (db *MemDb) ExistsTxSha(sha *chainhash.Hash) (bool, error) { db.Lock() defer db.Unlock() @@ -364,14 +374,13 @@ func (db *MemDb) ExistsTxSha(sha *wire.ShaHash) (bool, error) { // // This implementation does not use any additional cache since the entire // database is already in memory. 
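+// The returned transaction is looked up in either the regular or the stake
+// transaction tree of its block, depending on the tree recorded for the
+// entry, since the stored offset indexes Transactions or STransactions
+// respectively.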
-func (db *MemDb) FetchTxBySha(txHash *wire.ShaHash) ([]*database.TxListReply, error) { +func (db *MemDb) FetchTxBySha(txHash *chainhash.Hash) ([]*database.TxListReply, error) { db.Lock() defer db.Unlock() if db.closed { return nil, ErrDbClosed } - txns, exists := db.txns[*txHash] if !exists { log.Warnf("FetchTxBySha: requested hash of %s does not exist", @@ -387,17 +396,28 @@ func (db *MemDb) FetchTxBySha(txHash *wire.ShaHash) ([]*database.TxListReply, er spentBuf := make([]bool, len(txD.spentBuf)) copy(spentBuf, txD.spentBuf) - reply := database.TxListReply{ - Sha: &txHashCopy, - Tx: msgBlock.Transactions[txD.offset], - BlkSha: &blockSha, - Height: txD.blockHeight, - TxSpent: spentBuf, - Err: nil, + if txD.tree == dcrutil.TxTreeRegular { + reply := database.TxListReply{ + Sha: &txHashCopy, + Tx: msgBlock.Transactions[txD.offset], + BlkSha: &blockSha, + Height: txD.blockHeight, + TxSpent: spentBuf, + Err: nil, + } + replyList[i] = &reply + } else if txD.tree == dcrutil.TxTreeStake { + reply := database.TxListReply{ + Sha: &txHashCopy, + Tx: msgBlock.STransactions[txD.offset], + BlkSha: &blockSha, + Height: txD.blockHeight, + TxSpent: spentBuf, + Err: nil, + } + replyList[i] = &reply } - replyList[i] = &reply } - return replyList, nil } @@ -412,7 +432,7 @@ func (db *MemDb) FetchTxBySha(txHash *wire.ShaHash) ([]*database.TxListReply, er // will indicate the transaction does not exist. // // This function must be called with the db lock held. -func (db *MemDb) fetchTxByShaList(txShaList []*wire.ShaHash, includeSpent bool) []*database.TxListReply { +func (db *MemDb) fetchTxByShaList(txShaList []*chainhash.Hash, includeSpent bool) []*database.TxListReply { replyList := make([]*database.TxListReply, 0, len(txShaList)) for i, hash := range txShaList { // Every requested entry needs a response, so start with nothing @@ -453,6 +473,11 @@ func (db *MemDb) fetchTxByShaList(txShaList []*wire.ShaHash, includeSpent bool) copy(spentBuf, txD.spentBuf) // Populate the reply. + if txD.tree == dcrutil.TxTreeRegular { + reply.Tx = msgBlock.Transactions[txD.offset] + } else if txD.tree == dcrutil.TxTreeStake { + reply.Tx = msgBlock.STransactions[txD.offset] + } reply.Tx = msgBlock.Transactions[txD.offset] reply.BlkSha = &blockSha reply.Height = txD.blockHeight @@ -484,7 +509,7 @@ func (db *MemDb) fetchTxByShaList(txShaList []*wire.ShaHash, includeSpent bool) // // This implementation does not use any additional cache since the entire // database is already in memory. -func (db *MemDb) FetchTxByShaList(txShaList []*wire.ShaHash) []*database.TxListReply { +func (db *MemDb) FetchTxByShaList(txShaList []*chainhash.Hash) []*database.TxListReply { db.Lock() defer db.Unlock() @@ -503,7 +528,7 @@ func (db *MemDb) FetchTxByShaList(txShaList []*wire.ShaHash) []*database.TxListR // // This implementation does not use any additional cache since the entire // database is already in memory. -func (db *MemDb) FetchUnSpentTxByShaList(txShaList []*wire.ShaHash) []*database.TxListReply { +func (db *MemDb) FetchUnSpentTxByShaList(txShaList []*chainhash.Hash) []*database.TxListReply { db.Lock() defer db.Unlock() @@ -515,7 +540,7 @@ func (db *MemDb) FetchUnSpentTxByShaList(txShaList []*wire.ShaHash) []*database. // genesis block. Every subsequent block insert requires the referenced parent // block to already exist. This is part of the database.Db interface // implementation. 
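+// For Decred, the parent block's regular transaction tree is only committed
+// (its transactions inserted and their inputs marked spent) when this block's
+// vote bits have BlockValid set; this block's stake transactions are always
+// committed at the new height.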
-func (db *MemDb) InsertBlock(block *btcutil.Block) (int64, error) { +func (db *MemDb) InsertBlock(block *dcrutil.Block) (int64, error) { db.Lock() defer db.Unlock() @@ -532,102 +557,128 @@ func (db *MemDb) InsertBlock(block *btcutil.Block) (int64, error) { return 0, database.ErrPrevShaMissing } } + var blockPrev *dcrutil.Block = nil + // Decred: WARNING. This function assumes that all block insertion calls have + // dcrutil.blocks passed to them with block.blockHeight set correctly. However, + // loading the genesis block in dcrd didn't do this (via block manager); pre- + // production it should be established that all calls to this function pass + // blocks with block.blockHeight set correctly. + if len(db.blocks) > 0 { + var errBlockPrev error + blockPrev, errBlockPrev = db.fetchBlockBySha(&msgBlock.Header.PrevBlock) + if errBlockPrev != nil { + blockSha := block.Sha() + log.Warnf("Failed to fetch parent block of block %v", blockSha) + return 0, errBlockPrev + } + } // Build a map of in-flight transactions because some of the inputs in // this block could be referencing other transactions earlier in this // block which are not yet in the chain. - txInFlight := map[wire.ShaHash]int{} - transactions := block.Transactions() - for i, tx := range transactions { - txInFlight[*tx.Sha()] = i - } - + newHeight := int64(len(db.blocks)) + txInFlight := map[chainhash.Hash]int{} // Loop through all transactions and inputs to ensure there are no error // conditions that would prevent them from be inserted into the db. // Although these checks could could be done in the loop below, checking // for error conditions up front means the code below doesn't have to // deal with rollback on errors. - newHeight := int64(len(db.blocks)) - for i, tx := range transactions { - // Two old blocks contain duplicate transactions due to being - // mined by faulty miners and accepted by the origin Satoshi - // client. Rules have since been added to the ensure this - // problem can no longer happen, but the two duplicate - // transactions which were originally accepted are forever in - // the block chain history and must be dealth with specially. - // http://blockexplorer.com/b/91842 - // http://blockexplorer.com/b/91880 - if newHeight == 91842 && tx.Sha().IsEqual(dupTxHash91842) { - continue + votebits := block.MsgBlock().Header.VoteBits + if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil { + transactions := blockPrev.Transactions() + for i, tx := range transactions { + txInFlight[*tx.Sha()] = i } - - if newHeight == 91880 && tx.Sha().IsEqual(dupTxHash91880) { - continue - } - - for _, txIn := range tx.MsgTx().TxIn { - if isCoinbaseInput(txIn) { - continue - } - - // It is acceptable for a transaction input to reference - // the output of another transaction in this block only - // if the referenced transaction comes before the - // current one in this block. 
- prevOut := &txIn.PreviousOutPoint - if inFlightIndex, ok := txInFlight[prevOut.Hash]; ok { - if i <= inFlightIndex { - log.Warnf("InsertBlock: requested hash "+ - " of %s does not exist in-flight", - tx.Sha()) - return 0, database.ErrTxShaMissing + for i, tx := range transactions { + for _, txIn := range tx.MsgTx().TxIn { + if isCoinbaseInput(txIn) { + continue } - } else { - originTxns, exists := db.txns[prevOut.Hash] - if !exists { - log.Warnf("InsertBlock: requested hash "+ - "of %s by %s does not exist", - prevOut.Hash, tx.Sha()) - return 0, database.ErrTxShaMissing - } - originTxD := originTxns[len(originTxns)-1] - if prevOut.Index > uint32(len(originTxD.spentBuf)) { - log.Warnf("InsertBlock: requested hash "+ - "of %s with index %d does not "+ - "exist", tx.Sha(), prevOut.Index) - return 0, database.ErrTxShaMissing + + // It is acceptable for a transaction input to reference + // the output of another transaction in this block only + // if the referenced transaction comes before the + // current one in this block. + prevOut := &txIn.PreviousOutPoint + if inFlightIndex, ok := txInFlight[prevOut.Hash]; ok { + if i <= inFlightIndex { + log.Warnf("InsertBlock: requested hash "+ + " of %s does not exist in-flight", + tx.Sha()) + return 0, database.ErrTxShaMissing + } + } else { + originTxns, exists := db.txns[prevOut.Hash] + if !exists { + log.Warnf("InsertBlock: requested hash "+ + "of %s by %s does not exist", + prevOut.Hash, tx.Sha()) + return 0, database.ErrTxShaMissing + } + originTxD := originTxns[len(originTxns)-1] + if prevOut.Index > uint32(len(originTxD.spentBuf)) { + log.Warnf("InsertBlock: requested hash "+ + "of %s with index %d does not "+ + "exist", tx.Sha(), prevOut.Index) + return 0, database.ErrTxShaMissing + } } } - } - // Prevent duplicate transactions in the same block. - if inFlightIndex, exists := txInFlight[*tx.Sha()]; exists && - inFlightIndex < i { - log.Warnf("Block contains duplicate transaction %s", - tx.Sha()) - return 0, database.ErrDuplicateSha - } - - // Prevent duplicate transactions unless the old one is fully - // spent. - if txns, exists := db.txns[*tx.Sha()]; exists { - txD := txns[len(txns)-1] - if !isFullySpent(txD) { - log.Warnf("Attempt to insert duplicate "+ - "transaction %s", tx.Sha()) + // Prevent duplicate transactions in the same block. + if inFlightIndex, exists := txInFlight[*tx.Sha()]; exists && + inFlightIndex < i { + log.Warnf("Block contains duplicate transaction %s", + tx.Sha()) return 0, database.ErrDuplicateSha } + + // Prevent duplicate transactions unless the old one is fully + // spent. + if txns, exists := db.txns[*tx.Sha()]; exists { + txD := txns[len(txns)-1] + if !isFullySpent(txD) { + log.Warnf("Attempt to insert duplicate "+ + "transaction %s", tx.Sha()) + return 0, database.ErrDuplicateSha + } + } } } db.blocks = append(db.blocks, msgBlock) - db.blocksBySha[*block.Sha()] = newHeight + db.blocksBySha[msgBlock.Header.BlockSha()] = newHeight + if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil { + // Insert information about eacj transaction and spend all of the + // outputs referenced by the inputs to the transactions. + for i, tx := range blockPrev.Transactions() { + // Insert the transaction data. + txD := tTxInsertData{ + tree: dcrutil.TxTreeRegular, + blockHeight: newHeight - 1, + offset: i, + spentBuf: make([]bool, len(tx.MsgTx().TxOut)), + } + db.txns[*tx.Sha()] = append(db.txns[*tx.Sha()], &txD) + // Spend all of the inputs. 
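+			// Mark each previous output referenced by a non-coinbase input
+			// as spent in the originating transaction's spent buffer; the
+			// referenced outputs were already validated above.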
+ for _, txIn := range tx.MsgTx().TxIn { + // Coinbase transaction has no inputs. + if isCoinbaseInput(txIn) { + continue + } - // Insert information about eacj transaction and spend all of the - // outputs referenced by the inputs to the transactions. - for i, tx := range block.Transactions() { + // Already checked for existing and valid ranges above. + prevOut := &txIn.PreviousOutPoint + originTxns := db.txns[prevOut.Hash] + originTxD := originTxns[len(originTxns)-1] + originTxD.spentBuf[prevOut.Index] = true + } + } + } + for i, tx := range block.STransactions() { // Insert the transaction data. txD := tTxInsertData{ + tree: dcrutil.TxTreeStake, blockHeight: newHeight, offset: i, spentBuf: make([]bool, len(tx.MsgTx().TxOut)), @@ -647,8 +698,8 @@ func (db *MemDb) InsertBlock(block *btcutil.Block) (int64, error) { originTxD := originTxns[len(originTxns)-1] originTxD.spentBuf[prevOut.Index] = true } - } + } return newHeight, nil } @@ -656,7 +707,7 @@ func (db *MemDb) InsertBlock(block *btcutil.Block) (int64, error) { // the block chain. It will return the zero hash, -1 for the block height, and // no error (nil) if there are not any blocks in the database yet. This is part // of the database.Db interface implementation. -func (db *MemDb) NewestSha() (*wire.ShaHash, int64, error) { +func (db *MemDb) NewestSha() (*chainhash.Hash, int64, error) { db.Lock() defer db.Unlock() @@ -677,26 +728,33 @@ func (db *MemDb) NewestSha() (*wire.ShaHash, int64, error) { // FetchAddrIndexTip isn't currently implemented. This is a part of the // database.Db interface implementation. -func (db *MemDb) FetchAddrIndexTip() (*wire.ShaHash, int64, error) { +func (db *MemDb) FetchAddrIndexTip() (*chainhash.Hash, int64, error) { return nil, 0, database.ErrNotImplemented } // UpdateAddrIndexForBlock isn't currently implemented. This is a part of the // database.Db interface implementation. -func (db *MemDb) UpdateAddrIndexForBlock(*wire.ShaHash, int64, +func (db *MemDb) UpdateAddrIndexForBlock(*chainhash.Hash, int64, + database.BlockAddrIndex) error { + return database.ErrNotImplemented +} + +// DropAddrIndexForBlock isn't currently implemented. This is a part of the +// database.Db interface implementation. +func (db *MemDb) DropAddrIndexForBlock(*chainhash.Hash, int64, database.BlockAddrIndex) error { return database.ErrNotImplemented } // FetchTxsForAddr isn't currently implemented. This is a part of the database.Db // interface implementation. -func (db *MemDb) FetchTxsForAddr(btcutil.Address, int, int) ([]*database.TxListReply, error) { +func (db *MemDb) FetchTxsForAddr(dcrutil.Address, int, int) ([]*database.TxListReply, error) { return nil, database.ErrNotImplemented } -// DeleteAddrIndex isn't currently implemented. This is a part of the database.Db +// PurgeAddrIndex isn't currently implemented. This is a part of the database.Db // interface implementation. 
-func (db *MemDb) DeleteAddrIndex() error { +func (db *MemDb) PurgeAddrIndex() error { return database.ErrNotImplemented } @@ -737,8 +795,8 @@ func (db *MemDb) Sync() error { func newMemDb() *MemDb { db := MemDb{ blocks: make([]*wire.MsgBlock, 0, 200000), - blocksBySha: make(map[wire.ShaHash]int64), - txns: make(map[wire.ShaHash][]*tTxInsertData), + blocksBySha: make(map[chainhash.Hash]int64), + txns: make(map[chainhash.Hash][]*tTxInsertData), } return &db } diff --git a/database/memdb/memdb_test.go b/database/memdb/memdb_test.go index 68f556dd..c502ac19 100644 --- a/database/memdb/memdb_test.go +++ b/database/memdb/memdb_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,11 +9,11 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/database/memdb" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/database/memdb" + "github.com/decred/dcrutil" ) // TestClosed ensure calling the interface functions on a closed database @@ -25,7 +26,7 @@ func TestClosed(t *testing.T) { t.Errorf("Failed to open test database %v", err) return } - _, err = db.InsertBlock(btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)) + _, err = db.InsertBlock(dcrutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)) if err != nil { t.Errorf("InsertBlock: %v", err) } @@ -64,7 +65,7 @@ func TestClosed(t *testing.T) { t.Errorf("FetchTxBySha: unexpected error %v", err) } - requestHashes := []*wire.ShaHash{genesisHash} + requestHashes := []*chainhash.Hash{genesisHash} reply := db.FetchTxByShaList(requestHashes) if len(reply) != len(requestHashes) { t.Errorf("FetchUnSpentTxByShaList unexpected number of replies "+ diff --git a/database/reorg_test.go b/database/reorg_test.go index be03fc96..f849c34b 100644 --- a/database/reorg_test.go +++ b/database/reorg_test.go @@ -1,20 +1,20 @@ // Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package database_test import ( + "bytes" "compress/bzip2" - "encoding/binary" - "io" + "encoding/gob" "os" "path/filepath" - "strings" "testing" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrutil" ) // testReorganization performs reorganization tests for the passed DB type. 
@@ -28,52 +28,57 @@ func testReorganization(t *testing.T, dbType string) { } defer teardown() - blocks, err := loadReorgBlocks("reorgblocks.bz2") + blocks, err := loadReorgBlocks("reorgto179.bz2") + if err != nil { + t.Fatalf("Error loading file: %v", err) + } + blocksReorg, err := loadReorgBlocks("reorgto180.bz2") if err != nil { t.Fatalf("Error loading file: %v", err) } - for i := int64(0); i <= 2; i++ { - _, err = db.InsertBlock(blocks[i]) - if err != nil { - t.Fatalf("Error inserting block %d (%v): %v", i, - blocks[i].Sha(), err) - } - var txIDs []string - for _, tx := range blocks[i].Transactions() { - txIDs = append(txIDs, tx.Sha().String()) + // Find where chain forks + var forkHash chainhash.Hash + var forkHeight int64 + for i, _ := range blocks { + if blocks[i].Sha().IsEqual(blocksReorg[i].Sha()) { + blkHash := blocks[i].Sha() + forkHash = *blkHash + forkHeight = int64(i) } } - for i := int64(1); i >= 0; i-- { - blkHash := blocks[i].Sha() - err = db.DropAfterBlockBySha(blkHash) - if err != nil { - t.Fatalf("Error removing block %d for reorganization: %v", i, err) - } - // Exercise NewestSha() to make sure DropAfterBlockBySha() updates the - // info correctly - maxHash, blkHeight, err := db.NewestSha() - if err != nil { - t.Fatalf("Error getting newest block info") - } - if !maxHash.IsEqual(blkHash) || blkHeight != i { - t.Fatalf("NewestSha returned %v (%v), expected %v (%v)", blkHeight, - maxHash, i, blkHash) - } - } - - for i := int64(3); i < int64(len(blocks)); i++ { + // Insert all blocks from chain 1 + for i := int64(0); i < int64(len(blocks)); i++ { blkHash := blocks[i].Sha() if err != nil { t.Fatalf("Error getting SHA for block %dA: %v", i-2, err) } + _, err = db.InsertBlock(blocks[i]) if err != nil { t.Fatalf("Error inserting block %dA (%v): %v", i-2, blkHash, err) } } + // Remove blocks to fork point + db.DropAfterBlockBySha(&forkHash) + if err != nil { + t.Errorf("couldn't DropAfterBlockBySha: %v", err.Error()) + } + + // Insert blocks from the other chain to simulate a reorg + for i := forkHeight + 1; i < int64(len(blocksReorg)); i++ { + blkHash := blocksReorg[i].Sha() + if err != nil { + t.Fatalf("Error getting SHA for block %dA: %v", i-2, err) + } + _, err = db.InsertBlock(blocksReorg[i]) + if err != nil { + t.Fatalf("Error inserting block %dA (%v): %v", i-2, blkHash, err) + } + } + _, maxHeight, err := db.NewestSha() if err != nil { t.Fatalf("Error getting newest block info") @@ -88,7 +93,18 @@ func testReorganization(t *testing.T, dbType string) { if err != nil { t.Fatalf("Error fetching block %d (%v): %v", i, blkHash, err) } - for _, tx := range block.Transactions() { + prevBlockSha := block.MsgBlock().Header.PrevBlock + prevBlock, _ := db.FetchBlockBySha(&prevBlockSha) + votebits := blocksReorg[i].MsgBlock().Header.VoteBits + if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && prevBlock != nil { + for _, tx := range prevBlock.Transactions() { + _, err := db.FetchTxBySha(tx.Sha()) + if err != nil { + t.Fatalf("Error fetching transaction %v: %v", tx.Sha(), err) + } + } + } + for _, tx := range block.STransactions() { _, err := db.FetchTxBySha(tx.Sha()) if err != nil { t.Fatalf("Error fetching transaction %v: %v", tx.Sha(), err) @@ -97,71 +113,41 @@ func testReorganization(t *testing.T, dbType string) { } } -// loadReorgBlocks reads files containing bitcoin block data (bzipped but +// loadReorgBlocks reads files containing decred block data (bzipped but // otherwise in the format bitcoind writes) from disk and returns them as an -// array of btcutil.Block. 
This is copied from the blockchain package, which +// array of dcrutil.Block. This is copied from the blockchain package, which // itself largely borrowed it from the test code in this package. -func loadReorgBlocks(filename string) ([]*btcutil.Block, error) { - filename = filepath.Join("testdata/", filename) - - var blocks []*btcutil.Block - var err error - - var network = wire.SimNet - var dr io.Reader - var fi io.ReadCloser - - fi, err = os.Open(filename) +func loadReorgBlocks(filename string) ([]*dcrutil.Block, error) { + filename = filepath.Join("../blockchain/testdata/", filename) + fi, err := os.Open(filename) if err != nil { - return blocks, err - } - - if strings.HasSuffix(filename, ".bz2") { - dr = bzip2.NewReader(fi) - } else { - dr = fi + return nil, err } + bcStream := bzip2.NewReader(fi) defer fi.Close() - var block *btcutil.Block + // Create a buffer of the read file + bcBuf := new(bytes.Buffer) + bcBuf.ReadFrom(bcStream) - err = nil - for height := int64(1); err == nil; height++ { - var rintbuf uint32 - err = binary.Read(dr, binary.LittleEndian, &rintbuf) - if err == io.EOF { - // hit end of file at expected offset: no warning - height-- - err = nil - break - } - if err != nil { - break - } - if rintbuf != uint32(network) { - break - } - err = binary.Read(dr, binary.LittleEndian, &rintbuf) - if err != nil { - return blocks, err - } - blocklen := rintbuf - - rbytes := make([]byte, blocklen) - - // read block - numbytes, err := dr.Read(rbytes) - if err != nil { - return blocks, err - } - if uint32(numbytes) != blocklen { - return blocks, io.ErrUnexpectedEOF - } - - block, err = btcutil.NewBlockFromBytes(rbytes) + // Create decoder from the buffer and a map to store the data + bcDecoder := gob.NewDecoder(bcBuf) + blockchain := make(map[int64][]byte) + + // Decode the blockchain into the map + if err := bcDecoder.Decode(&blockchain); err != nil { + return nil, err + } + + var block *dcrutil.Block + + blocks := make([]*dcrutil.Block, 0, len(blockchain)) + for height := int64(0); height < int64(len(blockchain)); height++ { + block, err = dcrutil.NewBlockFromBytes(blockchain[height]) if err != nil { return blocks, err } + block.SetHeight(height) blocks = append(blocks, block) } diff --git a/database/testdata/blocks1-256.bz2 b/database/testdata/blocks1-256.bz2 deleted file mode 100644 index 6b8bda44..00000000 Binary files a/database/testdata/blocks1-256.bz2 and /dev/null differ diff --git a/database/testdata/reorgblocks.bz2 b/database/testdata/reorgblocks.bz2 deleted file mode 100644 index 1e0285e5..00000000 Binary files a/database/testdata/reorgblocks.bz2 and /dev/null differ diff --git a/btcd.go b/dcrd.go similarity index 63% rename from btcd.go rename to dcrd.go index a3274232..d1c0a68a 100644 --- a/btcd.go +++ b/dcrd.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -12,8 +13,9 @@ import ( "os" "runtime" "runtime/pprof" + "time" - "github.com/btcsuite/btcd/limits" + "github.com/decred/dcrd/limits" ) var ( @@ -21,16 +23,16 @@ var ( shutdownChannel = make(chan struct{}) ) -// winServiceMain is only invoked on Windows. It detects when btcd is running +// winServiceMain is only invoked on Windows. It detects when dcrd is running // as a service and reacts accordingly. var winServiceMain func() (bool, error) -// btcdMain is the real main function for btcd. 
It is necessary to work around +// dcrdMain is the real main function for dcrd. It is necessary to work around // the fact that deferred functions do not run when os.Exit() is called. The // optional serverChan parameter is mainly used by the service code to be // notified with the server once it is setup so it can gracefully stop it when // requested from the service control manager. -func btcdMain(serverChan chan<- *server) error { +func dcrdMain(serverChan chan<- *server) error { // Load configuration and parse command line. This function also // initializes logging and configures it accordingly. tcfg, _, err := loadConfig() @@ -41,17 +43,21 @@ func btcdMain(serverChan chan<- *server) error { defer backendLog.Flush() // Show version at startup. - btcdLog.Infof("Version %s", version()) + dcrdLog.Infof("Version %s", version()) // Enable http profiling server if requested. if cfg.Profile != "" { go func() { listenAddr := net.JoinHostPort("", cfg.Profile) - btcdLog.Infof("Profile server listening on %s", listenAddr) + dcrdLog.Infof("Creating profiling server "+ + "listening on %s", listenAddr) profileRedirect := http.RedirectHandler("/debug/pprof", http.StatusSeeOther) http.Handle("/", profileRedirect) - btcdLog.Errorf("%v", http.ListenAndServe(listenAddr, nil)) + err := http.ListenAndServe(listenAddr, nil) + if err != nil { + fatalf(err.Error()) + } }() } @@ -59,7 +65,7 @@ func btcdMain(serverChan chan<- *server) error { if cfg.CPUProfile != "" { f, err := os.Create(cfg.CPUProfile) if err != nil { - btcdLog.Errorf("Unable to create cpu profile: %v", err) + dcrdLog.Errorf("Unable to create cpu profile: %v", err.Error()) return err } pprof.StartCPUProfile(f) @@ -67,47 +73,73 @@ func btcdMain(serverChan chan<- *server) error { defer pprof.StopCPUProfile() } - // Perform upgrades to btcd as new versions require it. + // Write mem profile if requested. + if cfg.MemProfile != "" { + f, err := os.Create(cfg.MemProfile) + if err != nil { + dcrdLog.Errorf("Unable to create cpu profile: %v", err) + return err + } + timer := time.NewTimer(time.Minute * 20) // 20 minutes + go func() { + <-timer.C + pprof.WriteHeapProfile(f) + f.Close() + }() + } + + // Perform upgrades to dcrd as new versions require it. if err := doUpgrades(); err != nil { - btcdLog.Errorf("%v", err) + dcrdLog.Errorf("%v", err) return err } // Load the block database. db, err := loadBlockDB() if err != nil { - btcdLog.Errorf("%v", err) + dcrdLog.Errorf("%v", err) return err } defer db.Close() if cfg.DropAddrIndex { - btcdLog.Info("Deleting entire addrindex.") - err := db.DeleteAddrIndex() + dcrdLog.Info("Deleting entire addrindex.") + err := db.PurgeAddrIndex() if err != nil { - btcdLog.Errorf("Unable to delete the addrindex: %v", err) + dcrdLog.Errorf("Unable to delete the addrindex: %v", err) return err } - btcdLog.Info("Successfully deleted addrindex, exiting") + dcrdLog.Info("Successfully deleted addrindex, exiting") return nil } - // Ensure the database is sync'd and closed on Ctrl+C. + tmdb, err := loadTicketDB(db, activeNetParams.Params) + if err != nil { + dcrdLog.Errorf("%v", err) + return err + } + defer tmdb.Close() + + // Ensure the databases are sync'd and closed on Ctrl+C. addInterruptHandler(func() { - btcdLog.Infof("Gracefully shutting down the database...") + dcrdLog.Infof("Gracefully shutting down the database...") + err := tmdb.Store(cfg.DataDir, "ticketdb.gob") + if err != nil { + dcrdLog.Errorf("Failed to store ticket database: %v", err.Error()) + } db.RollbackClose() }) // Create server and start it. 
-	server, err := newServer(cfg.Listeners, db, activeNetParams.Params)
+	server, err := newServer(cfg.Listeners, db, tmdb, activeNetParams.Params)
 	if err != nil {
 		// TODO(oga) this logging could do with some beautifying.
-		btcdLog.Errorf("Unable to start server on %v: %v",
+		dcrdLog.Errorf("Unable to start server on %v: %v",
 			cfg.Listeners, err)
 		return err
 	}
 	addInterruptHandler(func() {
-		btcdLog.Infof("Gracefully shutting down the server...")
+		dcrdLog.Infof("Gracefully shutting down the server...")
 		server.Stop()
 		server.WaitForShutdown()
 	})
@@ -131,7 +163,7 @@ func btcdMain(serverChan chan<- *server) error {
 	// Wait for shutdown signal from either a graceful server stop or from
 	// the interrupt handler.
 	<-shutdownChannel
-	btcdLog.Info("Shutdown complete")
+	dcrdLog.Info("Shutdown complete")
 	return nil
 }
@@ -159,7 +191,7 @@ func main() {
 	}
 
 	// Work around defer not working after os.Exit()
-	if err := btcdMain(nil); err != nil {
+	if err := dcrdMain(nil); err != nil {
 		os.Exit(1)
 	}
 }
diff --git a/dcrec/edwards/ciphering.go b/dcrec/edwards/ciphering.go
new file mode 100644
index 00000000..507100e5
--- /dev/null
+++ b/dcrec/edwards/ciphering.go
@@ -0,0 +1,201 @@
+// Copyright (c) 2015 The btcsuite developers
+// Copyright (c) 2015 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package edwards
+
+import (
+	"bytes"
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/hmac"
+	"crypto/rand"
+	"crypto/sha256"
+	"crypto/sha512"
+	"errors"
+	"io"
+)
+
+var (
+	// ErrInvalidMAC occurs when the Message Authentication Code (MAC) check
+	// fails during decryption. This happens because of either an invalid
+	// private key or corrupt ciphertext.
+	ErrInvalidMAC = errors.New("invalid mac hash")
+
+	// errInputTooShort occurs when the input ciphertext to the Decrypt
+	// function is too short to contain the IV, public key, a single AES
+	// block, and the MAC (100 bytes).
+	errInputTooShort = errors.New("ciphertext too short")
+
+	// errUnsupportedCurve occurs when the first two bytes of the encrypted
+	// text aren't the curve marker 0xFFFF used by this package.
+	errUnsupportedCurve = errors.New("unsupported curve")
+	errInvalidYLength   = errors.New("invalid Y length, must be 32")
+	errInvalidPadding   = errors.New("invalid PKCS#7 padding")
+
+	// 0xFFFF = 65535
+	ciphCurveBytes = [2]byte{0xFF, 0xFF}
+	// 0x20 = 32
+	ciphCoordLength = [2]byte{0x00, 0x20}
+)
+
+// GenerateSharedSecret generates a shared secret based on a private key and a
+// public key using Diffie-Hellman key exchange (ECDH) (RFC 4753).
+// RFC5903 Section 9 states we should only return y.
+func GenerateSharedSecret(privkey *PrivateKey, pubkey *PublicKey) []byte {
+	x, y := pubkey.Curve.ScalarMult(pubkey.X, pubkey.Y, privkey.ecPk.D.Bytes())
+	return BigIntPointToEncodedBytes(x, y)[:]
+}
+
+// Encrypt encrypts data for the target public key using AES-256-CBC. It also
+// generates an ephemeral private key (whose public key is included in the
+// output).
+//
+//	struct {
+//		// Initialization Vector used for AES-256-CBC
+//		IV [16]byte
+//		// Public Key: curve(2) + len_of_Y(2) + Y (curve = 0xFFFF)
+//		PublicKey [36]byte
+//		// Cipher text
+//		Data []byte
++//		// HMAC-SHA-256 Message Authentication Code
+//		HMAC [32]byte
+//	}
+//
+// The primary aim is to ensure byte compatibility with Pyelliptic. Additionally,
+// refer to section 5.8.1 of ANSI X9.63 for rationale on this format.
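+//
+// The resulting message is 16 (IV) + 36 (curve marker, Y length, Y) +
+// len(PKCS#7-padded plaintext) + 32 (HMAC) bytes long.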
+func Encrypt(curve *TwistedEdwardsCurve, pubkey *PublicKey, in []byte) ([]byte, + error) { + ephemeral, err := GeneratePrivateKey(curve) + if err != nil { + return nil, err + } + ecdhKey := GenerateSharedSecret(ephemeral, pubkey) + derivedKey := sha512.Sum512(ecdhKey) + keyE := derivedKey[:32] + keyM := derivedKey[32:] + + paddedIn := addPKCSPadding(in) + // IV + Curve params/X/Y + padded plaintext/ciphertext + HMAC-256 + out := make([]byte, aes.BlockSize+36+len(paddedIn)+sha256.Size) + iv := out[:aes.BlockSize] + if _, err = io.ReadFull(rand.Reader, iv); err != nil { + return nil, err + } + // start writing public key + ePubX, ePubY := ephemeral.Public() + pbk := NewPublicKey(curve, ePubX, ePubY) + pb := pbk.Serialize() + offset := aes.BlockSize + + // curve and Y length + copy(out[offset:offset+4], append(ciphCurveBytes[:], ciphCoordLength[:]...)) + offset += 4 + // Y + copy(out[offset:offset+32], pb[0:32]) + offset += 32 + + // start encryption + block, err := aes.NewCipher(keyE) + if err != nil { + return nil, err + } + mode := cipher.NewCBCEncrypter(block, iv) + mode.CryptBlocks(out[offset:len(out)-sha256.Size], paddedIn) + + // start HMAC-SHA-256 + hm := hmac.New(sha256.New, keyM) + hm.Write(out[:len(out)-sha256.Size]) // everything is hashed + copy(out[len(out)-sha256.Size:], hm.Sum(nil)) // write checksum + + return out, nil +} + +// Decrypt decrypts data that was encrypted using the Encrypt function. +func Decrypt(curve *TwistedEdwardsCurve, priv *PrivateKey, in []byte) ([]byte, + error) { + // IV + Curve params/X/Y + 1 block + HMAC-256 + if len(in) < aes.BlockSize+36+aes.BlockSize+sha256.Size { + return nil, errInputTooShort + } + + // read iv + iv := in[:aes.BlockSize] + offset := aes.BlockSize + + // start reading pubkey + if !bytes.Equal(in[offset:offset+2], ciphCurveBytes[:]) { + return nil, errUnsupportedCurve + } + offset += 2 + + if !bytes.Equal(in[offset:offset+2], ciphCoordLength[:]) { + return nil, errInvalidYLength + } + offset += 2 + + yBytes := in[offset : offset+32] + offset += 32 + + pb := make([]byte, 32) + copy(pb[0:32], yBytes) + + // check if (X, Y) lies on the curve and create a Pubkey if it does + pubkey, err := ParsePubKey(curve, pb) + if err != nil { + return nil, err + } + + // check for cipher text length + if (len(in)-aes.BlockSize-offset-sha256.Size)%aes.BlockSize != 0 { + return nil, errInvalidPadding // not padded to 16 bytes + } + + // read hmac + messageMAC := in[len(in)-sha256.Size:] + + // generate shared secret + ecdhKey := GenerateSharedSecret(priv, pubkey) + derivedKey := sha512.Sum512(ecdhKey) + keyE := derivedKey[:32] + keyM := derivedKey[32:] + + // verify mac + hm := hmac.New(sha256.New, keyM) + hm.Write(in[:len(in)-sha256.Size]) // everything is hashed + expectedMAC := hm.Sum(nil) + if !hmac.Equal(messageMAC, expectedMAC) { + return nil, ErrInvalidMAC + } + + // start decryption + block, err := aes.NewCipher(keyE) + if err != nil { + return nil, err + } + mode := cipher.NewCBCDecrypter(block, iv) + // same length as ciphertext + plaintext := make([]byte, len(in)-offset-sha256.Size) + mode.CryptBlocks(plaintext, in[offset:len(in)-sha256.Size]) + + return removePKCSPadding(plaintext) +} + +// Implement PKCS#7 padding with block size of 16 (AES block size). + +// addPKCSPadding adds padding to a block of data +func addPKCSPadding(src []byte) []byte { + padding := aes.BlockSize - len(src)%aes.BlockSize + padtext := bytes.Repeat([]byte{byte(padding)}, padding) + return append(src, padtext...) 
+} + +// removePKCSPadding removes padding from data that was added with addPKCSPadding +func removePKCSPadding(src []byte) ([]byte, error) { + length := len(src) + padLength := int(src[length-1]) + if padLength > aes.BlockSize || length < aes.BlockSize { + return nil, errInvalidPadding + } + + return src[:length-padLength], nil +} diff --git a/dcrec/edwards/ciphering_test.go b/dcrec/edwards/ciphering_test.go new file mode 100644 index 00000000..3f28937f --- /dev/null +++ b/dcrec/edwards/ciphering_test.go @@ -0,0 +1,200 @@ +// Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package edwards + +import ( + "bytes" + "encoding/hex" + "math/big" + "testing" +) + +func TestGenerateSharedSecret(t *testing.T) { + c := new(TwistedEdwardsCurve) + c.InitParam25519() + privKey1, err := GeneratePrivateKey(c) + if err != nil { + t.Errorf("private key generation error: %s", err) + return + } + privKey2, err := GeneratePrivateKey(c) + if err != nil { + t.Errorf("private key generation error: %s", err) + return + } + + pk1x, pk1y := privKey1.Public() + pk1 := NewPublicKey(c, pk1x, pk1y) + pk2x, pk2y := privKey2.Public() + pk2 := NewPublicKey(c, pk2x, pk2y) + secret1 := GenerateSharedSecret(privKey1, pk2) + secret2 := GenerateSharedSecret(privKey2, pk1) + + if !bytes.Equal(secret1, secret2) { + t.Errorf("ECDH failed, secrets mismatch - first: %x, second: %x", + secret1, secret2) + } +} + +// Test 1: Encryption and decryption +func TestCipheringBasic(t *testing.T) { + c := new(TwistedEdwardsCurve) + c.InitParam25519() + privkey, err := GeneratePrivateKey(c) + if err != nil { + t.Fatal("failed to generate private key") + } + + in := []byte("Hey there dude. How are you doing? 
This is a test.") + + pk1x, pk1y := privkey.Public() + pk1 := NewPublicKey(c, pk1x, pk1y) + out, err := Encrypt(c, pk1, in) + if err != nil { + t.Fatal("failed to encrypt:", err) + } + + dec, err := Decrypt(c, privkey, out) + if err != nil { + t.Fatal("failed to decrypt:", err) + } + + if !bytes.Equal(in, dec) { + t.Error("decrypted data doesn't match original") + } +} + +func TestCiphering(t *testing.T) { + c := new(TwistedEdwardsCurve) + c.InitParam25519() + + pb, _ := hex.DecodeString("fe38240982f313ae5afb3e904fb8215fb11af1200592b" + + "fca26c96c4738e4bf8f") + pbBig := new(big.Int).SetBytes(pb) + pbBig.Mod(pbBig, c.N) + pb = pbBig.Bytes() + pb = copyBytes(pb)[:] + privkey, pubkey, err := PrivKeyFromScalar(c, pb) + if err != nil { + t.Error(err) + } + in := []byte("This is just a test.") + localOut, err := Encrypt(c, pubkey, in) + if err != nil { + t.Error(err) + } + + out, _ := hex.DecodeString("1ffcb6f11fb9dc57222382019ae710b2ffff0020503f4" + + "117665f80b226961a4a0c0ae229f3b914d43e36238be05b0799623ae6ea0209d3095" + + "04f86635c50baca78d11189d4dc02c2f32c4c11e9d50b04eb2d3ff4b9f95e7f2e90e" + + "0f4a8d64a2a4149c27d21f88f2dedc200f4b609936c0d67ca98") + + dec, err := Decrypt(c, privkey, out) + if err != nil { + t.Fatal("failed to decrypt:", err) + } + + dec, err = Decrypt(c, privkey, localOut) + if err != nil { + t.Fatal("failed to decrypt:", err) + } + + if !bytes.Equal(in, dec) { + t.Error("decrypted data doesn't match original") + } +} + +func TestCipheringErrors(t *testing.T) { + c := new(TwistedEdwardsCurve) + c.InitParam25519() + + privkey, err := GeneratePrivateKey(c) + if err != nil { + t.Fatal("failed to generate private key") + } + + tests1 := []struct { + ciphertext []byte // input ciphertext + }{ + {bytes.Repeat([]byte{0x00}, 133)}, // errInputTooShort + {bytes.Repeat([]byte{0x00}, 134)}, // errUnsupportedCurve + {bytes.Repeat([]byte{0xFF, 0xFF}, 134)}, // errInvalidXLength + {bytes.Repeat([]byte{0xFF, 0xFF, 0x00, 0x20}, 134)}, // errInvalidYLength + {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // IV + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, + 0x00, 0x20, // Y length + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Y + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ciphertext + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // MAC + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }}, // invalid pubkey + {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // IV + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, + 0x00, 0x20, // Y length + 0x7E, 0x76, 0xDC, 0x58, 0xF6, 0x93, 0xBD, 0x7E, // Y + 0x70, 0x10, 0x35, 0x8C, 0xE6, 0xB1, 0x65, 0xE4, + 0x83, 0xA2, 0x92, 0x10, 0x10, 0xDB, 0x67, 0xAC, + 0x11, 0xB1, 0xB5, 0x1B, 0x65, 0x19, 0x53, 0xD2, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ciphertext + // padding not aligned to 16 bytes + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // MAC + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }}, // errInvalidPadding + {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // IV + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, + 0xFF, 0xFF, + 0x00, 0x20, // Y length + 0x7E, 0x76, 0xDC, 0x58, 0xF6, 0x93, 0xBD, 0x7E, // Y + 0x70, 0x10, 0x35, 0x8C, 0xE6, 0xB1, 0x65, 0xE4, + 0x83, 0xA2, 0x92, 0x10, 0x10, 0xDB, 0x67, 0xAC, + 0x11, 0xB1, 0xB5, 0x1B, 0x65, 0x19, 0x53, 0xD2, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ciphertext + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // MAC + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }}, // ErrInvalidMAC + } + + for i, test := range tests1 { + _, err = Decrypt(c, privkey, test.ciphertext) + if err == nil { + t.Errorf("Decrypt #%d did not get error", i) + } + } + + // test error from removePKCSPadding + tests2 := []struct { + in []byte // input data + }{ + {bytes.Repeat([]byte{0x11}, 17)}, + {bytes.Repeat([]byte{0x07}, 15)}, + } + for i, test := range tests2 { + _, err = TstRemovePKCSPadding(test.in) + if err == nil { + t.Errorf("removePKCSPadding #%d did not get error", i) + } + } +} + +// TstRemovePKCSPadding makes the internal removePKCSPadding function available +// to the test package. +func TstRemovePKCSPadding(src []byte) ([]byte, error) { + return removePKCSPadding(src) +} diff --git a/dcrec/edwards/const.go b/dcrec/edwards/const.go new file mode 100644 index 00000000..5ec786dc --- /dev/null +++ b/dcrec/edwards/const.go @@ -0,0 +1,103 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package edwards + +import ( + "math/big" + + "github.com/decred/ed25519/edwards25519" +) + +var ( + // zero through eight are big.Int numbers useful in + // elliptical curve math. + zero = new(big.Int).SetInt64(0) + one = new(big.Int).SetInt64(1) + two = new(big.Int).SetInt64(2) + three = new(big.Int).SetInt64(3) + four = new(big.Int).SetInt64(4) + eight = new(big.Int).SetInt64(8) + + // fieldIntSize is the size of a field element encoded + // as bytes. + fieldIntSize = 32 + fieldElementSize = 10 + fieldElementBytesSize = 40 +) + +// feZero is the field element representation of zero. +var feZero = edwards25519.FieldElement{ + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, +} + +// feOne is the field element representation of one. This is +// also the neutral (null) element. +var feOne = edwards25519.FieldElement{ + 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, +} + +// feTwo is the field element representation of one. +var feTwo = edwards25519.FieldElement{ + 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, +} + +// feThree is the field element representation of one. +var feThree = edwards25519.FieldElement{ + 3, 0, 0, 0, 0, + 0, 0, 0, 0, 0, +} + +// feA is the field element representation of one. +var feA = edwards25519.FieldElement{ + 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} + +// fed is the field element representation of D. +var fed = edwards25519.FieldElement{ + -10913610, 13857413, -15372611, 6949391, 114729, + -8787816, -6275908, -3247719, -18696448, -12055116, +} + +// fed2 is the field element representation of D^2. +var fed2 = edwards25519.FieldElement{ + -21827239, -5839606, -30745221, 13898782, 229458, + 15978800, -12551817, -6495438, 29715968, 9444199, +} + +// feSqrtM1 is the field element representation of M^(1/2). +var feSqrtM1 = edwards25519.FieldElement{ + -32595792, -7943725, 9377950, 3500415, 12389472, + -272473, -25146209, -2005654, 326686, 11406482, +} + +// feI is the field element representation of I. 
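+// I is a square root of -1 modulo P (computed as 2^((P-1)/4) mod P in
+// InitParam25519), so its field element value matches feSqrtM1 above.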
+var feI = edwards25519.FieldElement{ + -32595792, -7943725, 9377950, 3500415, 12389472, + -272473, -25146209, -2005654, 326686, 11406482, +} + +// feExtBasePoint is the base point of the curve represented +// in projective extended format with field elements. +var feExtBasePoint = edwards25519.ExtendedGroupElement{ + edwards25519.FieldElement{ + 25485296, 5318399, 8791791, -8299916, -14349720, + 6939349, -3324311, -7717049, 7287234, -6577708, + }, + edwards25519.FieldElement{ + -758052, -1832720, 13046421, -4857925, 6576754, + 14371947, -13139572, 6845540, -2198883, -4003719, + }, + edwards25519.FieldElement{ + -947565, 6097708, -469190, 10704810, -8556274, + -15589498, -16424464, -16608899, 14028613, -5004649, + }, + edwards25519.FieldElement{ + 6966464, -2456167, 7033433, 6781840, 28785542, + 12262365, -2659449, 13959020, -21013759, -5262166, + }, +} diff --git a/dcrec/edwards/curve.go b/dcrec/edwards/curve.go new file mode 100644 index 00000000..5fc2abaf --- /dev/null +++ b/dcrec/edwards/curve.go @@ -0,0 +1,412 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package edwards + +import ( + "crypto/elliptic" + "math/big" + + "github.com/decred/ed25519/edwards25519" +) + +// TwistedEdwardsCurve extended an elliptical curve set of +// parameters to satisfy the interface of the elliptic package. +type TwistedEdwardsCurve struct { + *elliptic.CurveParams + H int // Cofactor of the curve + + A, D, I *big.Int // Edwards curve equation parameter constants + + // byteSize is simply the bit size / 8 and is provided for convenience + // since it is calculated repeatedly. + byteSize int +} + +// Params returns the parameters for the curve. +func (curve TwistedEdwardsCurve) Params() *elliptic.CurveParams { + return curve.CurveParams +} + +// Marshal converts a point into the 32 byte encoded Ed25519 form. +func Marshal(curve TwistedEdwardsCurve, x, y *big.Int) []byte { + return BigIntPointToEncodedBytes(x, y)[:] +} + +// Unmarshall converts a point into the 32 byte encoded Ed25519 form. +func Unmarshal(curve *TwistedEdwardsCurve, data []byte) (x, y *big.Int) { + var err error + x, y, err = curve.EncodedBytesToBigIntPoint(copyBytes(data)) + if err != nil { + x = nil + y = nil + } + return +} + +// RecoverXBigInt recovers the X value for some Y value, for a coordinate +// on the Ed25519 curve given as a big integer Y value. +func (curve *TwistedEdwardsCurve) RecoverXBigInt(xIsNeg bool, + y *big.Int) *big.Int { + // (y^2 - 1) + l := new(big.Int).Mul(y, y) + l.Sub(l, one) + + // inv(d*y^2+1) + temp := new(big.Int).Mul(y, y) + temp.Mul(temp, curve.D) + temp.Add(temp, one) + r := curve.invert(temp) + + // x2 = (y^2 - 1) * invert(d*y^2+1) + x2 := new(big.Int).Mul(r, l) + + // x = exp(x^2,(P+3)/8, P) + qp3 := new(big.Int).Add(curve.P, three) + qp3.Div(qp3, eight) // /= curve.H + x := new(big.Int).Exp(x2, qp3, curve.P) + + // check (x^2 - x2) % q != 0 + x22 := new(big.Int).Mul(x, x) + xsub := new(big.Int).Sub(x22, x2) + xsub.Mod(xsub, curve.P) + if xsub.Cmp(zero) != 0 { + ximod := new(big.Int) + ximod.Mul(x, curve.I) + ximod.Mod(ximod, curve.P) + x.Set(ximod) + } + + xmod2 := new(big.Int).Mod(x, two) + if xmod2.Cmp(zero) != 0 { + x.Sub(curve.P, x) + } + + // We got the wrong x, negate it to get the right one. 
+ if xIsNeg != (x.Bit(0) == 1) { + x.Sub(curve.P, x) + } + + return x +} + +// RecoverXFieldElement recovers the X value for some Y value, for a coordinate +// on the Ed25519 curve given as a field element. Y value. Probably the fastest +// way to get your respective X from Y. +func (curve *TwistedEdwardsCurve) RecoverXFieldElement(xIsNeg bool, + y *edwards25519.FieldElement) *edwards25519.FieldElement { + // (y^2 - 1) + l := new(edwards25519.FieldElement) + edwards25519.FeSquare(l, y) + edwards25519.FeSub(l, l, &feOne) + + // inv(d*y^2+1) + r := new(edwards25519.FieldElement) + edwards25519.FeSquare(r, y) + edwards25519.FeMul(r, r, &fed) + edwards25519.FeAdd(r, r, &feOne) + edwards25519.FeInvert(r, r) + + x2 := new(edwards25519.FieldElement) + edwards25519.FeMul(x2, r, l) + + // Get a big int so we can do the exponentiation. + x2Big := FieldElementToBigInt(x2) + + // x = exp(x^2,(P+3)/8, P) + qp3 := new(big.Int).Add(curve.P, three) + qp3.Div(qp3, eight) // /= curve.H + xBig := new(big.Int).Exp(x2Big, qp3, curve.P) + + // Convert back to a field element and do + // the rest. + x := BigIntToFieldElement(xBig) + + // check (x^2 - x2) % q != 0 + x22 := new(edwards25519.FieldElement) + edwards25519.FeSquare(x22, x) + xsub := new(edwards25519.FieldElement) + edwards25519.FeSub(xsub, x22, x2) + xsubBig := FieldElementToBigInt(xsub) + xsubBig.Mod(xsubBig, curve.P) + + if xsubBig.Cmp(zero) != 0 { + xi := new(edwards25519.FieldElement) + edwards25519.FeMul(xi, x, &feI) + xiModBig := FieldElementToBigInt(xi) + xiModBig.Mod(xiModBig, curve.P) + xiMod := BigIntToFieldElement(xiModBig) + + x = xiMod + } + + xBig = FieldElementToBigInt(x) + xmod2 := new(big.Int).Mod(xBig, two) + if xmod2.Cmp(zero) != 0 { + // TODO replace this with FeSub + xBig.Sub(curve.P, xBig) + x = BigIntToFieldElement(xBig) + } + + // We got the wrong x, negate it to get the right one. + isNegative := edwards25519.FeIsNegative(x) == 1 + if xIsNeg != isNegative { + edwards25519.FeNeg(x, x) + } + + return x +} + +// IsOnCurve returns bool to say if the point (x,y) is on the curve by +// checking (y^2 - x^2 - 1 - dx^2y^2) % P == 0. +func (curve *TwistedEdwardsCurve) IsOnCurve(x *big.Int, y *big.Int) bool { + // Convert to field elements. + xB := BigIntToEncodedBytes(x) + yB := BigIntToEncodedBytes(y) + + yfe := new(edwards25519.FieldElement) + xfe := new(edwards25519.FieldElement) + edwards25519.FeFromBytes(yfe, yB) + edwards25519.FeFromBytes(xfe, xB) + + x2 := new(edwards25519.FieldElement) + edwards25519.FeSquare(x2, xfe) + y2 := new(edwards25519.FieldElement) + edwards25519.FeSquare(y2, yfe) + + dx2y2 := new(edwards25519.FieldElement) + edwards25519.FeMul(dx2y2, &fed, x2) + edwards25519.FeMul(dx2y2, dx2y2, y2) + + enum := new(edwards25519.FieldElement) + edwards25519.FeSub(enum, y2, x2) + edwards25519.FeSub(enum, enum, &feOne) + edwards25519.FeSub(enum, enum, dx2y2) + + enumBig := FieldElementToBigInt(enum) + enumBig.Mod(enumBig, curve.P) + + if enumBig.Cmp(zero) != 0 { + return false + } + + // Check if we're in the cofactor of the curve (8). + modEight := new(big.Int) + modEight.Mod(enumBig, eight) + if modEight.Cmp(zero) != 0 { + return false + } + + return true +} + +// cachedGroupElement is a cached extended group element derived from +// another extended group element, for use in computation. +type cachedGroupElement struct { + yPlusX, yMinusX, Z, T2d edwards25519.FieldElement +} + +// toCached converts an extended group element to a useful intermediary +// containing precalculated values. 
+func toCached(r *cachedGroupElement, p *edwards25519.ExtendedGroupElement) { + edwards25519.FeAdd(&r.yPlusX, &p.Y, &p.X) + edwards25519.FeSub(&r.yMinusX, &p.Y, &p.X) + edwards25519.FeCopy(&r.Z, &p.Z) + edwards25519.FeMul(&r.T2d, &p.T, &fed2) +} + +// Add adds two points represented by pairs of big integers on the elliptical +// curve. +func (curve *TwistedEdwardsCurve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) { + // Convert to extended from affine. + a := BigIntPointToEncodedBytes(x1, y1) + aEGE := new(edwards25519.ExtendedGroupElement) + aEGE.FromBytes(a) + + b := BigIntPointToEncodedBytes(x2, y2) + bEGE := new(edwards25519.ExtendedGroupElement) + bEGE.FromBytes(b) + + // Cache b for use in group element addition. + bCached := new(cachedGroupElement) + toCached(bCached, bEGE) + + p := aEGE + q := bCached + + // geAdd(r*CompletedGroupElement, p*ExtendedGroupElement, + // q*CachedGroupElement) + // r is the result. + r := new(edwards25519.CompletedGroupElement) + var t0 edwards25519.FieldElement + + edwards25519.FeAdd(&r.X, &p.Y, &p.X) + edwards25519.FeSub(&r.Y, &p.Y, &p.X) + edwards25519.FeMul(&r.Z, &r.X, &q.yPlusX) + edwards25519.FeMul(&r.Y, &r.Y, &q.yMinusX) + edwards25519.FeMul(&r.T, &q.T2d, &p.T) + edwards25519.FeMul(&r.X, &p.Z, &q.Z) + edwards25519.FeAdd(&t0, &r.X, &r.X) + edwards25519.FeSub(&r.X, &r.Z, &r.Y) + edwards25519.FeAdd(&r.Y, &r.Z, &r.Y) + edwards25519.FeAdd(&r.Z, &t0, &r.T) + edwards25519.FeSub(&r.T, &t0, &r.T) + + rEGE := new(edwards25519.ExtendedGroupElement) + r.ToExtended(rEGE) + + s := new([32]byte) + rEGE.ToBytes(s) + + x, y, _ = curve.EncodedBytesToBigIntPoint(s) + + return +} + +// Double adds the same pair of big integer coordinates to itself on the +// elliptical curve. +func (curve *TwistedEdwardsCurve) Double(x1, y1 *big.Int) (x, y *big.Int) { + // Convert to extended projective coordinates. + a := BigIntPointToEncodedBytes(x1, y1) + aEGE := new(edwards25519.ExtendedGroupElement) + aEGE.FromBytes(a) + + r := new(edwards25519.CompletedGroupElement) + aEGE.Double(r) + rEGE := new(edwards25519.ExtendedGroupElement) + r.ToExtended(rEGE) + + s := new([32]byte) + rEGE.ToBytes(s) + x, y, _ = curve.EncodedBytesToBigIntPoint(s) + + return +} + +// ScalarMult returns k*(Bx,By) where k is a number in big-endian form. This +// uses the repeated doubling method, which is variable time. +// TODO use a constant time method to prevent side channel attacks. +func (curve *TwistedEdwardsCurve) ScalarMult(x1, y1 *big.Int, + k []byte) (x, y *big.Int) { + // Convert the scalar to a big int. + s := new(big.Int).SetBytes(k) + + // Get a new group element to do cached doubling + // calculations in. + dEGE := new(edwards25519.ExtendedGroupElement) + dEGE.Zero() + + // Use the doubling method for the multiplication. + // p := given point + // q := point(zero) + // for each bit in the scalar, descending: + // double(q) + // if bit == 1: + // add(q, p) + // return q + // + // Note that the addition is skipped for zero bits, + // making this variable time and thus vulnerable to + // side channel attack vectors. 
+ for i := s.BitLen() - 1; i >= 0; i-- { + dCGE := new(edwards25519.CompletedGroupElement) + dEGE.Double(dCGE) + dCGE.ToExtended(dEGE) + if s.Bit(i) == 1 { + ss := new([32]byte) + dEGE.ToBytes(ss) + var err error + xi, yi, err := curve.EncodedBytesToBigIntPoint(ss) + if err != nil { + return nil, nil + } + xAdd, yAdd := curve.Add(xi, yi, x1, y1) + dTempBytes := BigIntPointToEncodedBytes(xAdd, yAdd) + dEGE.FromBytes(dTempBytes) + } + } + + finalBytes := new([32]byte) + dEGE.ToBytes(finalBytes) + + var err error + x, y, err = curve.EncodedBytesToBigIntPoint(finalBytes) + if err != nil { + return nil, nil + } + + return +} + +// ScalarBaseMult returns k*G, where G is the base point of the group +// and k is an integer in big-endian form. +// TODO Optimize this with field elements +func (curve *TwistedEdwardsCurve) ScalarBaseMult(k []byte) (x, y *big.Int) { + return curve.ScalarMult(curve.Gx, curve.Gy, k) +} + +// ScalarAdd adds two scalars and returns the sum mod N. +func ScalarAdd(a, b *big.Int) *big.Int { + feA := BigIntToFieldElement(a) + feB := BigIntToFieldElement(b) + sum := new(edwards25519.FieldElement) + + edwards25519.FeAdd(sum, feA, feB) + sumArray := new([32]byte) + edwards25519.FeToBytes(sumArray, sum) + + return EncodedBytesToBigInt(sumArray) +} + +// InitParam25519 initializes an instance of the Ed25519 curve. +func (curve *TwistedEdwardsCurve) InitParam25519() { + // The prime modulus of the field. + // P = 2^255-19 + curve.CurveParams = new(elliptic.CurveParams) + curve.P = new(big.Int) + curve.P.SetBit(zero, 255, 1).Sub(curve.P, big.NewInt(19)) + + // The prime order for the base point. + // N = 2^252 + 27742317777372353535851937790883648493 + qs, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10) + curve.N = new(big.Int) + curve.N.SetBit(zero, 252, 1).Add(curve.N, qs) // AKA Q + + curve.A = new(big.Int) + curve.A.SetInt64(-1).Add(curve.P, curve.A) + + // d = -121665 * inv(121666) + da := new(big.Int).SetInt64(-121665) + ds := new(big.Int).SetInt64(121666) + di := curve.invert(ds) + curve.D = new(big.Int).Mul(da, di) + + // I = expmod(2,(q-1)/4,q) + psn := new(big.Int) + psn.SetBit(zero, 255, 1).Sub(psn, big.NewInt(19)) + psn.Sub(psn, one) + psn.Div(psn, four) + curve.I = psn.Exp(two, psn, curve.P) + + // The base point. + curve.Gx = new(big.Int) + curve.Gx.SetString("151122213495354007725011514095885315"+ + "11454012693041857206046113283949847762202", 10) + curve.Gy = new(big.Int) + curve.Gy.SetString("463168356949264781694283940034751631"+ + "41307993866256225615783033603165251855960", 10) + + curve.BitSize = 256 + curve.H = 8 + + // Provided for convenience since this gets computed repeatedly. + curve.byteSize = curve.BitSize / 8 +} + +// Edwards returns a Curve which implements Ed25519. +func Edwards() *TwistedEdwardsCurve { + c := new(TwistedEdwardsCurve) + c.InitParam25519() + return c +} diff --git a/dcrec/edwards/curve_test.go b/dcrec/edwards/curve_test.go new file mode 100644 index 00000000..6455d396 --- /dev/null +++ b/dcrec/edwards/curve_test.go @@ -0,0 +1,200 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package edwards + +import ( + "encoding/hex" + "math/rand" + "os/exec" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +type XRecoveryVector struct { + bIn *[32]byte +} + +func testPointXRecoveryVectors() []XRecoveryVector { + r := rand.New(rand.NewSource(54321)) + + numCvs := 1000 + cvs := make([]XRecoveryVector, numCvs, numCvs) + for i := 0; i < numCvs; i++ { + bIn := new([32]byte) + for j := 0; j < fieldIntSize; j++ { + randByte := r.Intn(255) + bIn[j] = uint8(randByte) + } + + cvs[i] = XRecoveryVector{bIn} + r.Seed(int64(i) + 54321) + } + + return cvs +} + +// Tested functions: +// BigIntPointToEncodedBytes +// extendedToBigAffine +// EncodedBytesToBigIntPoint +func TestXRecovery(t *testing.T) { + curve := new(TwistedEdwardsCurve) + curve.InitParam25519() + + for _, vector := range testPointXRecoveryVectors() { + isNegative := vector.bIn[31]>>7 == 1 + notOnCurve := false + _, y, err := curve.EncodedBytesToBigIntPoint(vector.bIn) + // The random point wasn't on the curve. + if err != nil { + notOnCurve = true + } + + if notOnCurve { + y = EncodedBytesToBigInt(vector.bIn) + } + + x2 := curve.RecoverXBigInt(isNegative, y) + if !curve.IsOnCurve(x2, y) { + assert.Equal(t, notOnCurve, true) + } else { + assert.Equal(t, notOnCurve, false) + b2 := BigIntPointToEncodedBytes(x2, y) + assert.Equal(t, vector.bIn, b2) + } + + yFE := EncodedBytesToFieldElement(vector.bIn) + x3 := curve.RecoverXFieldElement(isNegative, yFE) + x3BI := FieldElementToBigInt(x3) + if !curve.IsOnCurve(x3BI, y) { + assert.Equal(t, notOnCurve, true) + } else { + assert.Equal(t, notOnCurve, false) + b3 := BigIntPointToEncodedBytes(x3BI, y) + assert.Equal(t, vector.bIn, b3) + } + } +} + +// Tested functions: +// BigIntPointToEncodedBytes +// extendedToBigAffine +// EncodedBytesToBigIntPoint +func TestAdd(t *testing.T) { + curve := new(TwistedEdwardsCurve) + curve.InitParam25519() + tpcv := testPointConversionVectors() + + for i, _ := range tpcv { + if i == 0 { + continue + } + + x1, y1, err := curve.EncodedBytesToBigIntPoint(tpcv[i-1].bIn) + // The random point wasn't on the curve. + if err != nil { + continue + } + point1AsStr := hex.EncodeToString(tpcv[i-1].bIn[:]) + + x2, y2, err := curve.EncodedBytesToBigIntPoint(tpcv[i].bIn) + // The random point wasn't on the curve. + if err != nil { + continue + } + point2AsStr := hex.EncodeToString(tpcv[i].bIn[:]) + + x, y := curve.Add(x1, y1, x2, y2) + pointEnc := BigIntPointToEncodedBytes(x, y) + pointEncAsStr := hex.EncodeToString(pointEnc[:]) + + // Test python lib point --> bytes versus our results. 
+ args := []string{"testdata/addpoints.py", point1AsStr, point2AsStr} + pyHexStr, _ := exec.Command("python", args...).Output() + stripped := strings.TrimSpace(string(pyHexStr)) + assert.Equal(t, pointEncAsStr, string(stripped)) + } +} + +type ScalarMultVectorHex struct { + bIn string // Point + s string // 32 byte scalar + bRes string // Resulting point +} + +func testVectorsScalarMultHex() []ScalarMultVectorHex { + return []ScalarMultVectorHex{ + ScalarMultVectorHex{"a4185ac0436e4487cf2c4db66465b4167b0c884be5679ac2c6c5a675c2313216", "3628cff35868bbc95ae4c8d8d0536851a6597e6c96874a2c5b6cee489c1dda56", "0b398bc7c29f05c7d67411824173a9830936273eb4ffd7e546c9ef62bf59f821"}, + ScalarMultVectorHex{"7b4fae97c829420c9e132f2e1b0ad835f39af9c9d245c87121db68320f957729", "43d12a94e0dfed6489162197ff51769ece9c95a4a784b39926bc5e56703c1554", "d10b9f81cdc426daa6425f9c37057e7090102848927fdde0bb0b07191b33be02"}, + ScalarMultVectorHex{"5563cfdba3e2653b2b9f8bb43566e0b6b788713196ea65fc6fba9cd760c71205", "ed82427d43dded53c4b4fd204dba3d4bf09cf95e7821cd23c35b36180a23c00e", "f61f95ef730e1183dcdb455faea04a00f81d5a5f15e3ac709a39e0b206c72245"}, + ScalarMultVectorHex{"b20df028f5ae0409b7131e3165ce5e2c1982e32b6ae8cf2dc9369685444225f3", "19a4cf4eb98082b44460cc0d94ea482c20c96650f71e13d37c9b5b8b132709f9", "0ae96cfa06dd871e57c0423b390e05e3f92071f1efcaa11ba4d8a8a793fd6594"}, + ScalarMultVectorHex{"42ef8d3ff3b53d14a28992a47201072b4f34d65cc801a033b495502058f136be", "816295ac93196dcd99fd93497c525ae2c77ef28581f287784c18cdedf713ad1c", "df01249832f7802ecc1136946d7d9d422c6db8049f1ec22fe6cdea4732e8c11e"}, + ScalarMultVectorHex{"58e49f87bc33d252824c2673b1e6d03948a564b095ef0fcc2db9ab068859e761", "4e3604d93eb17aeacb600b5dd101756711e56edeb61b4e096d9831f1b678fecb", "4ea973a59bdab17664ee3e2d2c58048bfe5cb648b8316c170677692317f111d4"}, + ScalarMultVectorHex{"5b4406c40c5c8edbc4bc560a37336572425348a6613765e7285431855b35c1ae", "98aa0700f27292de4b01a519b322cfa1b6f2d5405c8a0aec011206db44f8661f", "d61f265b145f0a6b6fd03d6cc3fbf533ff3c5b1ab3f6889f357ab1b89fe59ad6"}, + ScalarMultVectorHex{"b11f09eac50b0e09980f5cbf12511329a7be3c011c7714555ecc99d3058c0c31", "c6b75a8ff9b5d642e78e75f1760de923378e2c117ff55d08e8c9471a9857b1a9", "e9a913f01dcf22c5529041accdcf84991241dd2459450c2cf09b1f7557969cfd"}, + ScalarMultVectorHex{"2ef244e1a0d29c85a4d9efcc76e73229474221b5dbc7d0a5e5d0a3fe7af2260a", "a17a0742c64b377c4d21b9374c18d2357705129891e493ae0a5ed3944859358d", "4394da4b755f93b5a071084b8d3ddd2d2b6fe051850ce020b78626b2cde3af0f"}, + ScalarMultVectorHex{"58a70db75e2728e9b8104cb0c0d3a34c0d9f7df962bd49d972c8ffd2f6d166ed", "f257355233d825370122f0b96a2d65862674773e4d8fc8a3e098d91df2269377", "43d399cbc1d906b2070e5bd2ea22ee7ec16acacf1c066eff83b66a4a4e6e045a"}, + ScalarMultVectorHex{"d28cec42d45b3a3412fa304cf7ec8569799c206c15d660bd397bae71038e2247", "5998863d2c6ec41609ca6ccf759b079c08ff4182080cd43ed94fd50ba76f1250", "c1f277c9a1a0fa5cdac73ddbeaea8b23bf953b8bb116a28927251ca1c66dbcfd"}, + ScalarMultVectorHex{"4ed7441db6ab3e5eaab93073c324f623c4dd43236b85544243c2ec8f4a470292", "c35e95a67108d5d540f5976ccdfe5efc365daac97678c1754d2de9b64562a5df", "bbb7a5f331f0edec2cbbbf03bc368b9e79be88a40b2ef5d037ff4e5f86526dc2"}, + ScalarMultVectorHex{"d7ca509d7fd8576dd3e1254f267d25693d610e1641df4aec416ade68eaab750a", "5166fa5902ec25c6956373d1c160a5a187fd265ed1def088b8737b82962c00b9", "1a631f41db3a8aa1e6373b6c1826aed1cf1c5b726dfa3a6f3aedf725139edeb7"}, + ScalarMultVectorHex{"fb6ef51f9d2d8113e5715f7f383a438e0af9e2a938981bbbaccaf23af1c44846", 
"bcf743bc9ca831ef5affe4408416526d7f97be16c6489e8480e21cd5a4896230", "6449a7997a35c140e296dd52ca6c2374a39a37a6328b42f80cebcad080ae12fd"}, + ScalarMultVectorHex{"1c6309c2895303ecb3315b3d557e2544d2d2af78bdbaeda61c9334cedf9416a3", "cf6b3b461f20b49f863292e990b2813b4ea1ca07b1372e423511e01f2ce6aa9f", "9f91711ddf5cc8127bebf3faf1bc919f8db37ab35c4a8ab621314d4532568ab7"}, + ScalarMultVectorHex{"1433d904b51a4a9d3654b8415fde9ce577652e64db634d0d8a103bfbd5f4d259", "56ec65e69dd0f10377c764fa5caca260af73d44a9ed2b838a3485055cd6216e1", "ef18bc7bea44254276a8755940ee97fa436b4ac904f9e8232af69591dcca99ad"}, + ScalarMultVectorHex{"662bdf3aabd9fc2c252ce38a272ac84303211653e0d88bc9b25ae4a6b9009813", "1472fd6a26b8ac15518eb66c4fab44f28568f3f127d40d70b643fc1b85ec35cc", "ca2627334a90483fa6f2532e83fc38fed0e75f2388feee99cc69102297508b23"}, + ScalarMultVectorHex{"a2e6a7c28f87bb95ea611fcfc3e804dd772e3b6dad6e60b3b6d3aed1136bc009", "cd9a9ffe3a346baa5ac98c504d197690139b53ebb01227dcebda6de94143c9ee", "fc010f6110e82a1fc9df7c3d0e9b3d6bd550e32d41bb4aaca4330086faa974df"}, + ScalarMultVectorHex{"0d2ed068749d471e0ccc53cbdcbb11022f2f6e5fe82faaf662566739ad93a46a", "5102094e6e5ede9a1d74dad9f41f19842f32a378e92f9b8fe46c49513148ba98", "adcfbb2da1b6b3fdf49ff91a103ce18b5fa7cdc03dc96d5eea62db88fef24444"}, + ScalarMultVectorHex{"3025fb1259e8bf5b629d167035a83bc4bae1a4d92790706da471c77e83bbff65", "46ddd06a554a5d65a0bb513ccfa962d27e8bb658f2c0c08124ea8b61b3912457", "afa21072bffee77e9df0d0d3afae7c31723cc7a52f82269a315f385b5b19b9d4"}, + ScalarMultVectorHex{"b3429d95ac09581860251b1da2d54c6d361e3e3a116d0f153c9d29a8b2eb14e9", "854ca13b76c837eda31fb06232c8e2c56396a09a8c4fcc517a5a54c1dc2d01ea", "298dccfe0d70ce69e0e12613008901b484c7c6b9ae3c62cbe5152b8a12c22296"}, + ScalarMultVectorHex{"efa0976786194dd52174f436ea27d4e7dc9599f7ce4424bfbac4cdc8f05388d8", "1f253bc2a828874fc46adc255f6203928bb77a84b7d545ee3746c4686d411206", "71851029252000821bd156cce117ab193471f1e2eba21897403bf8307bc666e4"}, + ScalarMultVectorHex{"fbc1f969efdd138c7c3cfeb7cfa906ca20c3522533a14745350074a57045f411", "3f82d419f4da810ca0e64a4e6e5fb3011b919417d2edc3e01a4cdb8fb97ed8a5", "15f9defa2f0b6ceca4b298535a924a55f8b45ce9ee101f9b273744dab6783704"}, + ScalarMultVectorHex{"9777c96b78a9878add14578e759246aeb7678554ea843439acd346d24563ee52", "c67fd95f24cf5b7dab1bd3fe84d6ab0b5a751ee6f125d9693f96da90b57ae366", "9cef760dd9d3371f68948f6c3b8b60b3a3e5e4d2d6f7f2204e391786f66f53ab"}, + ScalarMultVectorHex{"58848e020b5d60fafbfb1b193b6d011ddc4342b4fd9846ff53793d780ffd97a9", "35ce524d1411196917a8819bd5e14482867b0443b675d08bddf7a363d03d8743", "fde89cbd9c19e4d6e6bcf1a8aa8243954249cf19d2d0910b38bffb5b7cf40289"}, + ScalarMultVectorHex{"3e4ba1e84c427835ec9d7f5f1ad242d286f691ee7193c0ed4b9cf17ce38388e3", "4292f0dbedcffeecb0e26c4d56debc2a9b22e963bd430a44942a6538677137e9", "56b47522d793c0f3fed33bec100fa0dbdec79f5e00ca9e7003884410e8b7f8a1"}, + ScalarMultVectorHex{"f330b3cdeef0b31f1e8e0787cee38e81216a0a90f536c81c33293d577ec32cb4", "143aff9e186d585b9167eb60780f2ed424751431d141e19ae42d6745cbd77e41", "d82cb0b18e90f2bdf2fc6b0ac34ec53325e19262dec88c22eddc4874207c35b6"}, + ScalarMultVectorHex{"adf0a708f1c3e0a0e56b1d74f91861462a2c20619afded35560e6a57db9df026", "de62849260fae4e4f4a707ffa0f95a6a7b1a16b2b6083fa93f4f7bfbaeb6823f", "7362db606fafb2c5b336eb36760ea87cb7412fe1e2ac152f22450074e6a14972"}, + ScalarMultVectorHex{"48c1eb698fc29e1ffb28d5bbdddd896343935d574e4800c9ec336da133db7253", "41a37cae219b9b5097ecb76530f3de685d7c7aaa48f8a5237a6525bb84d4c9b7", "d834ea9ea06272263059948e107c8f083c197a76a3dec6b76ee169729ee6f11f"}, + 
ScalarMultVectorHex{"93cc6d1727b2bd5f918563cf8571a9c0f6b771c25cb9aa47ebdff08bbbf00f17", "ad00a73d9a3f54b29e3e3a65d4b35f28aa0bc6b3aa1e82d628489368aa4db441", "6f4789dd5250e097c2c2025f31920d974dc58ce5d204437a2db578ae5d2aa544"}, + ScalarMultVectorHex{"4f651b5f0918f14ea45433ed332dc4bf46456e076903ede2313694b96faa8c40", "12a331c658d41a8a97de1db8bd392b53558fb2f1724c904d476652d5e82eea95", "0fa278fb83a017f1058ef7fb937747ab8646d4d5cac694a4a3e1b24340ba71d9"}, + } +} + +type ScalarMultVector struct { + bIn *[32]byte // Point + s *[32]byte // 32 byte scalar + bRes *[32]byte // Resulting point +} + +func testVectorsScalarMult() []ScalarMultVector { + tvsmh := testVectorsScalarMultHex() + tvsms := make([]ScalarMultVector, 0) + for _, v := range tvsmh { + bIn, _ := hex.DecodeString(v.bIn) + s, _ := hex.DecodeString(v.s) + bRes, _ := hex.DecodeString(v.bRes) + lv := ScalarMultVector{copyBytes(bIn), copyBytes(s), copyBytes(bRes)} + tvsms = append(tvsms, lv) + } + + return tvsms +} + +// Tested functions: +// Add +// Double +// ScalarMult +func TestScalarMult(t *testing.T) { + curve := new(TwistedEdwardsCurve) + curve.InitParam25519() + + for _, vector := range testVectorsScalarMult() { + x, y, _ := curve.EncodedBytesToBigIntPoint(vector.bIn) + sBig := EncodedBytesToBigInt(vector.s) // We need big endian + xMul, yMul := curve.ScalarMult(x, y, sBig.Bytes()) + finalPoint := BigIntPointToEncodedBytes(xMul, yMul) + assert.Equal(t, vector.bRes, finalPoint) + } +} diff --git a/dcrec/edwards/ecdsa.go b/dcrec/edwards/ecdsa.go new file mode 100644 index 00000000..50b5f81d --- /dev/null +++ b/dcrec/edwards/ecdsa.go @@ -0,0 +1,358 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package edwards + +import ( + "bytes" + "crypto/hmac" + "fmt" + "hash" + "io" + "math/big" + + "crypto/sha512" + "github.com/btcsuite/fastsha256" + "github.com/decred/ed25519" + "github.com/decred/ed25519/edwards25519" +) + +// BIG CAVEAT +// Memory management is kind of sloppy and whether or not your keys or +// nonces can be found in memory later is likely a product of when the +// garbage collector runs. +// Signing/EC mult is also not constant side, so don't use this in any +// application where you think you might be vulnerable to side channel +// attacks. + +var ( + // oneInitializer is used to fill a byte slice with byte 0x01. It is provided + // here to avoid the need to create it multiple times. + oneInitializer = []byte{0x01} + + // ecTypeEdwards is the ECDSA type for the chainec interface. + ecTypeEdwards int = 1 +) + +// GenerateKey generates a key using a random number generator, returning +// the private scalar and the corresponding public key points from a +// random secret. +func GenerateKey(curve *TwistedEdwardsCurve, rand io.Reader) (priv []byte, x, + y *big.Int, err error) { + var pub *[PubKeyBytesLen]byte + var privArray *[PrivKeyBytesLen]byte + pub, privArray, err = ed25519.GenerateKey(rand) + priv = privArray[:] + + x, y, err = curve.EncodedBytesToBigIntPoint(pub) + if err != nil { + return nil, nil, nil, err + } + + return +} + +// Sign signs a message 'hash' using the given private key priv. It doesn't +// actually user the random reader (the lib is maybe deterministic???). +func SignFromSecret(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, + err error) { + r, s, err = SignFromSecretNoReader(priv, hash) + + return +} + +// Sign signs a message 'hash' using the given private key priv. 
It doesn't +// actually user the random reader. +func SignFromSecretNoReader(priv *PrivateKey, hash []byte) (r, s *big.Int, + err error) { + privBytes := priv.SerializeSecret() + privArray := copyBytes64(privBytes) + sig := ed25519.Sign(privArray, hash) + + // The signatures are encoded as + // sig[0:32] R, a point encoded as little endian + // sig[32:64] S, scalar multiplication/addition results = (ab+c) mod l + // encoded also as little endian + rBytes := copyBytes(sig[0:32]) + r = EncodedBytesToBigInt(rBytes) + sBytes := copyBytes(sig[32:64]) + s = EncodedBytesToBigInt(sBytes) + + return +} + +// nonceRFC6979 is a local instatiation of deterministic nonce generation +// by the standards of RFC6979. +func nonceRFC6979(curve *TwistedEdwardsCurve, privkey []byte, hash []byte, + extra []byte, version []byte) []byte { + pkD := new(big.Int).SetBytes(privkey) + defer pkD.SetInt64(0) + kBig := NonceRFC6979(curve, pkD, hash, extra, version) + defer kBig.SetInt64(0) + k := BigIntToEncodedBytesNoReverse(kBig) + return k[:] +} + +// NonceRFC6979 generates an ECDSA nonce (`k`) deterministically according to +// RFC 6979. It takes a 32-byte hash as an input and returns 32-byte nonce to +// be used in ECDSA algorithm. +func NonceRFC6979(curve *TwistedEdwardsCurve, privkey *big.Int, hash []byte, + extra []byte, version []byte) *big.Int { + q := curve.Params().N + x := privkey + alg := fastsha256.New + + qlen := q.BitLen() + holen := alg().Size() + rolen := (qlen + 7) >> 3 + bx := append(int2octets(x, rolen), bits2octets(hash, curve, rolen)...) + if len(extra) == 32 { + bx = append(bx, extra...) + } + if len(version) == 16 && len(extra) == 32 { + bx = append(bx, extra...) + } + if len(version) == 16 && len(extra) != 32 { + bx = append(bx, bytes.Repeat([]byte{0x00}, 32)...) + bx = append(bx, version...) + } + + // Step B + v := bytes.Repeat(oneInitializer, holen) + + // Step C (Go zeroes the all allocated memory) + k := make([]byte, holen) + + // Step D + k = mac(alg, k, append(append(v, 0x00), bx...)) + + // Step E + v = mac(alg, k, v) + + // Step F + k = mac(alg, k, append(append(v, 0x01), bx...)) + + // Step G + v = mac(alg, k, v) + + // Step H + for { + // Step H1 + var t []byte + + // Step H2 + for len(t)*8 < qlen { + v = mac(alg, k, v) + t = append(t, v...) + } + + // Step H3 + secret := hashToInt(t, curve) + if secret.Cmp(one) >= 0 && secret.Cmp(q) < 0 { + return secret + } + k = mac(alg, k, append(v, 0x00)) + v = mac(alg, k, v) + } +} + +// hashToInt converts a hash value to an integer. There is some disagreement +// about how this is done. [NSA] suggests that this is done in the obvious +// manner, but [SECG] truncates the hash to the bit-length of the curve order +// first. We follow [SECG] because that's what OpenSSL does. Additionally, +// OpenSSL right shifts excess bits from the number if the hash is too large +// and we mirror that too. +// This is borrowed from crypto/ecdsa. +func hashToInt(hash []byte, c *TwistedEdwardsCurve) *big.Int { + orderBits := c.Params().N.BitLen() + orderBytes := (orderBits + 7) / 8 + if len(hash) > orderBytes { + hash = hash[:orderBytes] + } + + ret := new(big.Int).SetBytes(hash) + excess := len(hash)*8 - orderBits + if excess > 0 { + ret.Rsh(ret, uint(excess)) + } + return ret +} + +// mac returns an HMAC of the given key and message. 
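+// For example, NonceRFC6979 above calls mac(alg, k, append(v, 0x00)) with
+// alg = fastsha256.New, i.e. HMAC-SHA256 keyed with k over V || 0x00
+// (step H of RFC 6979).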
+func mac(alg func() hash.Hash, k, m []byte) []byte { + h := hmac.New(alg, k) + h.Write(m) + return h.Sum(nil) +} + +// https://tools.ietf.org/html/rfc6979#section-2.3.3 +func int2octets(v *big.Int, rolen int) []byte { + out := v.Bytes() + + // left pad with zeros if it's too short + if len(out) < rolen { + out2 := make([]byte, rolen) + copy(out2[rolen-len(out):], out) + return out2 + } + + // drop most significant bytes if it's too long + if len(out) > rolen { + out2 := make([]byte, rolen) + copy(out2, out[len(out)-rolen:]) + return out2 + } + + return out +} + +// https://tools.ietf.org/html/rfc6979#section-2.3.4 +func bits2octets(in []byte, curve *TwistedEdwardsCurve, rolen int) []byte { + z1 := hashToInt(in, curve) + z2 := new(big.Int).Sub(z1, curve.Params().N) + if z2.Sign() < 0 { + return int2octets(z1, rolen) + } + return int2octets(z2, rolen) +} + +// SignFromScalar signs a message 'hash' using the given private scalar priv. +// It uses RFC6979 to generate a deterministic nonce. Considered experimental. +// r = kG, where k is the RFC6979 nonce +// s = r + hash512(k || A || M) * a +func SignFromScalar(curve *TwistedEdwardsCurve, priv *PrivateKey, + nonce []byte, hash []byte) (r, s *big.Int, err error) { + publicKey := new([PubKeyBytesLen]byte) + var A edwards25519.ExtendedGroupElement + privateScalar := copyBytes(priv.Serialize()) + reverse(privateScalar) // BE --> LE + edwards25519.GeScalarMultBase(&A, privateScalar) + A.ToBytes(publicKey) + + // For signing from a scalar, r = nonce. + nonceLE := copyBytes(nonce) + reverse(nonceLE) + var R edwards25519.ExtendedGroupElement + edwards25519.GeScalarMultBase(&R, nonceLE) + + var encodedR [32]byte + R.ToBytes(&encodedR) + + // h = hash512(k || A || M) + h := sha512.New() + h.Reset() + h.Write(encodedR[:]) + h.Write(publicKey[:]) + h.Write(hash) + + // s = r + h * a + var hramDigest [64]byte + h.Sum(hramDigest[:0]) + var hramDigestReduced [32]byte + edwards25519.ScReduce(&hramDigestReduced, &hramDigest) + + var localS [32]byte + edwards25519.ScMulAdd(&localS, &hramDigestReduced, privateScalar, + nonceLE) + + signature := new([64]byte) + copy(signature[:], encodedR[:]) + copy(signature[32:], localS[:]) + sigEd, err := ParseSignature(curve, signature[:]) + if err != nil { + return nil, nil, err + } + + return sigEd.GetR(), sigEd.GetS(), nil +} + +// SignThreshold signs a message 'hash' using the given private scalar priv in +// a threshold group signature. It uses RFC6979 to generate a deterministic nonce. +// Considered experimental. +// As opposed to the threshold signing function for secp256k1, this function +// takes the entirety of the public nonce point (all points added) instead of +// the public nonce point with n-1 keys added. +// r = K_Sum +// s = r + hash512(k || A || M) * a +func SignThreshold(curve *TwistedEdwardsCurve, priv *PrivateKey, + groupPub *PublicKey, hash []byte, privNonce *PrivateKey, + pubNonceSum *PublicKey) (r, s *big.Int, err error) { + if priv == nil || hash == nil || privNonce == nil || pubNonceSum == nil { + return nil, nil, fmt.Errorf("nil input") + } + + privateScalar := copyBytes(priv.Serialize()) + reverse(privateScalar) // BE --> LE + + // Threshold variant scheme: + // R = K_Sum + // Where K_Sum is the sum of the public keys corresponding to + // the private nonce scalars of each group signature member. + // That is, R = k1G + ... + knG. 
+ encodedGroupR := BigIntPointToEncodedBytes(pubNonceSum.GetX(), + pubNonceSum.GetY()) + + // h = hash512(k || A || M) + var hramDigest [64]byte + h := sha512.New() + h.Reset() + h.Write(encodedGroupR[:]) + h.Write(groupPub.Serialize()[:]) + h.Write(hash) + h.Sum(hramDigest[:0]) + var hramDigestReduced [32]byte + edwards25519.ScReduce(&hramDigestReduced, &hramDigest) + + // s = r + h * a + var localS [32]byte + privNonceLE := copyBytes(privNonce.Serialize()) + reverse(privNonceLE) // BE --> LE + edwards25519.ScMulAdd(&localS, &hramDigestReduced, privateScalar, + privNonceLE) + + signature := new([64]byte) + copy(signature[:], encodedGroupR[:]) + copy(signature[32:], localS[:]) + sigEd, err := ParseSignature(curve, signature[:]) + if err != nil { + return nil, nil, err + } + + return sigEd.GetR(), sigEd.GetS(), nil +} + +// Sign is the generalized and exported version of Ed25519 signing, that +// handles both standard private secrets and non-standard scalars. +func Sign(curve *TwistedEdwardsCurve, priv *PrivateKey, hash []byte) (r, + s *big.Int, err error) { + if priv == nil { + return nil, nil, fmt.Errorf("private key is nil") + } + if hash == nil { + return nil, nil, fmt.Errorf("message key is nil") + } + + if priv.secret == nil { + privLE := copyBytes(priv.Serialize()) + reverse(privLE) + nonce := nonceRFC6979(curve, privLE[:], hash, nil, nil) + return SignFromScalar(curve, priv, nonce, hash) + } + + return SignFromSecretNoReader(priv, hash) +} + +// Verify verifies a message 'hash' using the given public keys and signature. +func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool { + if pub == nil || hash == nil || r == nil || s == nil { + return false + } + + pubBytes := pub.Serialize() + sig := &Signature{r, s} + sigBytes := sig.Serialize() + pubArray := copyBytes(pubBytes) + sigArray := copyBytes64(sigBytes) + return ed25519.Verify(pubArray, hash, sigArray) +} diff --git a/dcrec/edwards/ecdsa_test.go b/dcrec/edwards/ecdsa_test.go new file mode 100644 index 00000000..8bf3d342 --- /dev/null +++ b/dcrec/edwards/ecdsa_test.go @@ -0,0 +1,401 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package edwards + +import ( + "bufio" + "bytes" + "compress/gzip" + "encoding/hex" + "io" + "math/big" + "math/rand" + "os" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGolden(t *testing.T) { + curve := new(TwistedEdwardsCurve) + curve.InitParam25519() + + // sign.input.gz is a selection of test cases from + // http://ed25519.cr.yp.to/python/sign.input + testDataZ, err := os.Open("testdata/sign.input.gz") + if err != nil { + t.Fatal(err) + } + defer testDataZ.Close() + testData, err := gzip.NewReader(testDataZ) + if err != nil { + t.Fatal(err) + } + defer testData.Close() + + in := bufio.NewReaderSize(testData, 1<<12) + lineNo := 0 + for { + lineNo++ + lineBytes, err := in.ReadBytes(byte('\n')) + if err != nil { + if err == io.EOF { + break + } + t.Fatalf("error reading test data: %s", err) + } + + line := string(lineBytes) + parts := strings.Split(line, ":") + if len(parts) != 5 { + t.Fatalf("bad number of parts on line %d (want %v, got %v)", lineNo, + 5, len(parts)) + } + + privBytes, _ := hex.DecodeString(parts[0]) + privArray := copyBytes64(privBytes) + + pubKeyBytes, _ := hex.DecodeString(parts[1]) + pubArray := copyBytes(pubKeyBytes) + msg, _ := hex.DecodeString(parts[2]) + sig, _ := hex.DecodeString(parts[3]) + sigArray := copyBytes64(sig) + // The signatures in the test vectors also include the message + // at the end, but we just want R and S. + sig = sig[:SignatureSize] + + if l := len(pubKeyBytes); l != PubKeyBytesLen { + t.Fatalf("bad public key length on line %d: got %d bytes", lineNo, l) + } + + var priv [PrivKeyBytesLen]byte + copy(priv[:], privBytes) + copy(priv[32:], pubKeyBytes) + + // Deserialize privkey and test functions. + privkeyS1, pubkeyS1 := PrivKeyFromSecret(curve, priv[:32]) + privkeyS2, pubkeyS2 := PrivKeyFromBytes(curve, priv[:]) + pkS1 := privkeyS1.SerializeSecret() + pkS2 := privkeyS2.SerializeSecret() + pubkS1 := pubkeyS1.Serialize() + pubkS2 := pubkeyS2.Serialize() + assert.Equal(t, pkS1, pkS2) + assert.Equal(t, privArray, copyBytes64(pkS1)) + assert.Equal(t, privArray, copyBytes64(pkS2)) + assert.Equal(t, pubkS1, pubkS2) + assert.Equal(t, pubArray, copyBytes(pubkS1)) + assert.Equal(t, pubArray, copyBytes(pubkS2)) + + // Deserialize pubkey and test functions. + pubkeyP, err := ParsePubKey(curve, pubKeyBytes) + pubkP := pubkeyP.Serialize() + assert.Equal(t, pubkS1, pubkP) + assert.Equal(t, pubkS2, pubkP) + assert.Equal(t, pubArray, copyBytes(pubkP)) + + // Deserialize signature and test functions. 
+ internalSig, err := ParseSignature(curve, sig) + iSigSerialized := internalSig.Serialize() + assert.Equal(t, sigArray, copyBytes64(iSigSerialized)) + + sig2r, sig2s, err := Sign(curve, privkeyS2, msg) + sig2 := &Signature{sig2r, sig2s} + sig2B := sig2.Serialize() + if !bytes.Equal(sig, sig2B[:]) { + t.Errorf("different signature result on line %d: %x vs %x", lineNo, + sig, sig2B[:]) + } + + var pubKey [PubKeyBytesLen]byte + copy(pubKey[:], pubKeyBytes) + if !Verify(pubkeyP, msg, sig2r, sig2s) { + t.Errorf("signature failed to verify on line %d", lineNo) + } + } +} + +func randPrivScalarKeyList(curve *TwistedEdwardsCurve, i int) []*PrivateKey { + r := rand.New(rand.NewSource(54321)) + + privKeyList := make([]*PrivateKey, i, i) + for j := 0; j < i; j++ { + for { + bIn := new([32]byte) + for k := 0; k < PrivScalarSize; k++ { + randByte := r.Intn(255) + bIn[k] = uint8(randByte) + } + + bInBig := new(big.Int).SetBytes(bIn[:]) + bInBig.Mod(bInBig, curve.N) + bIn = copyBytes(bInBig.Bytes()) + bIn[31] &= 248 + + pks, _, err := PrivKeyFromScalar(curve, bIn[:]) + if err != nil { + r.Seed(int64(j) + r.Int63n(12345)) + continue + } + + // No duplicates allowed. + if j > 0 && + (bytes.Equal(pks.Serialize(), privKeyList[j-1].Serialize())) { + continue + } + + privKeyList[j] = pks + r.Seed(int64(j) + 54321) + break + } + } + + return privKeyList +} + +func TestNonStandardSignatures(t *testing.T) { + tRand := rand.New(rand.NewSource(54321)) + + curve := new(TwistedEdwardsCurve) + curve.InitParam25519() + msg := []byte{ + 0xbe, 0x13, 0xae, 0xf4, + 0xe8, 0xa2, 0x00, 0xb6, + 0x45, 0x81, 0xc4, 0xd1, + 0x0c, 0xf4, 0x1b, 0x5b, + 0xe1, 0xd1, 0x81, 0xa7, + 0xd3, 0xdc, 0x37, 0x55, + 0x58, 0xc1, 0xbd, 0xa2, + 0x98, 0x2b, 0xd9, 0xfb, + } + + pks := randPrivScalarKeyList(curve, 50) + for _, pk := range pks { + r, s, err := Sign(curve, pk, msg) + assert.NoError(t, err) + + pubX, pubY := pk.Public() + pub := NewPublicKey(curve, pubX, pubY) + ok := Verify(pub, msg, r, s) + assert.True(t, ok) + + // Test serializing/deserializing. + privKeyDupTest, _, err := PrivKeyFromScalar(curve, + copyBytes(pk.ecPk.D.Bytes())[:]) + assert.NoError(t, err) + assert.Equal(t, privKeyDupTest.GetD(), pk.GetD()) + privKeyDupTest2, _, err := PrivKeyFromScalar(curve, pk.Serialize()) + assert.NoError(t, err) + assert.Equal(t, privKeyDupTest2.GetD(), pk.GetD()) + + // Screw up a random bit in the signature and + // make sure it still fails. + sig := NewSignature(r, s) + sigBad := sig.Serialize() + pos := tRand.Intn(63) + bitPos := tRand.Intn(7) + sigBad[pos] ^= 1 << uint8(bitPos) + + bSig, err := ParseSignature(curve, sigBad) + if err != nil { + // Signature failed to parse, continue. + continue + } + ok = Verify(pub, msg, bSig.GetR(), bSig.GetS()) + assert.False(t, ok) + + // Screw up a random bit in the pubkey and + // make sure it still fails. 
+ pkBad := pub.Serialize() + pos = tRand.Intn(31) + if pos == 0 { + // 0th bit in first byte doesn't matter + bitPos = tRand.Intn(6) + 1 + } else { + bitPos = tRand.Intn(7) + } + pkBad[pos] ^= 1 << uint8(bitPos) + bPub, err := ParsePubKey(curve, pkBad) + if err == nil && bPub != nil { + ok = Verify(bPub, msg, r, s) + assert.False(t, ok) + } + } +} + +func randPrivKeyList(curve *TwistedEdwardsCurve, i int) []*PrivateKey { + r := rand.New(rand.NewSource(54321)) + + privKeyList := make([]*PrivateKey, i, i) + for j := 0; j < i; j++ { + for { + bIn := new([32]byte) + for k := 0; k < fieldIntSize; k++ { + randByte := r.Intn(255) + bIn[k] = uint8(randByte) + } + + pks, _ := PrivKeyFromSecret(curve, bIn[:]) + if pks == nil { + continue + } + if j > 0 && + (bytes.Equal(pks.Serialize(), privKeyList[j-1].Serialize())) { + r.Seed(int64(j) + r.Int63n(12345)) + continue + } + + privKeyList[j] = pks + r.Seed(int64(j) + 54321) + break + } + } + + return privKeyList +} + +func benchmarkSigning(b *testing.B) { + curve := new(TwistedEdwardsCurve) + curve.InitParam25519() + + r := rand.New(rand.NewSource(54321)) + msg := []byte{ + 0xbe, 0x13, 0xae, 0xf4, + 0xe8, 0xa2, 0x00, 0xb6, + 0x45, 0x81, 0xc4, 0xd1, + 0x0c, 0xf4, 0x1b, 0x5b, + 0xe1, 0xd1, 0x81, 0xa7, + 0xd3, 0xdc, 0x37, 0x55, + 0x58, 0xc1, 0xbd, 0xa2, + 0x98, 0x2b, 0xd9, 0xfb, + } + + numKeys := 1024 + privKeyList := randPrivKeyList(curve, numKeys) + + for n := 0; n < b.N; n++ { + randIndex := r.Intn(numKeys - 1) + _, _, err := Sign(curve, privKeyList[randIndex], msg) + if err != nil { + panic("sign failure") + } + } +} + +func BenchmarkSigning(b *testing.B) { benchmarkSigning(b) } + +func benchmarkSigningNonStandard(b *testing.B) { + curve := new(TwistedEdwardsCurve) + curve.InitParam25519() + + r := rand.New(rand.NewSource(54321)) + msg := []byte{ + 0xbe, 0x13, 0xae, 0xf4, + 0xe8, 0xa2, 0x00, 0xb6, + 0x45, 0x81, 0xc4, 0xd1, + 0x0c, 0xf4, 0x1b, 0x5b, + 0xe1, 0xd1, 0x81, 0xa7, + 0xd3, 0xdc, 0x37, 0x55, + 0x58, 0xc1, 0xbd, 0xa2, + 0x98, 0x2b, 0xd9, 0xfb, + } + + numKeys := 250 + privKeyList := randPrivScalarKeyList(curve, numKeys) + + for n := 0; n < b.N; n++ { + randIndex := r.Intn(numKeys - 1) + _, _, err := Sign(curve, privKeyList[randIndex], msg) + if err != nil { + panic("sign failure") + } + } +} + +func BenchmarkSigningNonStandard(b *testing.B) { benchmarkSigningNonStandard(b) } + +type SignatureVerParams struct { + pubkey *PublicKey + msg []byte + sig *Signature +} + +func randSigList(curve *TwistedEdwardsCurve, i int) []*SignatureVerParams { + r := rand.New(rand.NewSource(54321)) + + privKeyList := make([]*PrivateKey, i, i) + for j := 0; j < i; j++ { + for { + bIn := new([32]byte) + for k := 0; k < fieldIntSize; k++ { + randByte := r.Intn(255) + bIn[k] = uint8(randByte) + } + + pks, _ := PrivKeyFromSecret(curve, bIn[:]) + if pks == nil { + continue + } + privKeyList[j] = pks + r.Seed(int64(j) + 54321) + break + } + } + + msgList := make([][]byte, i, i) + for j := 0; j < i; j++ { + m := make([]byte, 32, 32) + for k := 0; k < fieldIntSize; k++ { + randByte := r.Intn(255) + m[k] = uint8(randByte) + } + msgList[j] = m + r.Seed(int64(j) + 54321) + } + + sigsList := make([]*Signature, i, i) + for j := 0; j < i; j++ { + r, s, err := Sign(curve, privKeyList[j], msgList[j]) + if err != nil { + panic("sign failure") + } + sig := &Signature{r, s} + sigsList[j] = sig + } + + sigStructList := make([]*SignatureVerParams, i, i) + for j := 0; j < i; j++ { + ss := new(SignatureVerParams) + pkx, pky := privKeyList[j].Public() + ss.pubkey = NewPublicKey(curve, 
pkx, pky) + ss.msg = msgList[j] + ss.sig = sigsList[j] + sigStructList[j] = ss + } + + return sigStructList +} + +func benchmarkVerification(b *testing.B) { + curve := new(TwistedEdwardsCurve) + curve.InitParam25519() + r := rand.New(rand.NewSource(54321)) + + numSigs := 1024 + sigList := randSigList(curve, numSigs) + + for n := 0; n < b.N; n++ { + randIndex := r.Intn(numSigs - 1) + ver := Verify(sigList[randIndex].pubkey, + sigList[randIndex].msg, + sigList[randIndex].sig.R, + sigList[randIndex].sig.S) + if ver != true { + panic("made invalid sig") + } + } +} + +func BenchmarkVerification(b *testing.B) { benchmarkVerification(b) } diff --git a/dcrec/edwards/primitives.go b/dcrec/edwards/primitives.go new file mode 100644 index 00000000..8113ead6 --- /dev/null +++ b/dcrec/edwards/primitives.go @@ -0,0 +1,314 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package edwards + +import ( + "bytes" + "fmt" + "math/big" + + "github.com/decred/ed25519/edwards25519" +) + +// Some notes on primitives in Ed25519: +// 1) The integers themselves are stored as 32-byte little endian +// representations. If the store value is a point, the bit in +// the 31st byte, seventh position (b[31]>>7) represents whether +// or not the X value retrieved from the Y value should be +// negative or not. Remember, in affine EC space, the negative +// is P - positiveX. The rest of the 255 bits then represent +// the Y-value in little endian. +// 2) For high effiency, 40 byte field elements (10x int32s) are +// often used to represent integers. +// 3) For further increases in efficiency, the affine (cartesian) +// coordinates are converted into projective (extended or non- +// extended) formats, which include a Z and T or Z value +// respectively. +// 4) Almost *everything* is encoded in little endian, with the +// exception of ECDSA X and Y values of points in affine space. + +// reverse reverses a byte string. +func reverse(s *[32]byte) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} + +// copyBytes copies a byte slice to a 32 byte array. +func copyBytes(aB []byte) *[32]byte { + if aB == nil { + return nil + } + s := new([32]byte) + + // If we have a short byte string, expand + // it so that it's long enough. + aBLen := len(aB) + if aBLen < fieldIntSize { + diff := fieldIntSize - aBLen + for i := 0; i < diff; i++ { + aB = append([]byte{0x00}, aB...) + } + } + + for i := 0; i < fieldIntSize; i++ { + s[i] = aB[i] + } + + return s +} + +// copyBytes64 copies a byte slice to a 64 byte array. +func copyBytes64(aB []byte) *[64]byte { + if aB == nil { + return nil + } + + s := new([64]byte) + + // If we have a short byte string, expand + // it so that it's long enough. + aBLen := len(aB) + if aBLen < 64 { + diff := 64 - aBLen + for i := 0; i < diff; i++ { + aB = append([]byte{0x00}, aB...) + } + } + + for i := 0; i < 64; i++ { + s[i] = aB[i] + } + + return s +} + +// zeroArray zeroes the memory of a scalar array. +func zeroArray(a *[PrivScalarSize]byte) { + for i := 0; i < PrivScalarSize; i++ { + a[i] = 0x00 + } + + return +} + +// zeroSlice zeroes the memory of a scalar byte slice. +func zeroSlice(s []byte) { + for i := 0; i < PrivScalarSize; i++ { + s[i] = 0x00 + } + + return +} + +// BigIntToEncodedBytes converts a big integer into its corresponding +// 32 byte little endian representation. 
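+//
+// For example, big.NewInt(1) encodes to a 32 byte array whose first (least
+// significant) byte is 0x01 and whose remaining bytes are all zero.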
+func BigIntToEncodedBytes(a *big.Int) *[32]byte { + s := new([32]byte) + if a == nil { + return s + } + // Caveat: a can be longer than 32 bytes. + aB := a.Bytes() + + // If we have a short byte string, expand + // it so that it's long enough. + aBLen := len(aB) + if aBLen < fieldIntSize { + diff := fieldIntSize - aBLen + for i := 0; i < diff; i++ { + aB = append([]byte{0x00}, aB...) + } + } + + for i := 0; i < fieldIntSize; i++ { + s[i] = aB[i] + } + + // Reverse the byte string --> little endian after + // encoding. + reverse(s) + + return s +} + +// BigIntToEncodedBytesNoReverse converts a big integer into its corresponding +// 32 byte big endian representation. +func BigIntToEncodedBytesNoReverse(a *big.Int) *[32]byte { + s := new([32]byte) + if a == nil { + return s + } + // Caveat: a can be longer than 32 bytes. + aB := a.Bytes() + + // If we have a short byte string, expand + // it so that it's long enough. + aBLen := len(aB) + if aBLen < fieldIntSize { + diff := fieldIntSize - aBLen + for i := 0; i < diff; i++ { + aB = append([]byte{0x00}, aB...) + } + } + + for i := 0; i < fieldIntSize; i++ { + s[i] = aB[i] + } + + return s +} + +// BigIntToFieldElement converts a big little endian integer into its corresponding +// 40 byte field representation. +func BigIntToFieldElement(a *big.Int) *edwards25519.FieldElement { + aB := BigIntToEncodedBytes(a) + fe := new(edwards25519.FieldElement) + edwards25519.FeFromBytes(fe, aB) + return fe +} + +// BigIntPointToEncodedBytes converts an affine point to a compressed +// 32 byte integer representation. +func BigIntPointToEncodedBytes(x *big.Int, y *big.Int) *[32]byte { + s := BigIntToEncodedBytes(y) + xB := BigIntToEncodedBytes(x) + xFE := new(edwards25519.FieldElement) + edwards25519.FeFromBytes(xFE, xB) + isNegative := edwards25519.FeIsNegative(xFE) == 1 + + if isNegative { + s[31] |= (1 << 7) + } else { + s[31] &^= (1 << 7) + } + + return s +} + +// EncodedBytesToBigInt converts a 32 byte little endian representation of +// an integer into a big, big endian integer. +func EncodedBytesToBigInt(s *[32]byte) *big.Int { + // Use a copy so we don't screw up our original + // memory. + sCopy := new([32]byte) + for i := 0; i < fieldIntSize; i++ { + sCopy[i] = s[i] + } + reverse(sCopy) + + bi := new(big.Int).SetBytes(sCopy[:]) + + return bi +} + +// EncodedBytesToBigIntNoReverse converts a 32 byte big endian representation of +// an integer into a big little endian integer. +func EncodedBytesToBigIntNoReverse(s *[32]byte) *big.Int { + // Use a copy so we don't screw up our original + // memory. + sCopy := new([32]byte) + for i := 0; i < fieldIntSize; i++ { + sCopy[i] = s[i] + } + + bi := new(big.Int).SetBytes(sCopy[:]) + + return bi +} + +// extendedToBigAffine converts projective x, y, and z field elements into +// affine x and y coordinates, and returns whether or not the x value +// returned is negative. +func (curve *TwistedEdwardsCurve) extendedToBigAffine(xi, yi, + zi *edwards25519.FieldElement) (*big.Int, *big.Int, bool) { + var recip, x, y edwards25519.FieldElement + + // Normalize to Z=1. + edwards25519.FeInvert(&recip, zi) + edwards25519.FeMul(&x, xi, &recip) + edwards25519.FeMul(&y, yi, &recip) + + isNegative := edwards25519.FeIsNegative(&x) == 1 + + return FieldElementToBigInt(&x), FieldElementToBigInt(&y), isNegative +} + +// EncodedBytesToBigIntPoint converts a 32 byte representation of a point +// on the elliptical curve into a big integer point. It returns an error +// if the point does not fall on the curve. 
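+//
+// It is the inverse of BigIntPointToEncodedBytes for valid curve points; a
+// minimal round-trip sketch (assuming x, y are the affine coordinates of a
+// point on the curve):
+//
+//	enc := BigIntPointToEncodedBytes(x, y)
+//	x2, y2, err := curve.EncodedBytesToBigIntPoint(enc)
+//	// err == nil and x2, y2 recover the original x, y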
+func (curve *TwistedEdwardsCurve) EncodedBytesToBigIntPoint(s *[32]byte) (*big.Int, + *big.Int, error) { + sCopy := new([32]byte) + for i := 0; i < fieldIntSize; i++ { + sCopy[i] = s[i] + } + + xIsNegBytes := sCopy[31]>>7 == 1 + p := new(edwards25519.ExtendedGroupElement) + if p.FromBytes(sCopy) == false { + return nil, nil, fmt.Errorf("point not on curve") + } + + // Normalize the X and Y coordinates in affine space. + x, y, isNegative := curve.extendedToBigAffine(&p.X, &p.Y, &p.Z) + + // We got the wrong sign; flip the bit and recalculate. + if xIsNegBytes != isNegative { + x.Sub(curve.P, x) + } + + // This should hopefully never happen, since the + // library itself should never let us create a bad + // point. + if !curve.IsOnCurve(x, y) { + return nil, nil, fmt.Errorf("point not on curve") + } + + return x, y, nil +} + +// EncodedBytesToFieldElement converts a 32 byte little endian integer into +// a field element. +func EncodedBytesToFieldElement(s *[32]byte) *edwards25519.FieldElement { + fe := new(edwards25519.FieldElement) + edwards25519.FeFromBytes(fe, s) + return fe +} + +// FieldElementToBigInt converts a 40 byte field element into a big int. +func FieldElementToBigInt(fe *edwards25519.FieldElement) *big.Int { + s := new([32]byte) + edwards25519.FeToBytes(s, fe) + reverse(s) + + aBI := new(big.Int).SetBytes(s[:]) + + return aBI +} + +// FieldElementToBigInt converts a 40 byte field element into a 32 byte +// little endian integer. +func FieldElementToEncodedBytes(fe *edwards25519.FieldElement) *[32]byte { + s := new([32]byte) + edwards25519.FeToBytes(s, fe) + return s +} + +// feEqual checks if two field elements equate. +func feEqual(a, b *edwards25519.FieldElement) bool { + aB := new([32]byte) + edwards25519.FeToBytes(aB, a) + bB := new([32]byte) + edwards25519.FeToBytes(bB, b) + return bytes.Equal(aB[:], bB[:]) +} + +// invert inverts a big integer over the Ed25519 curve. +func (curve *TwistedEdwardsCurve) invert(a *big.Int) *big.Int { + sub2 := new(big.Int).Sub(curve.P, two) + inv := new(big.Int).Exp(a, sub2, curve.P) + return inv +} diff --git a/dcrec/edwards/primitives_test.go b/dcrec/edwards/primitives_test.go new file mode 100644 index 00000000..3cd61072 --- /dev/null +++ b/dcrec/edwards/primitives_test.go @@ -0,0 +1,140 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package edwards + +import ( + "bytes" + "encoding/hex" + "math/rand" + "os/exec" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +type ConversionVector struct { + bIn *[32]byte +} + +func testConversionVectors() []ConversionVector { + r := rand.New(rand.NewSource(12345)) + + numCvs := 50 + cvs := make([]ConversionVector, numCvs, numCvs) + for i := 0; i < numCvs; i++ { + bIn := new([32]byte) + for j := 0; j < fieldIntSize; j++ { + randByte := r.Intn(255) + bIn[j] = uint8(randByte) + } + + // Zero out the LSB as these aren't points. + bIn[31] = bIn[31] &^ (1 << 7) + cvs[i] = ConversionVector{bIn} + r.Seed(int64(i) + 12345) + } + + return cvs +} + +// Tested functions: +// EncodedBytesToBigInt +// BigIntToFieldElement +// FieldElementToEncodedBytes +// BigIntToEncodedBytes +// FieldElementToBigInt +// EncodedBytesToFieldElement +func TestConversion(t *testing.T) { + for _, vector := range testConversionVectors() { + // Test encoding to FE --> bytes. 
+ feFB := EncodedBytesToFieldElement(vector.bIn) + feTB := FieldElementToEncodedBytes(feFB) + assert.Equal(t, vector.bIn, feTB) + + // Test encoding to big int --> FE --> bytes. + big := EncodedBytesToBigInt(vector.bIn) + fe := BigIntToFieldElement(big) + b := FieldElementToEncodedBytes(fe) + assert.Equal(t, vector.bIn, b) + + // Test encoding to big int --> bytes. + b = BigIntToEncodedBytes(big) + assert.Equal(t, vector.bIn, b) + + // Test encoding FE --> big int --> bytes. + feBig := FieldElementToBigInt(fe) + b = BigIntToEncodedBytes(feBig) + assert.Equal(t, vector.bIn, b) + + // Test python lib bytes --> int vs our results. + args := []string{"testdata/decodeint.py", hex.EncodeToString(vector.bIn[:])} + pyNumStr, _ := exec.Command("python", args...).Output() + stripped := strings.TrimSpace(string(pyNumStr)) + assert.Equal(t, stripped, big.String()) + + // Test python lib int --> bytes versus our results. + args = []string{"testdata/encodeint.py", big.String()} + pyHexStr, _ := exec.Command("python", args...).Output() + stripped = strings.TrimSpace(string(pyHexStr)) + assert.Equal(t, hex.EncodeToString(vector.bIn[:]), string(stripped)) + } +} + +func testPointConversionVectors() []ConversionVector { + r := rand.New(rand.NewSource(54321)) + + numCvs := 50 + cvs := make([]ConversionVector, numCvs, numCvs) + for i := 0; i < numCvs; i++ { + bIn := new([32]byte) + for j := 0; j < fieldIntSize; j++ { + randByte := r.Intn(255) + bIn[j] = uint8(randByte) + } + + cvs[i] = ConversionVector{bIn} + r.Seed(int64(i) + 54321) + } + + return cvs +} + +// Tested functions: +// BigIntPointToEncodedBytes +// extendedToBigAffine +// EncodedBytesToBigIntPoint +func TestPointConversion(t *testing.T) { + curve := new(TwistedEdwardsCurve) + curve.InitParam25519() + + for _, vector := range testPointConversionVectors() { + x, y, err := curve.EncodedBytesToBigIntPoint(vector.bIn) + // The random point wasn't on the curve. + if err != nil { + continue + } + + yB := BigIntPointToEncodedBytes(x, y) + assert.Equal(t, vector.bIn, yB) + + // Test python lib bytes --> point vs our results. + args := []string{"testdata/decodepoint.py", hex.EncodeToString(vector.bIn[:])} + pyNumStr, _ := exec.Command("python", args...).Output() + stripped := strings.TrimSpace(string(pyNumStr)) + var buffer bytes.Buffer + buffer.WriteString(x.String()) + buffer.WriteString(",") + buffer.WriteString(y.String()) + localStr := buffer.String() + assert.Equal(t, localStr, stripped) + + // Test python lib point --> bytes versus our results. + args = []string{"testdata/encodepoint.py", x.String(), y.String()} + pyHexStr, _ := exec.Command("python", args...).Output() + stripped = strings.TrimSpace(string(pyHexStr)) + assert.Equal(t, hex.EncodeToString(vector.bIn[:]), string(stripped)) + } +} diff --git a/dcrec/edwards/privkey.go b/dcrec/edwards/privkey.go new file mode 100644 index 00000000..6c8db22d --- /dev/null +++ b/dcrec/edwards/privkey.go @@ -0,0 +1,202 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package edwards + +import ( + "bytes" + "crypto/ecdsa" + "crypto/rand" + "crypto/sha512" + "fmt" + "math/big" + + "github.com/decred/ed25519" +) + +// These constants define the lengths of serialized private keys. 
+const ( + PrivScalarSize = 32 + PrivKeyBytesLen = 64 +) + +// PrivateKey wraps an ecdsa.PrivateKey as a convenience mainly for signing +// things with the the private key without having to directly import the ecdsa +// package. +type PrivateKey struct { + ecPk *ecdsa.PrivateKey + secret *[32]byte +} + +// NewPrivateKey instantiates a new private key from a scalar encoded as a +// big integer. +func NewPrivateKey(curve *TwistedEdwardsCurve, d *big.Int) *PrivateKey { + dArray := BigIntToEncodedBytes(d) + priv, _ := PrivKeyFromSecret(curve, dArray[:]) + return priv +} + +// NewPrivateKey is a wrapper for ecdsa.GenerateKey that returns a PrivateKey +// instead of the normal ecdsa.PrivateKey. +func GeneratePrivateKey(curve *TwistedEdwardsCurve) (*PrivateKey, error) { + key, err := ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return nil, err + } + + pk := new(PrivateKey) + pk.ecPk = key + + return pk, nil +} + +// computeScalar obtains a private scalar from a private key. +func computeScalar(privateKey *[PrivKeyBytesLen]byte) *[PrivScalarSize]byte { + h := sha512.New() + h.Write(privateKey[:32]) + digest := h.Sum(nil) + + digest[0] &= 248 // Make a multiple of 8 + digest[31] &= 127 // Zero the first bit of this byte + digest[31] |= 64 // Enable the seventh bit + + var hBytes [PrivScalarSize]byte + copy(hBytes[:], digest) + + return &hBytes +} + +// PrivKeyFromBytes returns a private and public key for `curve' based on the +// private key passed as an argument as a byte slice. +func PrivKeyFromBytes(curve *TwistedEdwardsCurve, + pkBytes []byte) (*PrivateKey, *PublicKey) { + if len(pkBytes) != PrivKeyBytesLen { + return nil, nil + } + pk := copyBytes64(pkBytes) + + // The ed25519 library does a weird thing where it generates a + // private key 64 bytes long, and stores the private scalar in + // the first 32 bytes and the public key in the last 32 bytes. + // So, make sure we only grab the actual scalar we care about. + privKeyBytes := pk[0:32] + pubKeyBytes := pk[32:64] + pubKey, err := ParsePubKey(curve, pubKeyBytes) + if err != nil { + return nil, nil + } + + priv := &ecdsa.PrivateKey{ + PublicKey: *pubKey.ToECDSA(), + D: new(big.Int).SetBytes(computeScalar(pk)[:]), + } + privEd := new(PrivateKey) + privEd.ecPk = priv + privEd.secret = copyBytes(privKeyBytes) + + return privEd, (*PublicKey)(&priv.PublicKey) +} + +// PrivKeyFromSecret returns a private and public key for `curve' based on the +// 32-byte private key secret passed as an argument as a byte slice. +func PrivKeyFromSecret(curve *TwistedEdwardsCurve, s []byte) (*PrivateKey, + *PublicKey) { + if len(s) != PrivKeyBytesLen/2 { + return nil, nil + } + + // Instead of using rand to generate our scalar, use the scalar + // itself as a reader. + sReader := bytes.NewReader(s) + _, pk, err := ed25519.GenerateKey(sReader) + if err != nil { + return nil, nil + } + + return PrivKeyFromBytes(curve, pk[:]) +} + +// PrivKeyFromSecret returns a private and public key for `curve' based on the +// 32-byte private scalar passed as an argument as a byte slice (encoded big +// endian int). +func PrivKeyFromScalar(curve *TwistedEdwardsCurve, p []byte) (*PrivateKey, + *PublicKey, error) { + if len(p) != PrivScalarSize { + return nil, nil, fmt.Errorf("bad private scalar size") + } + + pk := new(PrivateKey) + pk.ecPk = new(ecdsa.PrivateKey) + pk.ecPk.D = new(big.Int).SetBytes(p) + + // The scalar must be in the subgroup. 
+ if pk.ecPk.D.Cmp(curve.N) > 0 { + return nil, nil, fmt.Errorf("not on subgroup (>N)") + } + + // The scalar must not be zero or negative. + if pk.ecPk.D.Cmp(zero) <= 0 { + return nil, nil, fmt.Errorf("zero or negative scalar") + } + + pk.ecPk.Curve = curve + pk.ecPk.PublicKey.X, pk.ecPk.PublicKey.Y = + curve.ScalarBaseMult(pk.GetD().Bytes()) + + if pk.ecPk.PublicKey.X == nil || pk.ecPk.PublicKey.Y == nil { + return nil, nil, fmt.Errorf("scalarbase mult failure to get pubkey") + } + pub := PublicKey(pk.ecPk.PublicKey) + + return pk, &pub, nil +} + +// Public returns the PublicKey corresponding to this private key. +func (p PrivateKey) Public() (*big.Int, *big.Int) { + return p.ecPk.PublicKey.X, p.ecPk.PublicKey.Y +} + +// ToECDSA returns the private key as a *ecdsa.PrivateKey. +func (p PrivateKey) ToECDSA() *ecdsa.PrivateKey { + return p.ecPk +} + +// Serialize returns the private key as a 32 byte big endian number. +func (p PrivateKey) Serialize() []byte { + if p.ecPk.D == nil || + p.ecPk.PublicKey.X == nil || + p.ecPk.PublicKey.Y == nil { + return nil + } + privateScalar := copyBytes(p.ecPk.D.Bytes()) + + return privateScalar[:] +} + +// SerializeSecret returns the 32 byte secret along with its public key as 64 +// bytes. +func (p PrivateKey) SerializeSecret() []byte { + if p.secret == nil { + return nil + } + + // This is little endian. + pubX, pubY := p.Public() + spk := BigIntPointToEncodedBytes(pubX, pubY) + + all := append(p.secret[:], spk[:]...) + + return all +} + +// GetD satisfies the chainec PrivateKey interface. +func (p PrivateKey) GetD() *big.Int { + return p.ecPk.D +} + +// GetType satisfies the chainec PrivateKey interface. +func (p PrivateKey) GetType() int { + return ecTypeEdwards +} diff --git a/dcrec/edwards/pubkey.go b/dcrec/edwards/pubkey.go new file mode 100644 index 00000000..fa91d313 --- /dev/null +++ b/dcrec/edwards/pubkey.go @@ -0,0 +1,102 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package edwards + +import ( + "crypto/ecdsa" + "errors" + "fmt" + "math/big" +) + +// These constants define the lengths of serialized public keys. +const ( + PubKeyBytesLen = 32 +) + +// PublicKey is an ecdsa.PublicKey with an additional function to +// serialize. +type PublicKey ecdsa.PublicKey + +// NewPublicKey instantiates a new public key. +func NewPublicKey(curve *TwistedEdwardsCurve, x *big.Int, y *big.Int) *PublicKey { + return &PublicKey{curve, x, y} +} + +// ParsePubKey parses a public key for an edwards curve from a bytestring into a +// ecdsa.Publickey, verifying that it is valid. +func ParsePubKey(curve *TwistedEdwardsCurve, pubKeyStr []byte) (key *PublicKey, + err error) { + pubkey := PublicKey{} + pubkey.Curve = curve + x, y, err := curve.EncodedBytesToBigIntPoint(copyBytes(pubKeyStr)) + if err != nil { + return nil, err + } + pubkey.X = x + pubkey.Y = y + + if len(pubKeyStr) == 0 { + return nil, errors.New("pubkey string is empty") + } + if pubkey.X.Cmp(pubkey.Curve.Params().P) >= 0 { + return nil, fmt.Errorf("pubkey X parameter is >= to P") + } + if pubkey.Y.Cmp(pubkey.Curve.Params().P) >= 0 { + return nil, fmt.Errorf("pubkey Y parameter is >= to P") + } + + return &pubkey, nil +} + +// ToECDSA returns the public key as a *ecdsa.PublicKey. 
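A small usage sketch of the key APIs above: derive a key pair from a 32-byte secret, serialize the public key to its 32-byte compressed form, and parse it back. The import path is assumed from this diff's layout.

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/decred/dcrd/dcrec/edwards" // assumed import path
)

func main() {
	curve := new(edwards.TwistedEdwardsCurve)
	curve.InitParam25519()

	// PrivKeyFromSecret expects exactly 32 bytes of secret material.
	secret := make([]byte, 32)
	if _, err := rand.Read(secret); err != nil {
		panic(err)
	}
	priv, pub := edwards.PrivKeyFromSecret(curve, secret)
	if priv == nil || pub == nil {
		panic("key derivation failed")
	}

	// The public key serializes to the 32-byte compressed encoding; parsing
	// it back also validates that the point lies on the curve.
	reparsed, err := edwards.ParsePubKey(curve, pub.Serialize())
	if err != nil {
		panic(err)
	}
	fmt.Println("pubkey round trip ok:",
		reparsed.GetX().Cmp(pub.GetX()) == 0 &&
			reparsed.GetY().Cmp(pub.GetY()) == 0)
}
```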
+func (p PublicKey) ToECDSA() *ecdsa.PublicKey { + pkecdsa := ecdsa.PublicKey(p) + return &pkecdsa +} + +// Serialize serializes a public key in a 32-byte compressed little endian format. +func (p PublicKey) Serialize() []byte { + if p.X == nil || p.Y == nil { + return nil + } + return BigIntPointToEncodedBytes(p.X, p.Y)[:] +} + +// SerializeUncompressed satisfies the chainec PublicKey interface. +func (p PublicKey) SerializeUncompressed() []byte { + return p.Serialize() +} + +// SerializeCompressed satisfies the chainec PublicKey interface. +func (p PublicKey) SerializeCompressed() []byte { + return p.Serialize() +} + +// SerializeHybrid satisfies the chainec PublicKey interface. +func (p PublicKey) SerializeHybrid() []byte { + return p.Serialize() +} + +// GetCurve satisfies the chainec PublicKey interface. +func (p PublicKey) GetCurve() interface{} { + return p.Curve +} + +// GetX satisfies the chainec PublicKey interface. +func (p PublicKey) GetX() *big.Int { + return p.X +} + +// GetY satisfies the chainec PublicKey interface. +func (p PublicKey) GetY() *big.Int { + return p.Y +} + +// GetType satisfies the chainec PublicKey interface. +func (p PublicKey) GetType() int { + return ecTypeEdwards +} diff --git a/dcrec/edwards/signature.go b/dcrec/edwards/signature.go new file mode 100644 index 00000000..3f035609 --- /dev/null +++ b/dcrec/edwards/signature.go @@ -0,0 +1,109 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package edwards + +import ( + "fmt" + "math/big" +) + +// Signature is a type representing an ecdsa signature. +type Signature struct { + R *big.Int + S *big.Int +} + +// SignatureSize is the size of an encoded ECDSA signature. +const SignatureSize = 64 + +// NewSignature instantiates a new signature given some R,S values. +func NewSignature(r, s *big.Int) *Signature { + return &Signature{r, s} +} + +// Serialize returns the ECDSA signature in the more strict format. +// +// The signatures are encoded as +// sig[0:32] R, a point encoded as little endian +// sig[32:64] S, scalar multiplication/addition results = (ab+c) mod l +// encoded also as little endian +func (sig Signature) Serialize() []byte { + rBytes := BigIntToEncodedBytes(sig.R) + sBytes := BigIntToEncodedBytes(sig.S) + + all := append(rBytes[:], sBytes[:]...) + + return all +} + +// parseSig is the default method of parsing a serialized Ed25519 signature. +func parseSig(curve *TwistedEdwardsCurve, sigStr []byte, der bool) (*Signature, + error) { + if der { + return nil, fmt.Errorf("DER signatures not allowed in ed25519") + } + + if len(sigStr) != SignatureSize { + return nil, fmt.Errorf("bad signature size; have %v, want %v", + len(sigStr), SignatureSize) + } + + rBytes := copyBytes(sigStr[0:32]) + r := EncodedBytesToBigInt(rBytes) + // r is a point on the curve as well. Evaluate it and make sure it's + // a valid point. + _, _, err := curve.EncodedBytesToBigIntPoint(rBytes) + if err != nil { + return nil, err + } + + sBytes := copyBytes(sigStr[32:64]) + s := EncodedBytesToBigInt(sBytes) + // s may not be zero or >= curve.N. + if s.Cmp(curve.N) >= 0 || s.Cmp(zero) == 0 { + return nil, fmt.Errorf("s scalar is empty or larger than the order of " + + "the curve") + } + + return &Signature{r, s}, nil +} + +// ParseSignature parses a signature in BER format for the curve type `curve' +// into a Signature type, perfoming some basic sanity checks. 
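The 64-byte R||S layout documented above can be illustrated with a stdlib-only sketch; `splitSig` is an illustrative helper, not part of the package.

```go
package main

import (
	"fmt"
	"math/big"
)

// splitSig splits a 64-byte signature into R (sig[0:32]) and S (sig[32:64]),
// both encoded little endian, and returns them as big integers.
func splitSig(sig []byte) (r, s *big.Int, err error) {
	if len(sig) != 64 {
		return nil, nil, fmt.Errorf("bad signature size; have %d, want 64", len(sig))
	}
	le := func(b []byte) *big.Int {
		rev := make([]byte, len(b))
		for i, v := range b {
			rev[len(b)-1-i] = v // little endian -> big endian for big.Int
		}
		return new(big.Int).SetBytes(rev)
	}
	return le(sig[:32]), le(sig[32:]), nil
}

func main() {
	// A toy buffer standing in for a serialized signature.
	sig := make([]byte, 64)
	sig[0] = 0x05  // R = 5 (little endian)
	sig[32] = 0x07 // S = 7 (little endian)
	r, s, err := splitSig(sig)
	fmt.Println(r, s, err) // 5 7 <nil>
}
```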
+func ParseSignature(curve *TwistedEdwardsCurve, sigStr []byte) (*Signature, + error) { + return parseSig(curve, sigStr, false) +} + +// ParseDERSignature offers a legacy function for plugging into Decred, which +// is based off btcec. +func ParseDERSignature(curve *TwistedEdwardsCurve, sigStr []byte) (*Signature, + error) { + return parseSig(curve, sigStr, false) +} + +// RecoverCompact, which uses a signature and a hash to recover is private +// key, is not yet implemented. +// TODO: Implement. +func RecoverCompact(signature, hash []byte) (*PublicKey, bool, error) { + // TODO One day reimplement this? cj + return nil, false, nil +} + +// GetR satisfies the chainec Signature interface. +func (s Signature) GetR() *big.Int { + return s.R +} + +// GetS satisfies the chainec Signature interface. +func (s Signature) GetS() *big.Int { + return s.S +} + +// GetType satisfies the chainec Signature interface. +func (s Signature) GetType() int { + return ecTypeEdwards +} diff --git a/dcrec/edwards/testdata/addpoints.py b/dcrec/edwards/testdata/addpoints.py new file mode 100644 index 00000000..220e522e --- /dev/null +++ b/dcrec/edwards/testdata/addpoints.py @@ -0,0 +1,11 @@ +import sys +from ed25519 import * + +s1 = sys.argv[1].decode("hex") +P1 = decodepoint(s1) + +s2 = sys.argv[2].decode("hex") +P2 = decodepoint(s2) + +P = edwards(P1, P2) +encodepointhex(P) diff --git a/dcrec/edwards/testdata/decodeint.py b/dcrec/edwards/testdata/decodeint.py new file mode 100644 index 00000000..9cbab1ce --- /dev/null +++ b/dcrec/edwards/testdata/decodeint.py @@ -0,0 +1,4 @@ +import sys +from ed25519 import * + +decodeinthex(sys.argv[1]) diff --git a/dcrec/edwards/testdata/decodepoint.py b/dcrec/edwards/testdata/decodepoint.py new file mode 100644 index 00000000..1941fb65 --- /dev/null +++ b/dcrec/edwards/testdata/decodepoint.py @@ -0,0 +1,5 @@ +import sys +from ed25519 import * + +decodepointhex(sys.argv[1]) + diff --git a/dcrec/edwards/testdata/ed25519.py b/dcrec/edwards/testdata/ed25519.py new file mode 100644 index 00000000..cbf873c5 --- /dev/null +++ b/dcrec/edwards/testdata/ed25519.py @@ -0,0 +1,121 @@ +import hashlib + +b = 256 +q = 2**255 - 19 +l = 2**252 + 27742317777372353535851937790883648493 + +def H(m): + return hashlib.sha512(m).digest() + +def expmod(b,e,m): + if e == 0: return 1 + t = expmod(b,e/2,m)**2 % m + if e & 1: t = (t*b) % m + return t + +def inv(x): + return expmod(x,q-2,q) + +d = -121665 * inv(121666) +I = expmod(2,(q-1)/4,q) + +def xrecover(y): + xx = (y*y-1) * inv(d*y*y+1) + x = expmod(xx,(q+3)/8,q) + if (x*x - xx) % q != 0: x = (x*I) % q + if x % 2 != 0: x = q-x + return x + +By = 4 * inv(5) +Bx = xrecover(By) +B = [Bx % q,By % q] + +def edwards(P,Q): + x1 = P[0] + y1 = P[1] + x2 = Q[0] + y2 = Q[1] + x3 = (x1*y2+x2*y1) * inv(1+d*x1*x2*y1*y2) + y3 = (y1*y2+x1*x2) * inv(1-d*x1*x2*y1*y2) + return [x3 % q,y3 % q] + +def scalarmult(P,e): + if e == 0: return [0,1] + Q = scalarmult(P,e/2) + Q = edwards(Q,Q) + if e & 1: Q = edwards(Q,P) + return Q + +def encodeint(y): + bits = [(y >> i) & 1 for i in range(b)] + return ''.join([chr(sum([bits[i * 8 + j] << j for j in range(8)])) for i in range(b/8)]) + +def encodeinthex(y): + encoded = encodeint(y) + print "".join("{:02x}".format(ord(c)) for c in encoded) + +def encodepoint(P): + x = P[0] + y = P[1] + bits = [(y >> i) & 1 for i in range(b - 1)] + [x & 1] + return ''.join([chr(sum([bits[i * 8 + j] << j for j in range(8)])) for i in range(b/8)]) + +def encodepointhex(P): + encoded = encodepoint(P) + print "".join("{:02x}".format(ord(c)) for c in 
encoded) + +def bit(h,i): + return (ord(h[i/8]) >> (i%8)) & 1 + +def publickey(sk): + h = H(sk) + a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2)) + A = scalarmult(B,a) + return encodepoint(A) + +def Hint(m): + h = H(m) + return sum(2**i * bit(h,i) for i in range(2*b)) + +def signature(m,sk,pk): + h = H(sk) + a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2)) + r = Hint(''.join([h[i] for i in range(b/8,b/4)]) + m) + R = scalarmult(B,r) + S = (r + Hint(encodepoint(R) + pk + m) * a) % l + return encodepoint(R) + encodeint(S) + +def isoncurve(P): + x = P[0] + y = P[1] + return (-x*x + y*y - 1 - d*x*x*y*y) % q == 0 + +def decodeint(s): + return sum(2**i * bit(s,i) for i in range(0,b)) + +def decodeinthex(sH): + s = sH.decode("hex") + print decodeint(s) + +def decodepoint(s): + y = sum(2**i * bit(s,i) for i in range(0,b-1)) + x = xrecover(y) + if x & 1 != bit(s,b-1): x = q-x + P = [x,y] + if not isoncurve(P): raise Exception("decoding point that is not on curve") + return P + +def decodepointhex(sH): + s = sH.decode("hex") + P = decodepoint(s) + print str(P[0]) + ',' + str(P[1]) + +def checkvalid(s,m,pk): + if len(s) != b/4: raise Exception("signature length is wrong") + if len(pk) != b/8: raise Exception("public-key length is wrong") + R = decodepoint(s[0:b/8]) + A = decodepoint(pk) + S = decodeint(s[b/8:b/4]) + h = Hint(encodepoint(R) + pk + m) + if scalarmult(B,S) != edwards(R,scalarmult(A,h)): + raise Exception("signature does not pass verification") diff --git a/dcrec/edwards/testdata/encodeint.py b/dcrec/edwards/testdata/encodeint.py new file mode 100644 index 00000000..b9d4a134 --- /dev/null +++ b/dcrec/edwards/testdata/encodeint.py @@ -0,0 +1,4 @@ +import sys +from ed25519 import * + +encodeinthex(int(sys.argv[1])) diff --git a/dcrec/edwards/testdata/encodepoint.py b/dcrec/edwards/testdata/encodepoint.py new file mode 100644 index 00000000..fe9c2d7d --- /dev/null +++ b/dcrec/edwards/testdata/encodepoint.py @@ -0,0 +1,9 @@ +import sys +from ed25519 import * + +P = [] +x = int(sys.argv[1]) +P.append(x) +y = int(sys.argv[2]) +P.append(y) +encodepointhex(P) diff --git a/dcrec/edwards/testdata/genscalarmult.py b/dcrec/edwards/testdata/genscalarmult.py new file mode 100644 index 00000000..20a5685d --- /dev/null +++ b/dcrec/edwards/testdata/genscalarmult.py @@ -0,0 +1,35 @@ +import os +from ed25519 import * + +f = open("scalarmulttests.dat",'w') + +numTests = 50 +for i in range(0,numTests): + rand_string = os.urandom(32) + try: + p = decodepoint(rand_string) + except: + continue + rand_string = os.urandom(32) + s = decodeint(rand_string) + + mult = scalarmult(p, s) + + f.write("ScalarMultVectorHex{") + # Point to multiply + f.write('\"') + f.write("".join("{:02x}".format(ord(c)) for c in encodepoint(p))) + f.write('\"') + f.write(',') + # Scalar to multiply by + f.write('\"') + f.write("".join("{:02x}".format(ord(c)) for c in encodeint(s))) + f.write('\"') + f.write(',') + # Resulting point + f.write('\"') + f.write("".join("{:02x}".format(ord(c)) for c in encodepoint(mult))) + f.write('\"') + f.write('},\n') + +f.close() diff --git a/dcrec/edwards/testdata/sign.input.gz b/dcrec/edwards/testdata/sign.input.gz new file mode 100644 index 00000000..24f86649 Binary files /dev/null and b/dcrec/edwards/testdata/sign.input.gz differ diff --git a/dcrec/edwards/threshold.go b/dcrec/edwards/threshold.go new file mode 100644 index 00000000..3061369e --- /dev/null +++ b/dcrec/edwards/threshold.go @@ -0,0 +1,250 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is 
governed by an ISC
+// license that can be found in the LICENSE file.
+
+package edwards
+
+import (
+	"fmt"
+	"math/big"
+)
+
+// Sha512VersionStringRFC6979 is the RFC6979 nonce version for a Schnorr signature
+// over the Curve25519 curve using SHA-512 as the hash function.
+var Sha512VersionStringRFC6979 = []byte("Edwards+SHA512 ")
+
+// CombinePubkeys combines a slice of public keys into a single public key
+// by adding them together with point addition.
+func CombinePubkeys(curve *TwistedEdwardsCurve,
+	pks []*PublicKey) *PublicKey {
+	numPubKeys := len(pks)
+
+	// Must have at least one pubkey; a single pubkey is simply returned.
+	if numPubKeys < 1 {
+		return nil
+	}
+	if numPubKeys == 1 {
+		return pks[0]
+	}
+	if pks == nil {
+		return nil
+	}
+	if pks[0] == nil || pks[1] == nil {
+		return nil
+	}
+
+	var pkSumX *big.Int
+	var pkSumY *big.Int
+
+	pkSumX, pkSumY = curve.Add(pks[0].GetX(), pks[0].GetY(),
+		pks[1].GetX(), pks[1].GetY())
+
+	if numPubKeys > 2 {
+		for i := 2; i < numPubKeys; i++ {
+			pkSumX, pkSumY = curve.Add(pkSumX, pkSumY,
+				pks[i].GetX(), pks[i].GetY())
+		}
+	}
+
+	if !curve.IsOnCurve(pkSumX, pkSumY) {
+		return nil
+	}
+
+	return NewPublicKey(curve, pkSumX, pkSumY)
+}
+
+// generateNoncePair deterministically generates a nonce pair for use in
+// partial signing of a message. Returns a public key (nonce to disseminate)
+// and a private nonce to keep as a secret for the signer.
+func generateNoncePair(curve *TwistedEdwardsCurve, msg []byte, priv []byte,
+	nonceFunction func(*TwistedEdwardsCurve, []byte, []byte, []byte,
+		[]byte) []byte, extra []byte, version []byte) ([]byte, *PublicKey, error) {
+	k := nonceFunction(curve, priv, msg, extra, version)
+	kBig := new(big.Int).SetBytes(k)
+	kBig.Mod(kBig, curve.N)
+
+	// k scalar sanity checks.
+	if kBig.Cmp(zero) == 0 {
+		return nil, nil, fmt.Errorf("k scalar is zero")
+	}
+	if kBig.Cmp(curve.N) >= 0 {
+		return nil, nil, fmt.Errorf("k scalar is >= curve.N")
+	}
+	kBig.SetInt64(0)
+
+	pubx, puby := curve.ScalarBaseMult(k)
+	pubnonce := NewPublicKey(curve, pubx, puby)
+
+	return k, pubnonce, nil
+}
+
+// GenerateNoncePair is the generalized and exported version of generateNoncePair.
+func GenerateNoncePair(curve *TwistedEdwardsCurve, msg []byte,
+	privkey *PrivateKey, extra []byte,
+	version []byte) (*PrivateKey, *PublicKey, error) {
+	priv, pubNonce, err := generateNoncePair(curve, msg, privkey.Serialize(),
+		nonceRFC6979, extra, version)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	privNonce := NewPrivateKey(curve,
+		EncodedBytesToBigIntNoReverse(copyBytes(priv)))
+	return privNonce, pubNonce, nil
+}
+
+// schnorrPartialSign creates a partial Schnorr signature which may be combined
+// with other Schnorr signatures to create a valid signature for a group pubkey.
+func schnorrPartialSign(curve *TwistedEdwardsCurve, msg []byte, priv []byte,
+	groupPublicKey []byte, privNonce []byte, pubNonceSum []byte) (*big.Int,
+	*big.Int, error) {
+	// Sanity checks.
+ if len(msg) != PrivScalarSize { + str := fmt.Sprintf("wrong size for message (got %v, want %v)", + len(msg), PrivScalarSize) + return nil, nil, fmt.Errorf("%v", str) + } + if len(priv) != PrivScalarSize { + str := fmt.Sprintf("wrong size for privkey (got %v, want %v)", + len(priv), PrivScalarSize) + return nil, nil, fmt.Errorf("%v", str) + } + if len(privNonce) != PrivScalarSize { + str := fmt.Sprintf("wrong size for privnonce (got %v, want %v)", + len(privNonce), PrivScalarSize) + return nil, nil, fmt.Errorf("%v", str) + } + if len(groupPublicKey) != PubKeyBytesLen { + str := fmt.Sprintf("wrong size for group public key (got %v, want %v)", + len(privNonce), PubKeyBytesLen) + return nil, nil, fmt.Errorf("%v", str) + } + if len(pubNonceSum) != PubKeyBytesLen { + str := fmt.Sprintf("wrong size for group nonce public key (got %v, "+ + "want %v)", + len(privNonce), PubKeyBytesLen) + return nil, nil, fmt.Errorf("%v", str) + } + + privBig := new(big.Int).SetBytes(priv) + if privBig.Cmp(zero) == 0 { + str := fmt.Sprintf("priv scalar is zero") + return nil, nil, fmt.Errorf("%v", str) + } + if privBig.Cmp(curve.N) >= 0 { + str := fmt.Sprintf("priv scalar is out of bounds") + return nil, nil, fmt.Errorf("%v", str) + } + privBig.SetInt64(0) + + privNonceBig := new(big.Int).SetBytes(privNonce) + if privNonceBig.Cmp(zero) == 0 { + str := fmt.Sprintf("privNonce scalar is zero") + return nil, nil, fmt.Errorf("%v", str) + } + if privNonceBig.Cmp(curve.N) >= 0 { + str := fmt.Sprintf("privNonce scalar is out of bounds") + return nil, nil, fmt.Errorf("%v", str) + } + privNonceBig.SetInt64(0) + + gpkX, gpkY, err := curve.EncodedBytesToBigIntPoint(copyBytes(groupPublicKey)) + if err != nil { + str := fmt.Sprintf("public key point could not be decoded") + return nil, nil, fmt.Errorf("%v", str) + } + if !curve.IsOnCurve(gpkX, gpkY) { + str := fmt.Sprintf("public key sum is off curve") + return nil, nil, fmt.Errorf("%v", str) + } + + gpnX, gpnY, err := curve.EncodedBytesToBigIntPoint(copyBytes(pubNonceSum)) + if err != nil { + str := fmt.Sprintf("public key point could not be decoded") + return nil, nil, fmt.Errorf("%v", str) + } + if !curve.IsOnCurve(gpnX, gpnY) { + str := fmt.Sprintf("public key sum is off curve") + return nil, nil, fmt.Errorf("%v", str) + } + + privDecoded, _, _ := PrivKeyFromScalar(curve, priv) + groupPubKeyDecoded, _ := ParsePubKey(curve, groupPublicKey) + privNonceDecoded, _, _ := PrivKeyFromScalar(curve, privNonce) + pubNonceSumDecoded, _ := ParsePubKey(curve, pubNonceSum) + + return SignThreshold(curve, privDecoded, groupPubKeyDecoded, msg, + privNonceDecoded, pubNonceSumDecoded) +} + +// schnorrCombineSigs is the generalized and exported version of +// schnorrPartialSign. +func SchnorrPartialSign(curve *TwistedEdwardsCurve, msg []byte, + priv *PrivateKey, groupPub *PublicKey, privNonce *PrivateKey, + pubSum *PublicKey) (*big.Int, *big.Int, error) { + privBytes := priv.Serialize() + defer zeroSlice(privBytes) + privNonceBytes := privNonce.Serialize() + defer zeroSlice(privNonceBytes) + + return schnorrPartialSign(curve, msg, privBytes, groupPub.Serialize(), + privNonceBytes, pubSum.Serialize()) +} + +// schnorrCombineSigs combines a list of partial Schnorr signatures s values +// into a complete signature s for some group public key. This is achieved +// by simply adding the s values of the partial signatures as scalars. 
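The combining step described above is just scalar addition modulo the group order. Below is a stdlib-only sketch with illustrative names and a toy modulus; the real code uses curve.N and the little-endian encodings above.

```go
package main

import (
	"fmt"
	"math/big"
)

// combineS sums partial s values modulo n, rejecting zero or out-of-range
// inputs, mirroring the checks in the function that follows.
func combineS(n *big.Int, partials []*big.Int) (*big.Int, error) {
	sum := new(big.Int)
	for i, s := range partials {
		if s.Sign() == 0 || s.Cmp(n) >= 0 {
			return nil, fmt.Errorf("partial s %d out of range", i)
		}
		sum.Add(sum, s)
		sum.Mod(sum, n)
	}
	if sum.Sign() == 0 {
		return nil, fmt.Errorf("combined s is zero")
	}
	return sum, nil
}

func main() {
	n := big.NewInt(97) // toy order for illustration only
	s, err := combineS(n, []*big.Int{big.NewInt(40), big.NewInt(70)})
	fmt.Println(s, err) // 13 <nil>, since 40+70 = 110 ≡ 13 (mod 97)
}
```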
+func schnorrCombineSigs(curve *TwistedEdwardsCurve, sigss [][]byte) (*big.Int, + error) { + combinedSigS := new(big.Int).SetInt64(0) + for i, sigs := range sigss { + sigsBI := EncodedBytesToBigInt(copyBytes(sigs)) + if sigsBI.Cmp(zero) == 0 { + str := fmt.Sprintf("sig s %v is zero", i) + return nil, fmt.Errorf("%v", str) + } + if sigsBI.Cmp(curve.N) >= 0 { + str := fmt.Sprintf("sig s %v is out of bounds", i) + return nil, fmt.Errorf("%v", str) + } + + combinedSigS = ScalarAdd(combinedSigS, sigsBI) + combinedSigS.Mod(combinedSigS, curve.N) + } + + if combinedSigS.Cmp(zero) == 0 { + str := fmt.Sprintf("combined sig s %v is zero") + return nil, fmt.Errorf("%v", str) + } + + return combinedSigS, nil +} + +// schnorrCombineSigs is the generalized and exported version of +// generateNoncePair. +func SchnorrCombineSigs(curve *TwistedEdwardsCurve, + sigs []*Signature) (*Signature, error) { + sigss := make([][]byte, len(sigs), len(sigs)) + for i, sig := range sigs { + if sig == nil { + return nil, fmt.Errorf("nil signature") + } + + if i > 0 { + if sigs[i-1].GetR().Cmp(sig.GetR()) != 0 { + str := fmt.Sprintf("nonmatching r values for idx %v, %v", + i, i-1) + return nil, fmt.Errorf("%v", str) + } + } + + sigss[i] = BigIntToEncodedBytes(sig.GetS())[:] + } + + combinedSigS, err := schnorrCombineSigs(curve, sigss) + if err != nil { + return nil, err + } + + return NewSignature(sigs[0].R, combinedSigS), nil +} diff --git a/dcrec/edwards/threshold_test.go b/dcrec/edwards/threshold_test.go new file mode 100644 index 00000000..05621e0b --- /dev/null +++ b/dcrec/edwards/threshold_test.go @@ -0,0 +1,225 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package edwards + +import ( + "bytes" + "encoding/hex" + "math/big" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" +) + +type signerHex struct { + privkey string + privateNonce string + pubKeySumLocal string + partialSignature string +} + +type ThresholdTestVectorHex struct { + msg string + signersHex []signerHex + combinedSignature string +} + +type signer struct { + privkey []byte + pubkey *PublicKey + privateNonce []byte + publicNonce *PublicKey + pubKeySumLocal *PublicKey + partialSignature []byte +} + +type ThresholdTestVector struct { + msg []byte + signers []signer + combinedSignature []byte +} + +func TestSchnorrThreshold(t *testing.T) { + tRand := rand.New(rand.NewSource(543212345)) + maxSignatories := 10 + numTests := 5 + numSignatories := maxSignatories * numTests + + curve := new(TwistedEdwardsCurve) + curve.InitParam25519() + + msg, _ := hex.DecodeString( + "d04b98f48e8f8bcc15c6ae5ac050801cd6dcfd428fb5f9e65c4e16e7807340fa") + privkeys := randPrivScalarKeyList(curve, numSignatories) + + for i := 0; i < numTests; i++ { + numKeysForTest := tRand.Intn(maxSignatories-2) + 2 + keyIndex := i * maxSignatories + keysToUse := make([]*PrivateKey, numKeysForTest, numKeysForTest) + for j := 0; j < numKeysForTest; j++ { + keysToUse[j] = privkeys[j+keyIndex] + } + + pubKeysToUse := make([]*PublicKey, numKeysForTest, + numKeysForTest) + for j := 0; j < numKeysForTest; j++ { + _, pubkey, _ := PrivKeyFromScalar(curve, + keysToUse[j].Serialize()) + pubKeysToUse[j] = pubkey + } + + // Combine pubkeys. 
+ allPubkeys := make([]*PublicKey, numKeysForTest, + numKeysForTest) + for j, pubkey := range pubKeysToUse { + allPubkeys[j] = pubkey + } + allPksSum := CombinePubkeys(curve, allPubkeys) + + privNoncesToUse := make([]*PrivateKey, numKeysForTest, + numKeysForTest) + pubNoncesToUse := make([]*PublicKey, numKeysForTest, + numKeysForTest) + for j := 0; j < numKeysForTest; j++ { + nonce := nonceRFC6979(curve, keysToUse[j].Serialize(), msg, nil, + Sha512VersionStringRFC6979) + nonceBig := new(big.Int).SetBytes(nonce) + nonceBig.Mod(nonceBig, curve.N) + nonce = copyBytes(nonceBig.Bytes())[:] + nonce[31] &= 248 + + privNonce, pubNonce, err := PrivKeyFromScalar(curve, + nonce[:]) + assert.NotNil(t, privNonce) + assert.NotNil(t, pubNonce) + assert.NoError(t, err) + + privNoncesToUse[j] = privNonce + pubNoncesToUse[j] = pubNonce + } + + partialSignatures := make([]*Signature, numKeysForTest, numKeysForTest) + + // Partial signature generation. + publicNonceSum := CombinePubkeys(curve, pubNoncesToUse) + assert.NotNil(t, publicNonceSum) + for j, _ := range keysToUse { + r, s, err := schnorrPartialSign(curve, msg, keysToUse[j].Serialize(), + allPksSum.Serialize(), privNoncesToUse[j].Serialize(), + publicNonceSum.Serialize()) + assert.NoError(t, err) + localSig := NewSignature(r, s) + + partialSignatures[j] = localSig + } + + // Combine signatures. + combinedSignature, err := SchnorrCombineSigs(curve, partialSignatures) + assert.NoError(t, err) + + // Make sure the combined signatures are the same as the + // signatures that would be generated by simply adding + // the private keys and private nonces. + combinedPrivkeysD := new(big.Int).SetInt64(0) + for _, priv := range keysToUse { + combinedPrivkeysD = ScalarAdd(combinedPrivkeysD, priv.GetD()) + combinedPrivkeysD = combinedPrivkeysD.Mod(combinedPrivkeysD, curve.N) + } + + combinedNonceD := new(big.Int).SetInt64(0) + for _, priv := range privNoncesToUse { + combinedNonceD.Add(combinedNonceD, priv.GetD()) + combinedNonceD.Mod(combinedNonceD, curve.N) + } + + combinedPrivkey, _, err := PrivKeyFromScalar(curve, + copyBytes(combinedPrivkeysD.Bytes())[:]) + assert.NoError(t, err) + combinedNonce, _, err := PrivKeyFromScalar(curve, + copyBytes(combinedNonceD.Bytes())[:]) + assert.NoError(t, err) + cSigR, cSigS, err := SignFromScalar(curve, combinedPrivkey, + combinedNonce.Serialize(), msg) + sumSig := NewSignature(cSigR, cSigS) + assert.Equal(t, sumSig.Serialize(), combinedSignature.Serialize()) + + // Verify the combined signature and public keys. + ok := Verify(allPksSum, msg, combinedSignature.GetR(), + combinedSignature.GetS()) + assert.NoError(t, err) + assert.Equal(t, true, ok) + + // Corrupt some memory and make sure it breaks something. + corruptWhat := tRand.Intn(3) + randItem := tRand.Intn(numKeysForTest - 1) + + // Corrupt private key. + if corruptWhat == 0 { + privSerCorrupt := keysToUse[randItem].Serialize() + pos := tRand.Intn(31) + bitPos := tRand.Intn(7) + privSerCorrupt[pos] ^= 1 << uint8(bitPos) + keysToUse[randItem].ecPk.D.SetBytes(privSerCorrupt) + } + // Corrupt public key. + if corruptWhat == 1 { + pubXCorrupt := BigIntToEncodedBytes(pubKeysToUse[randItem].GetX()) + pos := tRand.Intn(31) + bitPos := tRand.Intn(7) + pubXCorrupt[pos] ^= 1 << uint8(bitPos) + pubKeysToUse[randItem].GetX().SetBytes(pubXCorrupt[:]) + } + // Corrupt private nonce. 
+ if corruptWhat == 2 { + privSerCorrupt := privNoncesToUse[randItem].Serialize() + pos := tRand.Intn(31) + bitPos := tRand.Intn(7) + privSerCorrupt[pos] ^= 1 << uint8(bitPos) + privNoncesToUse[randItem].ecPk.D.SetBytes(privSerCorrupt) + } + // Corrupt public nonce. + if corruptWhat == 3 { + pubXCorrupt := BigIntToEncodedBytes(pubNoncesToUse[randItem].GetX()) + pos := tRand.Intn(31) + bitPos := tRand.Intn(7) + pubXCorrupt[pos] ^= 1 << uint8(bitPos) + pubNoncesToUse[randItem].GetX().SetBytes(pubXCorrupt[:]) + } + + for j, _ := range keysToUse { + thisPubNonce := pubNoncesToUse[j] + localPubNonces := make([]*PublicKey, numKeysForTest-1, + numKeysForTest-1) + itr := 0 + for _, pubNonce := range pubNoncesToUse { + if bytes.Equal(thisPubNonce.Serialize(), pubNonce.Serialize()) { + continue + } + localPubNonces[itr] = pubNonce + itr++ + } + publicNonceSum := CombinePubkeys(curve, localPubNonces) + + sigR, sigS, _ := schnorrPartialSign(curve, msg, + keysToUse[j].Serialize(), allPksSum.Serialize(), + privNoncesToUse[j].Serialize(), + publicNonceSum.Serialize()) + localSig := NewSignature(sigR, sigS) + + partialSignatures[j] = localSig + } + + // Combine signatures. + combinedSignature, _ = SchnorrCombineSigs(curve, partialSignatures) + + // Nothing that makes it here should be valid. + if allPksSum != nil && combinedSignature != nil { + ok = Verify(allPksSum, msg, combinedSignature.GetR(), + combinedSignature.GetS()) + assert.Equal(t, false, ok) + } + } +} diff --git a/dcrec/secp256k1/README.md b/dcrec/secp256k1/README.md new file mode 100644 index 00000000..5faa6e62 --- /dev/null +++ b/dcrec/secp256k1/README.md @@ -0,0 +1,61 @@ +secp256k1 +===== + +Package dcrec implements elliptic curve cryptography needed for working with +Decred (secp256k1 only for now). It is designed so that it may be used with the +standard crypto/ecdsa packages provided with go. A comprehensive suite of test +is provided to ensure proper functionality. Package dcrec was originally based +on work from ThePiachu which is licensed under the same terms as Go, but it has +signficantly diverged since then. The decred developers original is licensed +under the liberal ISC license. + +Although this package was primarily written for dcrd, it has intentionally been +designed so it can be used as a standalone package for any projects needing to +use secp256k1 elliptic curve cryptography. + +## Documentation + +[![GoDoc](https://godoc.org/github.com/decred/dcrd/dcrec?status.png)] +(http://godoc.org/github.com/decred/dcrd/dcrec) + +Full `go doc` style documentation for the project can be viewed online without +installing this package by using the GoDoc site +[here](http://godoc.org/github.com/decred/dcrd/dcrec). + +You can also view the documentation locally once the package is installed with +the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to +http://localhost:6060/pkg/github.com/decred/dcrd/dcrec + +## Installation + +```bash +$ go get github.com/decred/dcrd/dcrec +``` + +## Examples + +* [Sign Message] + (http://godoc.org/github.com/decred/dcrd/dcrec#example-package--SignMessage) + Demonstrates signing a message with a secp256k1 private key that is first + parsed form raw bytes and serializing the generated signature. + +* [Verify Signature] + (http://godoc.org/github.com/decred/dcrd/dcrec#example-package--VerifySignature) + Demonstrates verifying a secp256k1 signature against a public key that is + first parsed from raw bytes. The signature is also parsed from raw bytes. 
+ +* [Encryption] + (http://godoc.org/github.com/decred/dcrd/dcrec#example-package--EncryptMessage) + Demonstrates encrypting a message for a public key that is first parsed from + raw bytes, then decrypting it using the corresponding private key. + +* [Decryption] + (http://godoc.org/github.com/decred/dcrdy/dcrec#example-package--DecryptMessage) + Demonstrates decrypting a message using a private key that is first parsed + from raw bytes. + +## License + +Package dcrec is licensed under the [copyfree](http://copyfree.org) ISC License +except for dcrec.go and dcrec_test.go which is under the same license as Go. + diff --git a/btcec/bench_test.go b/dcrec/secp256k1/bench_test.go similarity index 80% rename from btcec/bench_test.go rename to dcrec/secp256k1/bench_test.go index ccdac144..c6919913 100644 --- a/btcec/bench_test.go +++ b/dcrec/secp256k1/bench_test.go @@ -1,8 +1,9 @@ // Copyright 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcec +package secp256k1 import "testing" @@ -10,13 +11,13 @@ import "testing" // Z values of 1 so that the associated optimizations are used. func BenchmarkAddJacobian(b *testing.B) { b.StopTimer() - x1 := new(fieldVal).SetHex("34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6") - y1 := new(fieldVal).SetHex("0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232") - z1 := new(fieldVal).SetHex("1") - x2 := new(fieldVal).SetHex("34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6") - y2 := new(fieldVal).SetHex("0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232") - z2 := new(fieldVal).SetHex("1") - x3, y3, z3 := new(fieldVal), new(fieldVal), new(fieldVal) + x1 := new(FieldVal).SetHex("34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6") + y1 := new(FieldVal).SetHex("0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232") + z1 := new(FieldVal).SetHex("1") + x2 := new(FieldVal).SetHex("34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6") + y2 := new(FieldVal).SetHex("0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232") + z2 := new(FieldVal).SetHex("1") + x3, y3, z3 := new(FieldVal), new(FieldVal), new(FieldVal) curve := S256() b.StartTimer() for i := 0; i < b.N; i++ { @@ -29,13 +30,13 @@ func BenchmarkAddJacobian(b *testing.B) { // Z=1 aren't used. 
func BenchmarkAddJacobianNotZOne(b *testing.B) { b.StopTimer() - x1 := new(fieldVal).SetHex("d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718") - y1 := new(fieldVal).SetHex("5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190") - z1 := new(fieldVal).SetHex("2") - x2 := new(fieldVal).SetHex("91abba6a34b7481d922a4bd6a04899d5a686f6cf6da4e66a0cb427fb25c04bd4") - y2 := new(fieldVal).SetHex("03fede65e30b4e7576a2abefc963ddbf9fdccbf791b77c29beadefe49951f7d1") - z2 := new(fieldVal).SetHex("3") - x3, y3, z3 := new(fieldVal), new(fieldVal), new(fieldVal) + x1 := new(FieldVal).SetHex("d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718") + y1 := new(FieldVal).SetHex("5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190") + z1 := new(FieldVal).SetHex("2") + x2 := new(FieldVal).SetHex("91abba6a34b7481d922a4bd6a04899d5a686f6cf6da4e66a0cb427fb25c04bd4") + y2 := new(FieldVal).SetHex("03fede65e30b4e7576a2abefc963ddbf9fdccbf791b77c29beadefe49951f7d1") + z2 := new(FieldVal).SetHex("3") + x3, y3, z3 := new(FieldVal), new(FieldVal), new(FieldVal) curve := S256() b.StartTimer() for i := 0; i < b.N; i++ { diff --git a/btcec/btcec.go b/dcrec/secp256k1/btcec.go similarity index 92% rename from btcec/btcec.go rename to dcrec/secp256k1/btcec.go index 98d7b143..3fa00ceb 100644 --- a/btcec/btcec.go +++ b/dcrec/secp256k1/btcec.go @@ -1,10 +1,11 @@ // Copyright 2010 The Go Authors. All rights reserved. +// Copyright (c) 2015 The Decred developers // Copyright 2011 ThePiachu. All rights reserved. // Copyright 2013-2014 The btcsuite developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcec +package secp256k1 // References: // [SECG]: Recommended Elliptic Curve Domain Parameters @@ -26,12 +27,15 @@ import ( ) var ( - // fieldOne is simply the integer 1 in field representation. It is + // FieldOne is simply the integer 1 in field representation. It is // used to avoid needing to create it multiple times during the internal // arithmetic. - fieldOne = new(fieldVal).SetInt(1) + FieldOne = new(FieldVal).SetInt(1) ) +// ecTypeSecp256k1 is the ECDSA type for the chainec interface. +var ecTypeSecp256k1 int = 0 + // KoblitzCurve supports a koblitz curve implementation that fits the ECC Curve // interface from crypto/elliptic. type KoblitzCurve struct { @@ -44,7 +48,7 @@ type KoblitzCurve struct { byteSize int // bytePoints - bytePoints *[32][256][3]fieldVal + bytePoints *[32][256][3]FieldVal // The next 6 values are used specifically for endomorphism // optimizations in ScalarMult. @@ -54,7 +58,7 @@ type KoblitzCurve struct { // beta must fulfill beta^3 = 1 mod P where P is the prime field of the // curve. - beta *fieldVal + beta *FieldVal // See the EndomorphismVectors in gensecp256k1.go to see how these are // derived. @@ -69,24 +73,24 @@ func (curve *KoblitzCurve) Params() *elliptic.CurveParams { return curve.CurveParams } -// bigAffineToField takes an affine point (x, y) as big integers and converts +// BigAffineToField takes an affine point (x, y) as big integers and converts // it to an affine point as field values. 
-func (curve *KoblitzCurve) bigAffineToField(x, y *big.Int) (*fieldVal, *fieldVal) { - x3, y3 := new(fieldVal), new(fieldVal) +func (curve *KoblitzCurve) BigAffineToField(x, y *big.Int) (*FieldVal, *FieldVal) { + x3, y3 := new(FieldVal), new(FieldVal) x3.SetByteSlice(x.Bytes()) y3.SetByteSlice(y.Bytes()) return x3, y3 } -// fieldJacobianToBigAffine takes a Jacobian point (x, y, z) as field values and +// FieldJacobianToBigAffine takes a Jacobian point (x, y, z) as field values and // converts it to an affine point as big integers. -func (curve *KoblitzCurve) fieldJacobianToBigAffine(x, y, z *fieldVal) (*big.Int, *big.Int) { +func (curve *KoblitzCurve) FieldJacobianToBigAffine(x, y, z *FieldVal) (*big.Int, *big.Int) { // Inversions are expensive and both point addition and point doubling // are faster when working with points that have a z value of one. So, // if the point needs to be converted to affine, go ahead and normalize // the point itself at the same time as the calculation is the same. - var zInv, tempZ fieldVal + var zInv, tempZ FieldVal zInv.Set(z).Inverse() // zInv = Z^-1 tempZ.SquareVal(&zInv) // tempZ = Z^-2 x.Mul(&tempZ) // X = X/Z^2 (mag: 1) @@ -109,11 +113,11 @@ func (curve *KoblitzCurve) fieldJacobianToBigAffine(x, y, z *fieldVal) (*big.Int // crypto/elliptic algorithm since a = 0 not -3. func (curve *KoblitzCurve) IsOnCurve(x, y *big.Int) bool { // Convert big ints to field values for faster arithmetic. - fx, fy := curve.bigAffineToField(x, y) + fx, fy := curve.BigAffineToField(x, y) // Elliptic curve equation for secp256k1 is: y^2 = x^3 + 7 - y2 := new(fieldVal).SquareVal(fy).Normalize() - result := new(fieldVal).SquareVal(fx).Mul(fx).AddInt(7).Normalize() + y2 := new(FieldVal).SquareVal(fy).Normalize() + result := new(FieldVal).SquareVal(fx).Mul(fx).AddInt(7).Normalize() return y2.Equals(result) } @@ -122,7 +126,7 @@ func (curve *KoblitzCurve) IsOnCurve(x, y *big.Int) bool { // (x1, y1, 1) + (x2, y2, 1) = (x3, y3, z3). It performs faster addition than // the generic add routine since less arithmetic is needed due to the ability to // avoid the z value multiplications. -func (curve *KoblitzCurve) addZ1AndZ2EqualsOne(x1, y1, z1, x2, y2, x3, y3, z3 *fieldVal) { +func (curve *KoblitzCurve) addZ1AndZ2EqualsOne(x1, y1, z1, x2, y2, x3, y3, z3 *FieldVal) { // To compute the point addition efficiently, this implementation splits // the equation into intermediate elements which are used to minimize // the number of field multiplications using the method shown at: @@ -162,8 +166,8 @@ func (curve *KoblitzCurve) addZ1AndZ2EqualsOne(x1, y1, z1, x2, y2, x3, y3, z3 *f // Calculate X3, Y3, and Z3 according to the intermediate elements // breakdown above. - var h, i, j, r, v fieldVal - var negJ, neg2V, negX3 fieldVal + var h, i, j, r, v FieldVal + var negJ, neg2V, negX3 FieldVal h.Set(x1).Negate(1).Add(x2) // H = X2-X1 (mag: 3) i.SquareVal(&h).MulInt(4) // I = 4*H^2 (mag: 4) j.Mul2(&h, &i) // J = H*I (mag: 1) @@ -188,7 +192,7 @@ func (curve *KoblitzCurve) addZ1AndZ2EqualsOne(x1, y1, z1, x2, y2, x3, y3, z3 *f // (x1, y1, z1) + (x2, y2, z1) = (x3, y3, z3). It performs faster addition than // the generic add routine since less arithmetic is needed due to the known // equivalence. 
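A usage sketch of the newly exported Jacobian helpers above: points are carried as (X, Y, Z) with affine x = X/Z^2 and y = Y/Z^3, and FieldJacobianToBigAffine performs that division via a field inversion. The sketch computes G + 2G both in Jacobian space and with the affine Add, then compares; it assumes the exported names introduced in this diff.

```go
package main

import (
	"fmt"

	"github.com/decred/dcrd/dcrec/secp256k1"
)

func main() {
	curve := secp256k1.S256()

	// 2G via the affine API.
	x2, y2 := curve.Double(curve.Gx, curve.Gy)

	// G + 2G in Jacobian space using the exported helpers, then convert the
	// (X, Y, Z) result back to affine big integers.
	fx1, fy1 := curve.BigAffineToField(curve.Gx, curve.Gy)
	fx2, fy2 := curve.BigAffineToField(x2, y2)
	fOne := new(secp256k1.FieldVal).SetInt(1)
	rx, ry, rz := new(secp256k1.FieldVal), new(secp256k1.FieldVal), new(secp256k1.FieldVal)
	curve.AddJacobian(fx1, fy1, fOne, fx2, fy2, fOne, rx, ry, rz)
	jx, jy := curve.FieldJacobianToBigAffine(rx, ry, rz)

	// Cross-check against the affine Add; both results are 3G.
	ax, ay := curve.Add(curve.Gx, curve.Gy, x2, y2)
	fmt.Println("match:", jx.Cmp(ax) == 0 && jy.Cmp(ay) == 0)
}
```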
-func (curve *KoblitzCurve) addZ1EqualsZ2(x1, y1, z1, x2, y2, x3, y3, z3 *fieldVal) { +func (curve *KoblitzCurve) addZ1EqualsZ2(x1, y1, z1, x2, y2, x3, y3, z3 *FieldVal) { // To compute the point addition efficiently, this implementation splits // the equation into intermediate elements which are used to minimize // the number of field multiplications using a slightly modified version @@ -229,8 +233,8 @@ func (curve *KoblitzCurve) addZ1EqualsZ2(x1, y1, z1, x2, y2, x3, y3, z3 *fieldVa // Calculate X3, Y3, and Z3 according to the intermediate elements // breakdown above. - var a, b, c, d, e, f fieldVal - var negX1, negY1, negE, negX3 fieldVal + var a, b, c, d, e, f FieldVal + var negX1, negY1, negE, negX3 FieldVal negX1.Set(x1).Negate(1) // negX1 = -X1 (mag: 2) negY1.Set(y1).Negate(1) // negY1 = -Y1 (mag: 2) a.Set(&negX1).Add(x2) // A = X2-X1 (mag: 3) @@ -257,7 +261,7 @@ func (curve *KoblitzCurve) addZ1EqualsZ2(x1, y1, z1, x2, y2, x3, y3, z3 *fieldVa // (x2, y2, 1) = (x3, y3, z3). It performs faster addition than the generic // add routine since less arithmetic is needed due to the ability to avoid // multiplications by the second point's z value. -func (curve *KoblitzCurve) addZ2EqualsOne(x1, y1, z1, x2, y2, x3, y3, z3 *fieldVal) { +func (curve *KoblitzCurve) addZ2EqualsOne(x1, y1, z1, x2, y2, x3, y3, z3 *FieldVal) { // To compute the point addition efficiently, this implementation splits // the equation into intermediate elements which are used to minimize // the number of field multiplications using the method shown at: @@ -279,7 +283,7 @@ func (curve *KoblitzCurve) addZ2EqualsOne(x1, y1, z1, x2, y2, x3, y3, z3 *fieldV // point, the x and y values need to be converted to like terms. Due to // the assumption made for this function that the second point has a z // value of 1 (z2=1), the first point is already "converted". - var z1z1, u2, s2 fieldVal + var z1z1, u2, s2 FieldVal x1.Normalize() y1.Normalize() z1z1.SquareVal(z1) // Z1Z1 = Z1^2 (mag: 1) @@ -304,8 +308,8 @@ func (curve *KoblitzCurve) addZ2EqualsOne(x1, y1, z1, x2, y2, x3, y3, z3 *fieldV // Calculate X3, Y3, and Z3 according to the intermediate elements // breakdown above. - var h, hh, i, j, r, rr, v fieldVal - var negX1, negY1, negX3 fieldVal + var h, hh, i, j, r, rr, v FieldVal + var negX1, negY1, negX3 FieldVal negX1.Set(x1).Negate(1) // negX1 = -X1 (mag: 2) h.Add2(&u2, &negX1) // H = U2-X1 (mag: 3) hh.SquareVal(&h) // HH = H^2 (mag: 1) @@ -333,7 +337,7 @@ func (curve *KoblitzCurve) addZ2EqualsOne(x1, y1, z1, x2, y2, x3, y3, z3 *fieldV // assumptions about the z values of the two points and stores the result in // (x3, y3, z3). That is to say (x1, y1, z1) + (x2, y2, z2) = (x3, y3, z3). It // is the slowest of the add routines due to requiring the most arithmetic. -func (curve *KoblitzCurve) addGeneric(x1, y1, z1, x2, y2, z2, x3, y3, z3 *fieldVal) { +func (curve *KoblitzCurve) addGeneric(x1, y1, z1, x2, y2, z2, x3, y3, z3 *FieldVal) { // To compute the point addition efficiently, this implementation splits // the equation into intermediate elements which are used to minimize // the number of field multiplications using the method shown at: @@ -354,7 +358,7 @@ func (curve *KoblitzCurve) addGeneric(x1, y1, z1, x2, y2, z2, x3, y3, z3 *fieldV // infinity. Since any number of Jacobian coordinates can represent the // same affine point, the x and y values need to be converted to like // terms. 
- var z1z1, z2z2, u1, u2, s1, s2 fieldVal + var z1z1, z2z2, u1, u2, s1, s2 FieldVal z1z1.SquareVal(z1) // Z1Z1 = Z1^2 (mag: 1) z2z2.SquareVal(z2) // Z2Z2 = Z2^2 (mag: 1) u1.Set(x1).Mul(&z2z2).Normalize() // U1 = X1*Z2Z2 (mag: 1) @@ -380,8 +384,8 @@ func (curve *KoblitzCurve) addGeneric(x1, y1, z1, x2, y2, z2, x3, y3, z3 *fieldV // Calculate X3, Y3, and Z3 according to the intermediate elements // breakdown above. - var h, i, j, r, rr, v fieldVal - var negU1, negS1, negX3 fieldVal + var h, i, j, r, rr, v FieldVal + var negU1, negS1, negX3 FieldVal negU1.Set(&u1).Negate(1) // negU1 = -U1 (mag: 2) h.Add2(&u2, &negU1) // H = U2-U1 (mag: 3) i.Set(&h).MulInt(2).Square() // I = (2*H)^2 (mag: 2) @@ -404,9 +408,9 @@ func (curve *KoblitzCurve) addGeneric(x1, y1, z1, x2, y2, z2, x3, y3, z3 *fieldV y3.Normalize() } -// addJacobian adds the passed Jacobian points (x1, y1, z1) and (x2, y2, z2) +// AddJacobian adds the passed Jacobian points (x1, y1, z1) and (x2, y2, z2) // together and stores the result in (x3, y3, z3). -func (curve *KoblitzCurve) addJacobian(x1, y1, z1, x2, y2, z2, x3, y3, z3 *fieldVal) { +func (curve *KoblitzCurve) AddJacobian(x1, y1, z1, x2, y2, z2, x3, y3, z3 *FieldVal) { // A point at infinity is the identity according to the group law for // elliptic curve cryptography. Thus, ∞ + P = P and P + ∞ = P. if (x1.IsZero() && y1.IsZero()) || z1.IsZero() { @@ -429,8 +433,8 @@ func (curve *KoblitzCurve) addJacobian(x1, y1, z1, x2, y2, z2, x3, y3, z3 *field // by using those assumptions. z1.Normalize() z2.Normalize() - isZ1One := z1.Equals(fieldOne) - isZ2One := z2.Equals(fieldOne) + isZ1One := z1.Equals(FieldOne) + isZ2One := z2.Equals(FieldOne) switch { case isZ1One && isZ2One: curve.addZ1AndZ2EqualsOne(x1, y1, z1, x2, y2, x3, y3, z3) @@ -462,15 +466,15 @@ func (curve *KoblitzCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { // Convert the affine coordinates from big integers to field values // and do the point addition in Jacobian projective space. - fx1, fy1 := curve.bigAffineToField(x1, y1) - fx2, fy2 := curve.bigAffineToField(x2, y2) - fx3, fy3, fz3 := new(fieldVal), new(fieldVal), new(fieldVal) - fOne := new(fieldVal).SetInt(1) - curve.addJacobian(fx1, fy1, fOne, fx2, fy2, fOne, fx3, fy3, fz3) + fx1, fy1 := curve.BigAffineToField(x1, y1) + fx2, fy2 := curve.BigAffineToField(x2, y2) + fx3, fy3, fz3 := new(FieldVal), new(FieldVal), new(FieldVal) + fOne := new(FieldVal).SetInt(1) + curve.AddJacobian(fx1, fy1, fOne, fx2, fy2, fOne, fx3, fy3, fz3) // Convert the Jacobian coordinate field values back to affine big // integers. - return curve.fieldJacobianToBigAffine(fx3, fy3, fz3) + return curve.FieldJacobianToBigAffine(fx3, fy3, fz3) } // doubleZ1EqualsOne performs point doubling on the passed Jacobian point @@ -478,7 +482,7 @@ func (curve *KoblitzCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { // the result in (x3, y3, z3). That is to say (x3, y3, z3) = 2*(x1, y1, 1). It // performs faster point doubling than the generic routine since less arithmetic // is needed due to the ability to avoid multiplication by the z value. 
-func (curve *KoblitzCurve) doubleZ1EqualsOne(x1, y1, x3, y3, z3 *fieldVal) { +func (curve *KoblitzCurve) doubleZ1EqualsOne(x1, y1, x3, y3, z3 *FieldVal) { // This function uses the assumptions that z1 is 1, thus the point // doubling formulas reduce to: // @@ -502,7 +506,7 @@ func (curve *KoblitzCurve) doubleZ1EqualsOne(x1, y1, x3, y3, z3 *fieldVal) { // // This results in a cost of 1 field multiplication, 5 field squarings, // 6 field additions, and 5 integer multiplications. - var a, b, c, d, e, f fieldVal + var a, b, c, d, e, f FieldVal z3.Set(y1).MulInt(2) // Z3 = 2*Y1 (mag: 2) a.SquareVal(x1) // A = X1^2 (mag: 1) b.SquareVal(y1) // B = Y1^2 (mag: 1) @@ -528,7 +532,7 @@ func (curve *KoblitzCurve) doubleZ1EqualsOne(x1, y1, x3, y3, z3 *fieldVal) { // any assumptions about the z value and stores the result in (x3, y3, z3). // That is to say (x3, y3, z3) = 2*(x1, y1, z1). It is the slowest of the point // doubling routines due to requiring the most arithmetic. -func (curve *KoblitzCurve) doubleGeneric(x1, y1, z1, x3, y3, z3 *fieldVal) { +func (curve *KoblitzCurve) doubleGeneric(x1, y1, z1, x3, y3, z3 *FieldVal) { // Point doubling formula for Jacobian coordinates for the secp256k1 // curve: // X3 = (3*X1^2)^2 - 8*X1*Y1^2 @@ -551,7 +555,7 @@ func (curve *KoblitzCurve) doubleGeneric(x1, y1, z1, x3, y3, z3 *fieldVal) { // // This results in a cost of 1 field multiplication, 5 field squarings, // 6 field additions, and 5 integer multiplications. - var a, b, c, d, e, f fieldVal + var a, b, c, d, e, f FieldVal z3.Mul2(y1, z1).MulInt(2) // Z3 = 2*Y1*Z1 (mag: 2) a.SquareVal(x1) // A = X1^2 (mag: 1) b.SquareVal(y1) // B = Y1^2 (mag: 1) @@ -575,7 +579,7 @@ func (curve *KoblitzCurve) doubleGeneric(x1, y1, z1, x3, y3, z3 *fieldVal) { // doubleJacobian doubles the passed Jacobian point (x1, y1, z1) and stores the // result in (x3, y3, z3). -func (curve *KoblitzCurve) doubleJacobian(x1, y1, z1, x3, y3, z3 *fieldVal) { +func (curve *KoblitzCurve) doubleJacobian(x1, y1, z1, x3, y3, z3 *FieldVal) { // Doubling a point at infinity is still infinity. if y1.IsZero() || z1.IsZero() { x3.SetInt(0) @@ -588,7 +592,7 @@ func (curve *KoblitzCurve) doubleJacobian(x1, y1, z1, x3, y3, z3 *fieldVal) { // by avoiding the multiplication on the z value. This section calls // a point doubling function which is accelerated by using that // assumption when possible. - if z1.Normalize().Equals(fieldOne) { + if z1.Normalize().Equals(FieldOne) { curve.doubleZ1EqualsOne(x1, y1, x3, y3, z3) return } @@ -606,14 +610,14 @@ func (curve *KoblitzCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) { // Convert the affine coordinates from big integers to field values // and do the point doubling in Jacobian projective space. - fx1, fy1 := curve.bigAffineToField(x1, y1) - fx3, fy3, fz3 := new(fieldVal), new(fieldVal), new(fieldVal) - fOne := new(fieldVal).SetInt(1) + fx1, fy1 := curve.BigAffineToField(x1, y1) + fx3, fy3, fz3 := new(FieldVal), new(FieldVal), new(FieldVal) + fOne := new(FieldVal).SetInt(1) curve.doubleJacobian(fx1, fy1, fOne, fx3, fy3, fz3) // Convert the Jacobian coordinate field values back to affine big // integers. - return curve.fieldJacobianToBigAffine(fx3, fy3, fz3) + return curve.FieldJacobianToBigAffine(fx3, fy3, fz3) } // splitK returns a balanced length-two representation of k and their signs. @@ -627,7 +631,7 @@ func (curve *KoblitzCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) { func (curve *KoblitzCurve) splitK(k []byte) ([]byte, []byte, int, int) { // All math here is done with big.Int, which is slow. 
// At some point, it might be useful to write something similar to - // fieldVal but for N instead of P as the prime field if this ends up + // FieldVal but for N instead of P as the prime field if this ends up // being a bottleneck. bigIntK := new(big.Int) c1, c2 := new(big.Int), new(big.Int) @@ -756,7 +760,7 @@ func NAF(k []byte) ([]byte, []byte) { // Part of the elliptic.Curve interface. func (curve *KoblitzCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) { // Point Q = ∞ (point at infinity). - qx, qy, qz := new(fieldVal), new(fieldVal), new(fieldVal) + qx, qy, qz := new(FieldVal), new(FieldVal), new(FieldVal) // Decompose K into k1 and k2 in order to halve the number of EC ops. // See Algorithm 3.74 in [GECC]. @@ -766,16 +770,16 @@ func (curve *KoblitzCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big // k * P = k1 * P + k2 * Ï•(P) // // P1 below is P in the equation, P2 below is Ï•(P) in the equation - p1x, p1y := curve.bigAffineToField(Bx, By) - p1yNeg := new(fieldVal).NegateVal(p1y, 1) - p1z := new(fieldVal).SetInt(1) + p1x, p1y := curve.BigAffineToField(Bx, By) + p1yNeg := new(FieldVal).NegateVal(p1y, 1) + p1z := new(FieldVal).SetInt(1) // NOTE: Ï•(x,y) = (βx,y). The Jacobian z coordinate is 1, so this math // goes through. - p2x := new(fieldVal).Mul2(p1x, curve.beta) - p2y := new(fieldVal).Set(p1y) - p2yNeg := new(fieldVal).NegateVal(p2y, 1) - p2z := new(fieldVal).SetInt(1) + p2x := new(FieldVal).Mul2(p1x, curve.beta) + p2y := new(FieldVal).Set(p1y) + p2yNeg := new(FieldVal).NegateVal(p2y, 1) + p2z := new(FieldVal).SetInt(1) // Flip the positive and negative values of the points as needed // depending on the signs of k1 and k2. As mentioned in the equation @@ -831,18 +835,18 @@ func (curve *KoblitzCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big curve.doubleJacobian(qx, qy, qz, qx, qy, qz) if k1BytePos&0x80 == 0x80 { - curve.addJacobian(qx, qy, qz, p1x, p1y, p1z, + curve.AddJacobian(qx, qy, qz, p1x, p1y, p1z, qx, qy, qz) } else if k1ByteNeg&0x80 == 0x80 { - curve.addJacobian(qx, qy, qz, p1x, p1yNeg, p1z, + curve.AddJacobian(qx, qy, qz, p1x, p1yNeg, p1z, qx, qy, qz) } if k2BytePos&0x80 == 0x80 { - curve.addJacobian(qx, qy, qz, p2x, p2y, p2z, + curve.AddJacobian(qx, qy, qz, p2x, p2y, p2z, qx, qy, qz) } else if k2ByteNeg&0x80 == 0x80 { - curve.addJacobian(qx, qy, qz, p2x, p2yNeg, p2z, + curve.AddJacobian(qx, qy, qz, p2x, p2yNeg, p2z, qx, qy, qz) } k1BytePos <<= 1 @@ -853,7 +857,7 @@ func (curve *KoblitzCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big } // Convert the Jacobian coordinate field values back to affine big.Ints. - return curve.fieldJacobianToBigAffine(qx, qy, qz) + return curve.FieldJacobianToBigAffine(qx, qy, qz) } // ScalarBaseMult returns k*G where G is the base point of the group and k is a @@ -864,7 +868,7 @@ func (curve *KoblitzCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { diff := len(curve.bytePoints) - len(newK) // Point Q = ∞ (point at infinity). - qx, qy, qz := new(fieldVal), new(fieldVal), new(fieldVal) + qx, qy, qz := new(FieldVal), new(FieldVal), new(FieldVal) // curve.bytePoints has all 256 byte points for each 8-bit window. The // strategy is to add up the byte points. This is best understood by @@ -873,9 +877,9 @@ func (curve *KoblitzCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { // and added together. 
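The k1/k2 decomposition and the byte-wise double-and-add walk above are internal details, but their combined effect is observable from outside: multiplying the generator with `ScalarMult` must agree with the precomputed `ScalarBaseMult` path. A small cross-check, under the same renamed-package assumption:

```go
package main

import (
	"fmt"

	"github.com/decred/dcrd/dcrec/secp256k1"
)

func main() {
	curve := secp256k1.S256()

	// An arbitrary 32-byte big-endian scalar, comfortably below the group order N.
	k := make([]byte, 32)
	for i := range k {
		k[i] = byte(i + 1)
	}

	// k*G via the endomorphism-accelerated ScalarMult...
	sx, sy := curve.ScalarMult(curve.Gx, curve.Gy, k)
	// ...and via the byte-point table used by ScalarBaseMult.
	bx, by := curve.ScalarBaseMult(k)

	fmt.Println(sx.Cmp(bx) == 0 && sy.Cmp(by) == 0) // expected: true
}
```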
for i, byteVal := range newK { p := curve.bytePoints[diff+i][byteVal] - curve.addJacobian(qx, qy, qz, &p[0], &p[1], &p[2], qx, qy, qz) + curve.AddJacobian(qx, qy, qz, &p[0], &p[1], &p[2], qx, qy, qz) } - return curve.fieldJacobianToBigAffine(qx, qy, qz) + return curve.FieldJacobianToBigAffine(qx, qy, qz) } // QPlus1Div4 returns the Q+1/4 constant for the curve for use in calculating @@ -933,7 +937,7 @@ func initS256() { // They have also been independently derived from the code in the // EndomorphismVectors function in gensecp256k1.go. secp256k1.lambda = fromHex("5363AD4CC05C30E0A5261C028812645A122E22EA20816678DF02967C1B23BD72") - secp256k1.beta = new(fieldVal).SetHex("7AE96A2B657C07106E64479EAC3434E99CF0497512F58995C1396C28719501EE") + secp256k1.beta = new(FieldVal).SetHex("7AE96A2B657C07106E64479EAC3434E99CF0497512F58995C1396C28719501EE") secp256k1.a1 = fromHex("3086D221A7D46BCDE86C90E49284EB15") secp256k1.b1 = fromHex("-E4437ED6010E88286F547FA90ABFE4C3") secp256k1.a2 = fromHex("114CA50F7A8E2F3F657C1108D9D44CFD8") @@ -942,7 +946,7 @@ func initS256() { // Alternatively, we can use the parameters below, however, they seem // to be about 8% slower. // secp256k1.lambda = fromHex("AC9C52B33FA3CF1F5AD9E3FD77ED9BA4A880B9FC8EC739C2E0CFC810B51283CE") - // secp256k1.beta = new(fieldVal).SetHex("851695D49A83F8EF919BB86153CBCB16630FB68AED0A766A3EC693D68E6AFA40") + // secp256k1.beta = new(FieldVal).SetHex("851695D49A83F8EF919BB86153CBCB16630FB68AED0A766A3EC693D68E6AFA40") // secp256k1.a1 = fromHex("E4437ED6010E88286F547FA90ABFE4C3") // secp256k1.b1 = fromHex("-3086D221A7D46BCDE86C90E49284EB15") // secp256k1.a2 = fromHex("3086D221A7D46BCDE86C90E49284EB15") diff --git a/btcec/btcec_test.go b/dcrec/secp256k1/btcec_test.go similarity index 91% rename from btcec/btcec_test.go rename to dcrec/secp256k1/btcec_test.go index 3a3599c3..7c00b495 100644 --- a/btcec/btcec_test.go +++ b/dcrec/secp256k1/btcec_test.go @@ -1,10 +1,11 @@ // Copyright 2011 The Go Authors. All rights reserved. +// Copyright (c) 2015 The Decred developers // Copyright 2011 ThePiachu. All rights reserved. // Copyright 2013-2014 The btcsuite developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcec_test +package secp256k1_test import ( "crypto/rand" @@ -14,7 +15,7 @@ import ( "math/big" "testing" - "github.com/btcsuite/btcd/btcec" + "github.com/decred/dcrd/dcrec/secp256k1" ) // TestAddJacobian tests addition of points projected in Jacobian coordinates. @@ -211,37 +212,37 @@ func TestAddJacobian(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { // Convert hex to field values. 
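The lambda and beta constants set in initS256 above encode the curve endomorphism φ(x, y) = (βx, y) with φ(P) = λP, which is what lets ScalarMult halve its loop length. That relationship can be spot-checked with plain math/big; the hex constants below are copied verbatim from the hunk above, and the import path is the renamed one from this diff.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/decred/dcrd/dcrec/secp256k1"
)

func main() {
	curve := secp256k1.S256()

	lambda, _ := new(big.Int).SetString("5363AD4CC05C30E0A5261C028812645A122E22EA20816678DF02967C1B23BD72", 16)
	beta, _ := new(big.Int).SetString("7AE96A2B657C07106E64479EAC3434E99CF0497512F58995C1396C28719501EE", 16)

	// lambda*G computed the ordinary way...
	lx, ly := curve.ScalarBaseMult(lambda.Bytes())

	// ...should equal phi(G) = (beta*Gx mod P, Gy).
	px := new(big.Int).Mul(beta, curve.Gx)
	px.Mod(px, curve.P)

	fmt.Println(lx.Cmp(px) == 0 && ly.Cmp(curve.Gy) == 0) // expected: true
}
```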
- x1 := btcec.NewFieldVal().SetHex(test.x1) - y1 := btcec.NewFieldVal().SetHex(test.y1) - z1 := btcec.NewFieldVal().SetHex(test.z1) - x2 := btcec.NewFieldVal().SetHex(test.x2) - y2 := btcec.NewFieldVal().SetHex(test.y2) - z2 := btcec.NewFieldVal().SetHex(test.z2) - x3 := btcec.NewFieldVal().SetHex(test.x3) - y3 := btcec.NewFieldVal().SetHex(test.y3) - z3 := btcec.NewFieldVal().SetHex(test.z3) + x1 := secp256k1.NewFieldVal().SetHex(test.x1) + y1 := secp256k1.NewFieldVal().SetHex(test.y1) + z1 := secp256k1.NewFieldVal().SetHex(test.z1) + x2 := secp256k1.NewFieldVal().SetHex(test.x2) + y2 := secp256k1.NewFieldVal().SetHex(test.y2) + z2 := secp256k1.NewFieldVal().SetHex(test.z2) + x3 := secp256k1.NewFieldVal().SetHex(test.x3) + y3 := secp256k1.NewFieldVal().SetHex(test.y3) + z3 := secp256k1.NewFieldVal().SetHex(test.z3) // Ensure the test data is using points that are actually on // the curve (or the point at infinity). - if !z1.IsZero() && !btcec.S256().TstIsJacobianOnCurve(x1, y1, z1) { + if !z1.IsZero() && !secp256k1.S256().TstIsJacobianOnCurve(x1, y1, z1) { t.Errorf("#%d first point is not on the curve -- "+ "invalid test data", i) continue } - if !z2.IsZero() && !btcec.S256().TstIsJacobianOnCurve(x2, y2, z2) { + if !z2.IsZero() && !secp256k1.S256().TstIsJacobianOnCurve(x2, y2, z2) { t.Errorf("#%d second point is not on the curve -- "+ "invalid test data", i) continue } - if !z3.IsZero() && !btcec.S256().TstIsJacobianOnCurve(x3, y3, z3) { + if !z3.IsZero() && !secp256k1.S256().TstIsJacobianOnCurve(x3, y3, z3) { t.Errorf("#%d expected point is not on the curve -- "+ "invalid test data", i) continue } // Add the two points. - rx, ry, rz := btcec.NewFieldVal(), btcec.NewFieldVal(), btcec.NewFieldVal() - btcec.S256().TstAddJacobian(x1, y1, z1, x2, y2, z2, rx, ry, rz) + rx, ry, rz := secp256k1.NewFieldVal(), secp256k1.NewFieldVal(), secp256k1.NewFieldVal() + secp256k1.S256().TstAddJacobian(x1, y1, z1, x2, y2, z2, rx, ry, rz) // Ensure result matches expected. if !rx.Equals(x3) || !ry.Equals(y3) || !rz.Equals(z3) { @@ -320,24 +321,24 @@ func TestAddAffine(t *testing.T) { // Ensure the test data is using points that are actually on // the curve (or the point at infinity). - if !(x1.Sign() == 0 && y1.Sign() == 0) && !btcec.S256().IsOnCurve(x1, y1) { + if !(x1.Sign() == 0 && y1.Sign() == 0) && !secp256k1.S256().IsOnCurve(x1, y1) { t.Errorf("#%d first point is not on the curve -- "+ "invalid test data", i) continue } - if !(x2.Sign() == 0 && y2.Sign() == 0) && !btcec.S256().IsOnCurve(x2, y2) { + if !(x2.Sign() == 0 && y2.Sign() == 0) && !secp256k1.S256().IsOnCurve(x2, y2) { t.Errorf("#%d second point is not on the curve -- "+ "invalid test data", i) continue } - if !(x3.Sign() == 0 && y3.Sign() == 0) && !btcec.S256().IsOnCurve(x3, y3) { + if !(x3.Sign() == 0 && y3.Sign() == 0) && !secp256k1.S256().IsOnCurve(x3, y3) { t.Errorf("#%d expected point is not on the curve -- "+ "invalid test data", i) continue } // Add the two points. - rx, ry := btcec.S256().Add(x1, y1, x2, y2) + rx, ry := secp256k1.S256().Add(x1, y1, x2, y2) // Ensure result matches expected. if rx.Cmp(x3) != 00 || ry.Cmp(y3) != 0 { @@ -387,29 +388,29 @@ func TestDoubleJacobian(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { // Convert hex to field values. 
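TestAddAffine above exercises the exported Add entry point; when both inputs are the same point, the addition path detects the equality and doubles instead of dividing by zero, so Add(G, G) and Double(G) should coincide. A quick illustration (a sketch under the same renamed-package assumption):

```go
package main

import (
	"fmt"

	"github.com/decred/dcrd/dcrec/secp256k1"
)

func main() {
	curve := secp256k1.S256()

	ax, ay := curve.Add(curve.Gx, curve.Gy, curve.Gx, curve.Gy) // G + G
	dx, dy := curve.Double(curve.Gx, curve.Gy)                  // 2*G

	fmt.Println(ax.Cmp(dx) == 0 && ay.Cmp(dy) == 0) // expected: true
}
```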
- x1 := btcec.NewFieldVal().SetHex(test.x1) - y1 := btcec.NewFieldVal().SetHex(test.y1) - z1 := btcec.NewFieldVal().SetHex(test.z1) - x3 := btcec.NewFieldVal().SetHex(test.x3) - y3 := btcec.NewFieldVal().SetHex(test.y3) - z3 := btcec.NewFieldVal().SetHex(test.z3) + x1 := secp256k1.NewFieldVal().SetHex(test.x1) + y1 := secp256k1.NewFieldVal().SetHex(test.y1) + z1 := secp256k1.NewFieldVal().SetHex(test.z1) + x3 := secp256k1.NewFieldVal().SetHex(test.x3) + y3 := secp256k1.NewFieldVal().SetHex(test.y3) + z3 := secp256k1.NewFieldVal().SetHex(test.z3) // Ensure the test data is using points that are actually on // the curve (or the point at infinity). - if !z1.IsZero() && !btcec.S256().TstIsJacobianOnCurve(x1, y1, z1) { + if !z1.IsZero() && !secp256k1.S256().TstIsJacobianOnCurve(x1, y1, z1) { t.Errorf("#%d first point is not on the curve -- "+ "invalid test data", i) continue } - if !z3.IsZero() && !btcec.S256().TstIsJacobianOnCurve(x3, y3, z3) { + if !z3.IsZero() && !secp256k1.S256().TstIsJacobianOnCurve(x3, y3, z3) { t.Errorf("#%d expected point is not on the curve -- "+ "invalid test data", i) continue } // Double the point. - rx, ry, rz := btcec.NewFieldVal(), btcec.NewFieldVal(), btcec.NewFieldVal() - btcec.S256().TstDoubleJacobian(x1, y1, z1, rx, ry, rz) + rx, ry, rz := secp256k1.NewFieldVal(), secp256k1.NewFieldVal(), secp256k1.NewFieldVal() + secp256k1.S256().TstDoubleJacobian(x1, y1, z1, rx, ry, rz) // Ensure result matches expected. if !rx.Equals(x3) || !ry.Equals(y3) || !rz.Equals(z3) { @@ -471,19 +472,19 @@ func TestDoubleAffine(t *testing.T) { // Ensure the test data is using points that are actually on // the curve (or the point at infinity). - if !(x1.Sign() == 0 && y1.Sign() == 0) && !btcec.S256().IsOnCurve(x1, y1) { + if !(x1.Sign() == 0 && y1.Sign() == 0) && !secp256k1.S256().IsOnCurve(x1, y1) { t.Errorf("#%d first point is not on the curve -- "+ "invalid test data", i) continue } - if !(x3.Sign() == 0 && y3.Sign() == 0) && !btcec.S256().IsOnCurve(x3, y3) { + if !(x3.Sign() == 0 && y3.Sign() == 0) && !secp256k1.S256().IsOnCurve(x3, y3) { t.Errorf("#%d expected point is not on the curve -- "+ "invalid test data", i) continue } // Double the point. - rx, ry := btcec.S256().Double(x1, y1) + rx, ry := secp256k1.S256().Double(x1, y1) // Ensure result matches expected. if rx.Cmp(x3) != 00 || ry.Cmp(y3) != 0 { @@ -495,7 +496,7 @@ func TestDoubleAffine(t *testing.T) { } func TestOnCurve(t *testing.T) { - s256 := btcec.S256() + s256 := secp256k1.S256() if !s256.IsOnCurve(s256.Params().Gx, s256.Params().Gy) { t.Errorf("FAIL S256") } @@ -537,7 +538,7 @@ var s256BaseMultTests = []baseMultTest{ //TODO: test different curves as well? func TestBaseMult(t *testing.T) { - s256 := btcec.S256() + s256 := secp256k1.S256() for i, e := range s256BaseMultTests { k, ok := new(big.Int).SetString(e.k, 16) if !ok { @@ -554,7 +555,7 @@ func TestBaseMult(t *testing.T) { } func TestBaseMultVerify(t *testing.T) { - s256 := btcec.S256() + s256 := secp256k1.S256() for bytes := 1; bytes < 40; bytes++ { for i := 0; i < 30; i++ { data := make([]byte, bytes) @@ -582,7 +583,7 @@ func TestScalarMult(t *testing.T) { // Use another random exponent on the new point. // We use BaseMult to verify by multiplying the previous exponent // and the new random exponent together (mod N) - s256 := btcec.S256() + s256 := secp256k1.S256() x, y := s256.Gx, s256.Gy exponent := big.NewInt(1) for i := 0; i < 1024; i++ { @@ -603,9 +604,8 @@ func TestScalarMult(t *testing.T) { } // Test this curve's usage with the ecdsa package. 
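TestScalarMult above verifies chained multiplications by reducing the product of the exponents mod N. The same idea in isolation, which makes the check easier to read, assuming the renamed import path:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/decred/dcrd/dcrec/secp256k1"
)

func main() {
	curve := secp256k1.S256()

	k1 := big.NewInt(1337)
	k2 := big.NewInt(4242)

	// k2*(k1*G) as two chained multiplications...
	x1, y1 := curve.ScalarBaseMult(k1.Bytes())
	cx, cy := curve.ScalarMult(x1, y1, k2.Bytes())

	// ...must equal (k1*k2 mod N)*G computed in one step.
	k := new(big.Int).Mul(k1, k2)
	k.Mod(k, curve.N)
	bx, by := curve.ScalarBaseMult(k.Bytes())

	fmt.Println(cx.Cmp(bx) == 0 && cy.Cmp(by) == 0) // expected: true
}
```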
- -func testKeyGeneration(t *testing.T, c *btcec.KoblitzCurve, tag string) { - priv, err := btcec.NewPrivateKey(c) +func testKeyGeneration(t *testing.T, c *secp256k1.KoblitzCurve, tag string) { + priv, err := secp256k1.GeneratePrivateKey(c) if err != nil { t.Errorf("%s: error: %s", tag, err) return @@ -616,12 +616,13 @@ func testKeyGeneration(t *testing.T, c *btcec.KoblitzCurve, tag string) { } func TestKeyGeneration(t *testing.T) { - testKeyGeneration(t, btcec.S256(), "S256") + testKeyGeneration(t, secp256k1.S256(), "S256") } -func testSignAndVerify(t *testing.T, c *btcec.KoblitzCurve, tag string) { - priv, _ := btcec.NewPrivateKey(c) - pub := priv.PubKey() +func testSignAndVerify(t *testing.T, c *secp256k1.KoblitzCurve, tag string) { + priv, _ := secp256k1.GeneratePrivateKey(c) + pubx, puby := priv.Public() + pub := secp256k1.NewPublicKey(c, pubx, puby) hashed := []byte("testing") sig, err := priv.Sign(hashed) @@ -641,7 +642,7 @@ func testSignAndVerify(t *testing.T, c *btcec.KoblitzCurve, tag string) { } func TestSignAndVerify(t *testing.T) { - testSignAndVerify(t, btcec.S256(), "S256") + testSignAndVerify(t, secp256k1.S256(), "S256") } func TestNAF(t *testing.T) { @@ -655,7 +656,7 @@ func TestNAF(t *testing.T) { t.Fatalf("failed to read random data at %d", i) break } - nafPos, nafNeg := btcec.NAF(data) + nafPos, nafNeg := secp256k1.NAF(data) want := new(big.Int).SetBytes(data) got := big.NewInt(0) // Check that the NAF representation comes up with the right number @@ -827,8 +828,8 @@ func TestVectors(t *testing.T) { sha := sha1.New() for i, test := range testVectors { - pub := btcec.PublicKey{ - Curve: btcec.S256(), + pub := secp256k1.PublicKey{ + Curve: secp256k1.S256(), X: fromHex(test.Qx), Y: fromHex(test.Qy), } @@ -836,7 +837,7 @@ func TestVectors(t *testing.T) { sha.Reset() sha.Write(msg) hashed := sha.Sum(nil) - sig := btcec.Signature{R: fromHex(test.r), S: fromHex(test.s)} + sig := secp256k1.Signature{R: fromHex(test.r), S: fromHex(test.s)} if fuck := sig.Verify(hashed, &pub); fuck != test.ok { //t.Errorf("%d: bad result %v %v", i, pub, hashed) t.Errorf("%d: bad result %v instead of %v", i, fuck, diff --git a/btcec/ciphering.go b/dcrec/secp256k1/ciphering.go similarity index 95% rename from btcec/ciphering.go rename to dcrec/secp256k1/ciphering.go index d1a6db2a..36717736 100644 --- a/btcec/ciphering.go +++ b/dcrec/secp256k1/ciphering.go @@ -1,8 +1,9 @@ // Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcec +package secp256k1 import ( "bytes" @@ -68,7 +69,7 @@ func GenerateSharedSecret(privkey *PrivateKey, pubkey *PublicKey) []byte { // The primary aim is to ensure byte compatibility with Pyelliptic. Additionaly, // refer to section 5.8.1 of ANSI X9.63 for rationale on this format. 
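testSignAndVerify above shows the reworked key API: GeneratePrivateKey replaces the old NewPrivateKey(curve) constructor, and the public key is rebuilt from the (X, Y) pair returned by Public. Put together as a standalone sketch (assuming the renamed import path from this diff):

```go
package main

import (
	"fmt"

	"github.com/decred/dcrd/dcrec/secp256k1"
)

func main() {
	curve := secp256k1.S256()

	priv, err := secp256k1.GeneratePrivateKey(curve)
	if err != nil {
		panic(err)
	}
	pubX, pubY := priv.Public()
	pub := secp256k1.NewPublicKey(curve, pubX, pubY)

	// Sign a (pre-hashed) message; RFC6979 makes the signature deterministic.
	hashed := []byte("testing")
	sig, err := priv.Sign(hashed)
	if err != nil {
		panic(err)
	}
	fmt.Println(sig.Verify(hashed, pub)) // expected: true
}
```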
func Encrypt(pubkey *PublicKey, in []byte) ([]byte, error) { - ephemeral, err := NewPrivateKey(S256()) + ephemeral, err := GeneratePrivateKey(S256()) if err != nil { return nil, err } @@ -85,7 +86,9 @@ func Encrypt(pubkey *PublicKey, in []byte) ([]byte, error) { return nil, err } // start writing public key - pb := ephemeral.PubKey().SerializeUncompressed() + kc, _ := ephemeral.PublicKey.Curve.(*KoblitzCurve) + pbk := NewPublicKey(kc, ephemeral.PublicKey.X, ephemeral.PublicKey.Y) + pb := pbk.SerializeUncompressed() offset := aes.BlockSize // curve and X length diff --git a/btcec/ciphering_test.go b/dcrec/secp256k1/ciphering_test.go similarity index 83% rename from btcec/ciphering_test.go rename to dcrec/secp256k1/ciphering_test.go index 0160f062..548ef1b6 100644 --- a/btcec/ciphering_test.go +++ b/dcrec/secp256k1/ciphering_test.go @@ -1,31 +1,37 @@ // Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcec_test +package secp256k1_test import ( "bytes" "encoding/hex" "testing" - "github.com/btcsuite/btcd/btcec" + "github.com/decred/dcrd/dcrec/secp256k1" ) func TestGenerateSharedSecret(t *testing.T) { - privKey1, err := btcec.NewPrivateKey(btcec.S256()) + c := secp256k1.S256() + privKey1, err := secp256k1.GeneratePrivateKey(secp256k1.S256()) if err != nil { t.Errorf("private key generation error: %s", err) return } - privKey2, err := btcec.NewPrivateKey(btcec.S256()) + privKey2, err := secp256k1.GeneratePrivateKey(secp256k1.S256()) if err != nil { t.Errorf("private key generation error: %s", err) return } - secret1 := btcec.GenerateSharedSecret(privKey1, privKey2.PubKey()) - secret2 := btcec.GenerateSharedSecret(privKey2, privKey1.PubKey()) + pk1x, pk1y := privKey1.Public() + pk1 := secp256k1.NewPublicKey(c, pk1x, pk1y) + pk2x, pk2y := privKey2.Public() + pk2 := secp256k1.NewPublicKey(c, pk2x, pk2y) + secret1 := secp256k1.GenerateSharedSecret(privKey1, pk2) + secret2 := secp256k1.GenerateSharedSecret(privKey2, pk1) if !bytes.Equal(secret1, secret2) { t.Errorf("ECDH failed, secrets mismatch - first: %x, second: %x", @@ -35,19 +41,22 @@ func TestGenerateSharedSecret(t *testing.T) { // Test 1: Encryption and decryption func TestCipheringBasic(t *testing.T) { - privkey, err := btcec.NewPrivateKey(btcec.S256()) + c := secp256k1.S256() + privkey, err := secp256k1.GeneratePrivateKey(secp256k1.S256()) if err != nil { t.Fatal("failed to generate private key") } in := []byte("Hey there dude. How are you doing? 
This is a test.") - out, err := btcec.Encrypt(privkey.PubKey(), in) + pk1x, pk1y := privkey.Public() + pk1 := secp256k1.NewPublicKey(c, pk1x, pk1y) + out, err := secp256k1.Encrypt(pk1, in) if err != nil { t.Fatal("failed to encrypt:", err) } - dec, err := btcec.Decrypt(privkey, out) + dec, err := secp256k1.Decrypt(privkey, out) if err != nil { t.Fatal("failed to decrypt:", err) } @@ -61,7 +70,7 @@ func TestCipheringBasic(t *testing.T) { func TestCiphering(t *testing.T) { pb, _ := hex.DecodeString("fe38240982f313ae5afb3e904fb8215fb11af1200592b" + "fca26c96c4738e4bf8f") - privkey, _ := btcec.PrivKeyFromBytes(btcec.S256(), pb) + privkey, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), pb) in := []byte("This is just a test.") out, _ := hex.DecodeString("b0d66e5adaa5ed4e2f0ca68e17b8f2fc02ca002009e3" + @@ -70,7 +79,7 @@ func TestCiphering(t *testing.T) { "9b0ba77cf14348fcff80fee10e11981f1b4be372d93923e9178972f69937ec850ed" + "6c3f11ff572ddd5b2bedf9f9c0b327c54da02a28fcdce1f8369ffec") - dec, err := btcec.Decrypt(privkey, out) + dec, err := secp256k1.Decrypt(privkey, out) if err != nil { t.Fatal("failed to decrypt:", err) } @@ -81,7 +90,7 @@ func TestCiphering(t *testing.T) { } func TestCipheringErrors(t *testing.T) { - privkey, err := btcec.NewPrivateKey(btcec.S256()) + privkey, err := secp256k1.GeneratePrivateKey(secp256k1.S256()) if err != nil { t.Fatal("failed to generate private key") } @@ -154,7 +163,7 @@ func TestCipheringErrors(t *testing.T) { } for i, test := range tests1 { - _, err = btcec.Decrypt(privkey, test.ciphertext) + _, err = secp256k1.Decrypt(privkey, test.ciphertext) if err == nil { t.Errorf("Decrypt #%d did not get error", i) } @@ -168,7 +177,7 @@ func TestCipheringErrors(t *testing.T) { {bytes.Repeat([]byte{0x07}, 15)}, } for i, test := range tests2 { - _, err = btcec.TstRemovePKCSPadding(test.in) + _, err = secp256k1.TstRemovePKCSPadding(test.in) if err == nil { t.Errorf("removePKCSPadding #%d did not get error", i) } diff --git a/btcec/doc.go b/dcrec/secp256k1/doc.go similarity index 73% rename from btcec/doc.go rename to dcrec/secp256k1/doc.go index fa8346ab..6cc06f2b 100644 --- a/btcec/doc.go +++ b/dcrec/secp256k1/doc.go @@ -1,11 +1,12 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. /* -Package btcec implements support for the elliptic curves needed for bitcoin. +Package dcrec implements support for the elliptic curves needed for decred. -Bitcoin uses elliptic curve cryptography using koblitz curves +Decred uses elliptic curve cryptography using koblitz curves (specifically secp256k1) for cryptographic functions. See http://www.secg.org/collateral/sec2_final.pdf for details on the standard. @@ -14,8 +15,8 @@ This package provides the data structures and functions implementing the crypto/elliptic Curve interface in order to permit using these curves with the standard crypto/ecdsa package provided with go. Helper functionality is provided to parse signatures and public keys from -standard formats. It was designed for use with btcd, but should be +standard formats. It was designed for use with dcrd, but should be general enough for other uses of elliptic curve crypto. It was originally based on some initial work by ThePiachu, but has significantly diverged since then. 
*/ -package btcec +package secp256k1 diff --git a/btcec/example_test.go b/dcrec/secp256k1/example_test.go similarity index 75% rename from btcec/example_test.go rename to dcrec/secp256k1/example_test.go index 00fe7f84..02cfdabd 100644 --- a/btcec/example_test.go +++ b/dcrec/secp256k1/example_test.go @@ -1,15 +1,16 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcec_test +package secp256k1_test import ( "encoding/hex" "fmt" - "github.com/btcsuite/btcd/btcec" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/dcrec/secp256k1" ) // This example demonstrates signing a message with a secp256k1 private key that @@ -22,11 +23,11 @@ func Example_signMessage() { fmt.Println(err) return } - privKey, pubKey := btcec.PrivKeyFromBytes(btcec.S256(), pkBytes) + privKey, pubKey := secp256k1.PrivKeyFromBytes(secp256k1.S256(), pkBytes) // Sign a message using the private key. message := "test message" - messageHash := wire.DoubleSha256([]byte(message)) + messageHash := chainhash.HashFuncB([]byte(message)) signature, err := privKey.Sign(messageHash) if err != nil { fmt.Println(err) @@ -41,7 +42,7 @@ func Example_signMessage() { fmt.Printf("Signature Verified? %v\n", verified) // Output: - // Serialized Signature: 304402201008e236fa8cd0f25df4482dddbb622e8a8b26ef0ba731719458de3ccd93805b022032f8ebe514ba5f672466eba334639282616bb3c2f0ab09998037513d1f9e3d6d + // Serialized Signature: 3045022100fcc0a8768cfbcefcf2cadd7cfb0fb18ed08dd2e2ae84bef1a474a3d351b26f0302200fc1a350b45f46fa00101391302818d748c2b22615511a3ffd5bb638bd777207 // Signature Verified? true } @@ -56,22 +57,22 @@ func Example_verifySignature() { fmt.Println(err) return } - pubKey, err := btcec.ParsePubKey(pubKeyBytes, btcec.S256()) + pubKey, err := secp256k1.ParsePubKey(pubKeyBytes, secp256k1.S256()) if err != nil { fmt.Println(err) return } // Decode hex-encoded serialized signature. - sigBytes, err := hex.DecodeString("30450220090ebfb3690a0ff115bb1b38b" + - "8b323a667b7653454f1bccb06d4bbdca42c2079022100ec95778b51e707" + - "1cb1205f8bde9af6592fc978b0452dafe599481c46d6b2e479") + sigBytes, err := hex.DecodeString("3045022100fcc0a8768cfbcefcf2cadd7cfb0" + + "fb18ed08dd2e2ae84bef1a474a3d351b26f0302200fc1a350b45f46fa0010139130" + + "2818d748c2b22615511a3ffd5bb638bd777207") if err != nil { fmt.Println(err) return } - signature, err := btcec.ParseSignature(sigBytes, btcec.S256()) + signature, err := secp256k1.ParseSignature(sigBytes, secp256k1.S256()) if err != nil { fmt.Println(err) return @@ -79,7 +80,7 @@ func Example_verifySignature() { // Verify the signature for the message using the public key. 
message := "test message" - messageHash := wire.DoubleSha256([]byte(message)) + messageHash := chainhash.HashFuncB([]byte(message)) verified := signature.Verify(messageHash, pubKey) fmt.Println("Signature Verified?", verified) @@ -98,7 +99,7 @@ func Example_encryptMessage() { fmt.Println(err) return } - pubKey, err := btcec.ParsePubKey(pubKeyBytes, btcec.S256()) + pubKey, err := secp256k1.ParsePubKey(pubKeyBytes, secp256k1.S256()) if err != nil { fmt.Println(err) return @@ -106,7 +107,7 @@ func Example_encryptMessage() { // Encrypt a message decryptable by the private key corresponding to pubKey message := "test message" - ciphertext, err := btcec.Encrypt(pubKey, []byte(message)) + ciphertext, err := secp256k1.Encrypt(pubKey, []byte(message)) if err != nil { fmt.Println(err) return @@ -120,10 +121,10 @@ func Example_encryptMessage() { return } // note that we already have corresponding pubKey - privKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), pkBytes) + privKey, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), pkBytes) // Try decrypting and verify if it's the same message. - plaintext, err := btcec.Decrypt(privKey, ciphertext) + plaintext, err := secp256k1.Decrypt(privKey, ciphertext) if err != nil { fmt.Println(err) return @@ -146,7 +147,7 @@ func Example_decryptMessage() { return } - privKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), pkBytes) + privKey, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), pkBytes) ciphertext, err := hex.DecodeString("35f644fbfb208bc71e57684c3c8b437402ca" + "002047a2f1b38aa1a8f1d5121778378414f708fe13ebf7b4a7bb74407288c1958969" + @@ -155,7 +156,7 @@ func Example_decryptMessage() { "d14174f8b83354fac3ff56075162") // Try decrypting the message. - plaintext, err := btcec.Decrypt(privKey, ciphertext) + plaintext, err := secp256k1.Decrypt(privKey, ciphertext) if err != nil { fmt.Println(err) return diff --git a/btcec/field.go b/dcrec/secp256k1/field.go similarity index 96% rename from btcec/field.go rename to dcrec/secp256k1/field.go index 6e1d176a..de5d32af 100644 --- a/btcec/field.go +++ b/dcrec/secp256k1/field.go @@ -1,9 +1,10 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Copyright (c) 2013-2014 Dave Collins // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcec +package secp256k1 // References: // [HAC]: Handbook of Applied Cryptography Menezes, van Oorschot, Vanstone. @@ -104,7 +105,7 @@ const ( fieldPrimeWordOne = 0x3ffffbf ) -// fieldVal implements optimized fixed-precision arithmetic over the +// FieldVal implements optimized fixed-precision arithmetic over the // secp256k1 finite field. This means all arithmetic is performed modulo // 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f. It // represents each 256-bit value as 10 32-bit integers in base 2^26. This @@ -134,20 +135,20 @@ const ( // n[1] * 2^(26*1) = 2^23 * 2^26 = 2^49 // n[0] * 2^(26*0) = 1 * 2^0 = 1 // Sum: 0 + 0 + ... + 2^49 + 1 = 2^49 + 1 -type fieldVal struct { +type FieldVal struct { n [10]uint32 } // String returns the field value as a human-readable hex string. -func (f fieldVal) String() string { - t := new(fieldVal).Set(&f).Normalize() +func (f FieldVal) String() string { + t := new(FieldVal).Set(&f).Normalize() return hex.EncodeToString(t.Bytes()[:]) } // Zero sets the field value to zero. A newly created field value is already // set to zero. This function can be useful to clear an existing field value // for reuse. 
-func (f *fieldVal) Zero() { +func (f *FieldVal) Zero() { f.n[0] = 0 f.n[1] = 0 f.n[2] = 0 @@ -163,9 +164,9 @@ func (f *fieldVal) Zero() { // Set sets the field value equal to the passed value. // // The field value is returned to support chaining. This enables syntax like: -// f := new(fieldVal).Set(f2).Add(1) so that f = f2 + 1 where f2 is not +// f := new(FieldVal).Set(f2).Add(1) so that f = f2 + 1 where f2 is not // modified. -func (f *fieldVal) Set(val *fieldVal) *fieldVal { +func (f *FieldVal) Set(val *FieldVal) *FieldVal { *f = *val return f } @@ -175,8 +176,8 @@ func (f *fieldVal) Set(val *fieldVal) *fieldVal { // native integers. // // The field value is returned to support chaining. This enables syntax such -// as f := new(fieldVal).SetInt(2).Mul(f2) so that f = 2 * f2. -func (f *fieldVal) SetInt(ui uint) *fieldVal { +// as f := new(FieldVal).SetInt(2).Mul(f2) so that f = 2 * f2. +func (f *FieldVal) SetInt(ui uint) *FieldVal { f.Zero() f.n[0] = uint32(ui) return f @@ -186,8 +187,8 @@ func (f *fieldVal) SetInt(ui uint) *fieldVal { // value representation. // // The field value is returned to support chaining. This enables syntax like: -// f := new(fieldVal).SetBytes(byteArray).Mul(f2) so that f = ba * f2. -func (f *fieldVal) SetBytes(b *[32]byte) *fieldVal { +// f := new(FieldVal).SetBytes(byteArray).Mul(f2) so that f = ba * f2. +func (f *FieldVal) SetBytes(b *[32]byte) *FieldVal { // Pack the 256 total bits across the 10 uint32 words with a max of // 26-bits per word. This could be done with a couple of for loops, // but this unrolled version is significantly faster. Benchmarks show @@ -220,8 +221,8 @@ func (f *fieldVal) SetBytes(b *[32]byte) *fieldVal { // will be truncated. // // The field value is returned to support chaining. This enables syntax like: -// f := new(fieldVal).SetByteSlice(byteSlice) -func (f *fieldVal) SetByteSlice(b []byte) *fieldVal { +// f := new(FieldVal).SetByteSlice(byteSlice) +func (f *FieldVal) SetByteSlice(b []byte) *FieldVal { var b32 [32]byte for i := 0; i < len(b); i++ { if i < 32 { @@ -235,8 +236,8 @@ func (f *fieldVal) SetByteSlice(b []byte) *fieldVal { // representation. Only the first 32-bytes are used. // // The field value is returned to support chaining. This enables syntax like: -// f := new(fieldVal).SetHex("0abc").Add(1) so that f = 0x0abc + 1 -func (f *fieldVal) SetHex(hexString string) *fieldVal { +// f := new(FieldVal).SetHex("0abc").Add(1) so that f = 0x0abc + 1 +func (f *FieldVal) SetHex(hexString string) *FieldVal { if len(hexString)%2 != 0 { hexString = "0" + hexString } @@ -247,7 +248,7 @@ func (f *fieldVal) SetHex(hexString string) *fieldVal { // Normalize normalizes the internal field words into the desired range and // performs fast modular reduction over the secp256k1 prime by making use of the // special form of the prime. -func (f *fieldVal) Normalize() *fieldVal { +func (f *FieldVal) Normalize() *FieldVal { // The field representation leaves 6 bits of overflow in each // word so intermediate calculations can be performed without needing // to propagate the carry to each higher word during the calculations. @@ -414,7 +415,7 @@ func (f *fieldVal) Normalize() *fieldVal { // // The field value must be normalized for this function to return the correct // result. -func (f *fieldVal) PutBytes(b *[32]byte) { +func (f *FieldVal) PutBytes(b *[32]byte) { // Unpack the 256 total bits from the 10 uint32 words with a max of // 26-bits per word. This could be done with a couple of for loops, // but this unrolled version is a bit faster. 
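SetHex and Normalize above are enough to see the fast modular reduction in action: the field prime is 2^256 - 4294968273, so adding one to P - 1 wraps to zero. A tiny sketch; since this diff exports FieldVal and its methods, this works from an external package:

```go
package main

import (
	"fmt"

	"github.com/decred/dcrd/dcrec/secp256k1"
)

func main() {
	// P - 1, the largest element of the secp256k1 field.
	f := new(secp256k1.FieldVal).SetHex(
		"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e")

	// Adding 1 gives P, which normalizes to 0 modulo the field prime.
	fmt.Println(f.AddInt(1).Normalize().IsZero()) // expected: true
}
```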
Benchmarks show this is @@ -460,14 +461,14 @@ func (f *fieldVal) PutBytes(b *[32]byte) { // // The field value must be normalized for this function to return correct // result. -func (f *fieldVal) Bytes() *[32]byte { +func (f *FieldVal) Bytes() *[32]byte { b := new([32]byte) f.PutBytes(b) return b } // IsZero returns whether or not the field value is equal to zero. -func (f *fieldVal) IsZero() bool { +func (f *FieldVal) IsZero() bool { // The value can only be zero if no bits are set in any of the words. // This is a constant time implementation. bits := f.n[0] | f.n[1] | f.n[2] | f.n[3] | f.n[4] | @@ -480,7 +481,7 @@ func (f *fieldVal) IsZero() bool { // // The field value must be normalized for this function to return correct // result. -func (f *fieldVal) IsOdd() bool { +func (f *FieldVal) IsOdd() bool { // Only odd numbers have the bottom bit set. return f.n[0]&1 == 1 } @@ -488,7 +489,7 @@ func (f *fieldVal) IsOdd() bool { // Equals returns whether or not the two field values are the same. Both // field values being compared must be normalized for this function to return // the correct result. -func (f *fieldVal) Equals(val *fieldVal) bool { +func (f *FieldVal) Equals(val *FieldVal) bool { // Xor only sets bits when they are different, so the two field values // can only be the same if no bits are set after xoring each word. // This is a constant time implementation. @@ -505,7 +506,7 @@ func (f *fieldVal) Equals(val *fieldVal) bool { // // The field value is returned to support chaining. This enables syntax like: // f.NegateVal(f2).AddInt(1) so that f = -f2 + 1. -func (f *fieldVal) NegateVal(val *fieldVal, magnitude uint32) *fieldVal { +func (f *FieldVal) NegateVal(val *FieldVal, magnitude uint32) *FieldVal { // Negation in the field is just the prime minus the value. However, // in order to allow negation against a field value without having to // normalize/reduce it first, multiply by the magnitude (that is how @@ -543,7 +544,7 @@ func (f *fieldVal) NegateVal(val *fieldVal, magnitude uint32) *fieldVal { // // The field value is returned to support chaining. This enables syntax like: // f.Negate().AddInt(1) so that f = -f + 1. -func (f *fieldVal) Negate(magnitude uint32) *fieldVal { +func (f *FieldVal) Negate(magnitude uint32) *FieldVal { return f.NegateVal(f, magnitude) } @@ -553,7 +554,7 @@ func (f *fieldVal) Negate(magnitude uint32) *fieldVal { // // The field value is returned to support chaining. This enables syntax like: // f.AddInt(1).Add(f2) so that f = f + 1 + f2. -func (f *fieldVal) AddInt(ui uint) *fieldVal { +func (f *FieldVal) AddInt(ui uint) *FieldVal { // Since the field representation intentionally provides overflow bits, // it's ok to use carryless addition as the carry bit is safely part of // the word and will be normalized out. @@ -567,7 +568,7 @@ func (f *fieldVal) AddInt(ui uint) *fieldVal { // // The field value is returned to support chaining. This enables syntax like: // f.Add(f2).AddInt(1) so that f = f + f2 + 1. -func (f *fieldVal) Add(val *fieldVal) *fieldVal { +func (f *FieldVal) Add(val *FieldVal) *FieldVal { // Since the field representation intentionally provides overflow bits, // it's ok to use carryless addition as the carry bit is safely part of // each word and will be normalized out. This could obviously be done @@ -590,7 +591,7 @@ func (f *fieldVal) Add(val *fieldVal) *fieldVal { // // The field value is returned to support chaining. This enables syntax like: // f3.Add2(f, f2).AddInt(1) so that f3 = f + f2 + 1. 
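The Negate/Add pair above chains exactly the way the comments describe; for instance, a value plus its own negation normalizes to zero. A short sketch under the same assumptions:

```go
package main

import (
	"fmt"

	"github.com/decred/dcrd/dcrec/secp256k1"
)

func main() {
	f := new(secp256k1.FieldVal).SetHex("deadbeef")

	// Negate a copy (magnitude 1, since f came straight from SetHex), then add.
	negF := new(secp256k1.FieldVal).Set(f).Negate(1)
	sum := new(secp256k1.FieldVal).Set(f).Add(negF).Normalize()

	fmt.Println(sum.IsZero()) // expected: true
}
```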
-func (f *fieldVal) Add2(val *fieldVal, val2 *fieldVal) *fieldVal { +func (f *FieldVal) Add2(val *FieldVal, val2 *FieldVal) *FieldVal { // Since the field representation intentionally provides overflow bits, // it's ok to use carryless addition as the carry bit is safely part of // each word and will be normalized out. This could obviously be done @@ -616,7 +617,7 @@ func (f *fieldVal) Add2(val *fieldVal, val2 *fieldVal) *fieldVal { // // The field value is returned to support chaining. This enables syntax like: // f.MulInt(2).Add(f2) so that f = 2 * f + f2. -func (f *fieldVal) MulInt(val uint) *fieldVal { +func (f *FieldVal) MulInt(val uint) *FieldVal { // Since each word of the field representation can hold up to // fieldOverflowBits extra bits which will be normalized out, it's safe // to multiply each word without using a larger type or carry @@ -646,7 +647,7 @@ func (f *fieldVal) MulInt(val uint) *fieldVal { // // The field value is returned to support chaining. This enables syntax like: // f.Mul(f2).AddInt(1) so that f = (f * f2) + 1. -func (f *fieldVal) Mul(val *fieldVal) *fieldVal { +func (f *FieldVal) Mul(val *FieldVal) *FieldVal { return f.Mul2(f, val) } @@ -658,7 +659,7 @@ func (f *fieldVal) Mul(val *fieldVal) *fieldVal { // // The field value is returned to support chaining. This enables syntax like: // f3.Mul2(f, f2).AddInt(1) so that f3 = (f * f2) + 1. -func (f *fieldVal) Mul2(val *fieldVal, val2 *fieldVal) *fieldVal { +func (f *FieldVal) Mul2(val *FieldVal, val2 *FieldVal) *FieldVal { // This could be done with a couple of for loops and an array to store // the intermediate terms, but this unrolled version is significantly // faster. @@ -927,7 +928,7 @@ func (f *fieldVal) Mul2(val *fieldVal, val2 *fieldVal) *fieldVal { // // The field value is returned to support chaining. This enables syntax like: // f.Square().Mul(f2) so that f = f^2 * f2. -func (f *fieldVal) Square() *fieldVal { +func (f *FieldVal) Square() *FieldVal { return f.SquareVal(f) } @@ -938,7 +939,7 @@ func (f *fieldVal) Square() *fieldVal { // // The field value is returned to support chaining. This enables syntax like: // f3.SquareVal(f).Mul(f) so that f3 = f^2 * f = f^3. -func (f *fieldVal) SquareVal(val *fieldVal) *fieldVal { +func (f *FieldVal) SquareVal(val *FieldVal) *FieldVal { // This could be done with a couple of for loops and an array to store // the intermediate terms, but this unrolled version is significantly // faster. @@ -1158,7 +1159,7 @@ func (f *fieldVal) SquareVal(val *fieldVal) *fieldVal { // // The field value is returned to support chaining. This enables syntax like: // f.Inverse().Mul(f2) so that f = f^-1 * f2. -func (f *fieldVal) Inverse() *fieldVal { +func (f *FieldVal) Inverse() *FieldVal { // Fermat's little theorem states that for a nonzero number a and prime // prime p, a^(p-1) = 1 (mod p). Since the multipliciative inverse is // a*b = 1 (mod p), it follows that b = a*a^(p-2) = a^(p-1) = 1 (mod p). @@ -1172,7 +1173,7 @@ func (f *fieldVal) Inverse() *fieldVal { // The secp256k1 prime - 2 is 2^256 - 4294968275. // // This has a cost of 258 field squarings and 33 field multiplications. 
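Inverse above relies on Fermat's little theorem (a^(p-2) ≡ a^(-1) mod p for nonzero a), so multiplying any nonzero value by its inverse must give one. A minimal check with an arbitrary value:

```go
package main

import (
	"fmt"

	"github.com/decred/dcrd/dcrec/secp256k1"
)

func main() {
	// Any nonzero field element will do here.
	a := new(secp256k1.FieldVal).SetHex("2b2c3d4e5f60718293a4b5c6d7e8f90a")

	aInv := new(secp256k1.FieldVal).Set(a).Inverse()
	product := aInv.Mul(a).Normalize()

	one := new(secp256k1.FieldVal).SetInt(1).Normalize()
	fmt.Println(product.Equals(one)) // expected: true
}
```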
- var a2, a3, a4, a10, a11, a21, a42, a45, a63, a1019, a1023 fieldVal + var a2, a3, a4, a10, a11, a21, a42, a45, a63, a1019, a1023 FieldVal a2.SquareVal(f) a3.Mul2(&a2, f) a4.SquareVal(&a2) diff --git a/btcec/field_test.go b/dcrec/secp256k1/field_test.go similarity index 92% rename from btcec/field_test.go rename to dcrec/secp256k1/field_test.go index 39d0ad32..7ce04f3e 100644 --- a/btcec/field_test.go +++ b/dcrec/secp256k1/field_test.go @@ -1,15 +1,16 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Copyright (c) 2013-2014 Dave Collins // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcec_test +package secp256k1_test import ( "reflect" "testing" - "github.com/btcsuite/btcd/btcec" + "github.com/decred/dcrd/dcrec/secp256k1" ) // TestSetInt ensures that setting a field value to various native integers @@ -30,7 +31,7 @@ func TestSetInt(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - f := btcec.NewFieldVal().SetInt(test.in) + f := secp256k1.NewFieldVal().SetInt(test.in) result := f.TstRawInts() if !reflect.DeepEqual(result, test.raw) { t.Errorf("fieldVal.Set #%d wrong result\ngot: %v\n"+ @@ -42,7 +43,7 @@ func TestSetInt(t *testing.T) { // TestZero ensures that zeroing a field value zero works as expected. func TestZero(t *testing.T) { - f := btcec.NewFieldVal().SetInt(2) + f := secp256k1.NewFieldVal().SetInt(2) f.Zero() for idx, rawInt := range f.TstRawInts() { if rawInt != 0 { @@ -54,7 +55,7 @@ func TestZero(t *testing.T) { // TestIsZero ensures that checking if a field IsZero works as expected. func TestIsZero(t *testing.T) { - f := btcec.NewFieldVal() + f := secp256k1.NewFieldVal() if !f.IsZero() { t.Errorf("new field value is not zero - got %v (rawints %x)", f, f.TstRawInts()) @@ -128,7 +129,7 @@ func TestStringer(t *testing.T) { "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff", }, - // 2^256-4294968273 (the btcec prime, so should result in 0) + // 2^256-4294968273 (the dcrec prime, so should result in 0) { "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", "0000000000000000000000000000000000000000000000000000000000000000", @@ -147,7 +148,7 @@ func TestStringer(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - f := btcec.NewFieldVal().SetHex(test.in) + f := secp256k1.NewFieldVal().SetHex(test.in) result := f.String() if result != test.expected { t.Errorf("fieldVal.String #%d wrong result\ngot: %v\n"+ @@ -242,7 +243,7 @@ func TestNormalize(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - f := btcec.NewFieldVal().TstSetRawInts(test.raw).Normalize() + f := secp256k1.NewFieldVal().TstSetRawInts(test.raw).Normalize() result := f.TstRawInts() if !reflect.DeepEqual(result, test.normalized) { t.Errorf("fieldVal.Set #%d wrong normalized result\n"+ @@ -271,7 +272,7 @@ func TestIsOdd(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - f := btcec.NewFieldVal().SetHex(test.in) + f := secp256k1.NewFieldVal().SetHex(test.in) result := f.IsOdd() if result != test.expected { t.Errorf("fieldVal.IsOdd #%d wrong result\n"+ @@ -304,8 +305,8 @@ func TestEquals(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - f := btcec.NewFieldVal().SetHex(test.in1).Normalize() - f2 := btcec.NewFieldVal().SetHex(test.in2).Normalize() + f := 
secp256k1.NewFieldVal().SetHex(test.in1).Normalize() + f2 := secp256k1.NewFieldVal().SetHex(test.in2).Normalize() result := f.Equals(f2) if result != test.expected { t.Errorf("fieldVal.Equals #%d wrong result\n"+ @@ -352,8 +353,8 @@ func TestNegate(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - f := btcec.NewFieldVal().SetHex(test.in).Normalize() - expected := btcec.NewFieldVal().SetHex(test.expected).Normalize() + f := secp256k1.NewFieldVal().SetHex(test.in).Normalize() + expected := secp256k1.NewFieldVal().SetHex(test.expected).Normalize() result := f.Negate(1).Normalize() if !result.Equals(expected) { t.Errorf("fieldVal.Negate #%d wrong result\n"+ @@ -403,8 +404,8 @@ func TestAddInt(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - f := btcec.NewFieldVal().SetHex(test.in1).Normalize() - expected := btcec.NewFieldVal().SetHex(test.expected).Normalize() + f := secp256k1.NewFieldVal().SetHex(test.in1).Normalize() + expected := secp256k1.NewFieldVal().SetHex(test.expected).Normalize() result := f.AddInt(test.in2).Normalize() if !result.Equals(expected) { t.Errorf("fieldVal.AddInt #%d wrong result\n"+ @@ -454,9 +455,9 @@ func TestAdd(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - f := btcec.NewFieldVal().SetHex(test.in1).Normalize() - f2 := btcec.NewFieldVal().SetHex(test.in2).Normalize() - expected := btcec.NewFieldVal().SetHex(test.expected).Normalize() + f := secp256k1.NewFieldVal().SetHex(test.in1).Normalize() + f2 := secp256k1.NewFieldVal().SetHex(test.in2).Normalize() + expected := secp256k1.NewFieldVal().SetHex(test.expected).Normalize() result := f.Add(f2).Normalize() if !result.Equals(expected) { t.Errorf("fieldVal.Add #%d wrong result\n"+ @@ -506,9 +507,9 @@ func TestAdd2(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - f := btcec.NewFieldVal().SetHex(test.in1).Normalize() - f2 := btcec.NewFieldVal().SetHex(test.in2).Normalize() - expected := btcec.NewFieldVal().SetHex(test.expected).Normalize() + f := secp256k1.NewFieldVal().SetHex(test.in1).Normalize() + f2 := secp256k1.NewFieldVal().SetHex(test.in2).Normalize() + expected := secp256k1.NewFieldVal().SetHex(test.expected).Normalize() result := f.Add2(f, f2).Normalize() if !result.Equals(expected) { t.Errorf("fieldVal.Add2 #%d wrong result\n"+ @@ -571,8 +572,8 @@ func TestMulInt(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - f := btcec.NewFieldVal().SetHex(test.in1).Normalize() - expected := btcec.NewFieldVal().SetHex(test.expected).Normalize() + f := secp256k1.NewFieldVal().SetHex(test.in1).Normalize() + expected := secp256k1.NewFieldVal().SetHex(test.expected).Normalize() result := f.MulInt(test.in2).Normalize() if !result.Equals(expected) { t.Errorf("fieldVal.MulInt #%d wrong result\n"+ @@ -632,9 +633,9 @@ func TestMul(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - f := btcec.NewFieldVal().SetHex(test.in1).Normalize() - f2 := btcec.NewFieldVal().SetHex(test.in2).Normalize() - expected := btcec.NewFieldVal().SetHex(test.expected).Normalize() + f := secp256k1.NewFieldVal().SetHex(test.in1).Normalize() + f2 := secp256k1.NewFieldVal().SetHex(test.in2).Normalize() + expected := secp256k1.NewFieldVal().SetHex(test.expected).Normalize() result := f.Mul(f2).Normalize() if !result.Equals(expected) { t.Errorf("fieldVal.Mul #%d wrong result\n"+ @@ -679,8 +680,8 @@ func TestSquare(t *testing.T) { t.Logf("Running %d 
tests", len(tests)) for i, test := range tests { - f := btcec.NewFieldVal().SetHex(test.in).Normalize() - expected := btcec.NewFieldVal().SetHex(test.expected).Normalize() + f := secp256k1.NewFieldVal().SetHex(test.in).Normalize() + expected := secp256k1.NewFieldVal().SetHex(test.expected).Normalize() result := f.Square().Normalize() if !result.Equals(expected) { t.Errorf("fieldVal.Square #%d wrong result\n"+ @@ -732,8 +733,8 @@ func TestInverse(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - f := btcec.NewFieldVal().SetHex(test.in).Normalize() - expected := btcec.NewFieldVal().SetHex(test.expected).Normalize() + f := secp256k1.NewFieldVal().SetHex(test.in).Normalize() + expected := secp256k1.NewFieldVal().SetHex(test.expected).Normalize() result := f.Inverse().Normalize() if !result.Equals(expected) { t.Errorf("fieldVal.Inverse #%d wrong result\n"+ diff --git a/btcec/genprecomps.go b/dcrec/secp256k1/genprecomps.go similarity index 88% rename from btcec/genprecomps.go rename to dcrec/secp256k1/genprecomps.go index d4a9c1b8..dd8ca9b8 100644 --- a/btcec/genprecomps.go +++ b/dcrec/secp256k1/genprecomps.go @@ -1,4 +1,5 @@ // Copyright 2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -17,7 +18,7 @@ import ( "log" "os" - "github.com/btcsuite/btcd/btcec" + "github.com/decred/dcrd/dcrec" ) func main() { @@ -28,7 +29,7 @@ func main() { defer fi.Close() // Compress the serialized byte points. - serialized := btcec.S256().SerializedBytePoints() + serialized := dcrec.S256().SerializedBytePoints() var compressed bytes.Buffer w := zlib.NewWriter(&compressed) if _, err := w.Write(serialized); err != nil { @@ -45,14 +46,14 @@ func main() { fmt.Fprintln(fi, "// Use of this source code is governed by an ISC") fmt.Fprintln(fi, "// license that can be found in the LICENSE file.") fmt.Fprintln(fi) - fmt.Fprintln(fi, "package btcec") + fmt.Fprintln(fi, "package dcrec") fmt.Fprintln(fi) fmt.Fprintln(fi, "// Auto-generated file (see genprecomps.go)") fmt.Fprintln(fi, "// DO NOT EDIT") fmt.Fprintln(fi) fmt.Fprintf(fi, "var secp256k1BytePoints = %q\n", string(encoded)) - a1, b1, a2, b2 := btcec.S256().EndomorphismVectors() + a1, b1, a2, b2 := dcrec.S256().EndomorphismVectors() fmt.Println("The following values are the computed linearly " + "independent vectors needed to make use of the secp256k1 " + "endomorphism:") diff --git a/btcec/gensecp256k1.go b/dcrec/secp256k1/gensecp256k1.go similarity index 98% rename from btcec/gensecp256k1.go rename to dcrec/secp256k1/gensecp256k1.go index 1928702d..9e646348 100644 --- a/btcec/gensecp256k1.go +++ b/dcrec/secp256k1/gensecp256k1.go @@ -1,4 +1,5 @@ // Copyright (c) 2014-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -6,7 +7,7 @@ // This build tag is set during go generate. 
// +build gensecp256k1 -package btcec +package secp256k1 // References: // [GECC]: Guide to Elliptic Curve Cryptography (Hankerson, Menezes, Vanstone) diff --git a/btcec/internal_test.go b/dcrec/secp256k1/internal_test.go similarity index 77% rename from btcec/internal_test.go rename to dcrec/secp256k1/internal_test.go index 23f35684..24621ab3 100644 --- a/btcec/internal_test.go +++ b/dcrec/secp256k1/internal_test.go @@ -1,8 +1,9 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcec +package secp256k1 import ( "math/big" @@ -17,34 +18,34 @@ const ( // TstRawInts allows the test package to get the integers from the internal // field representation for ensuring correctness. It is only available during // the tests. -func (f *fieldVal) TstRawInts() [10]uint32 { +func (f *FieldVal) TstRawInts() [10]uint32 { return f.n } // TstSetRawInts allows the test package to directly set the integers used by // the internal field representation. It is only available during the tests. -func (f *fieldVal) TstSetRawInts(raw [10]uint32) *fieldVal { +func (f *FieldVal) TstSetRawInts(raw [10]uint32) *FieldVal { for i := 0; i < len(raw); i++ { f.n[i] = raw[i] } return f } -// TstFieldJacobianToBigAffine makes the internal fieldJacobianToBigAffine +// TstFieldJacobianToBigAffine makes the internal FieldJacobianToBigAffine // function available to the test package. -func (curve *KoblitzCurve) TstFieldJacobianToBigAffine(x, y, z *fieldVal) (*big.Int, *big.Int) { - return curve.fieldJacobianToBigAffine(x, y, z) +func (curve *KoblitzCurve) TstFieldJacobianToBigAffine(x, y, z *FieldVal) (*big.Int, *big.Int) { + return curve.FieldJacobianToBigAffine(x, y, z) } // TstIsJacobianOnCurve returns boolean if the point (x,y,z) is on the curve. -func (curve *KoblitzCurve) TstIsJacobianOnCurve(x, y, z *fieldVal) bool { +func (curve *KoblitzCurve) TstIsJacobianOnCurve(x, y, z *FieldVal) bool { // Elliptic curve equation for secp256k1 is: y^2 = x^3 + 7 // In Jacobian coordinates, Y = y/z^3 and X = x/z^2 // Thus: // (y/z^3)^2 = (x/z^2)^3 + 7 // y^2/z^6 = x^3/z^6 + 7 // y^2 = x^3 + 7*z^6 - var y2, z2, x3, result fieldVal + var y2, z2, x3, result FieldVal y2.SquareVal(y).Normalize() z2.SquareVal(z) x3.SquareVal(x).Mul(x) @@ -54,25 +55,25 @@ func (curve *KoblitzCurve) TstIsJacobianOnCurve(x, y, z *fieldVal) bool { // TstAddJacobian makes the internal addJacobian function available to the test // package. -func (curve *KoblitzCurve) TstAddJacobian(x1, y1, z1, x2, y2, z2, x3, y3, z3 *fieldVal) { - curve.addJacobian(x1, y1, z1, x2, y2, z2, x3, y3, z3) +func (curve *KoblitzCurve) TstAddJacobian(x1, y1, z1, x2, y2, z2, x3, y3, z3 *FieldVal) { + curve.AddJacobian(x1, y1, z1, x2, y2, z2, x3, y3, z3) } // TstDoubleJacobian makes the internal doubleJacobian function available to the test // package. -func (curve *KoblitzCurve) TstDoubleJacobian(x1, y1, z1, x3, y3, z3 *fieldVal) { +func (curve *KoblitzCurve) TstDoubleJacobian(x1, y1, z1, x3, y3, z3 *FieldVal) { curve.doubleJacobian(x1, y1, z1, x3, y3, z3) } // NewFieldVal returns a new field value set to 0. This is only available to // the test package. -func NewFieldVal() *fieldVal { - return new(fieldVal) +func NewFieldVal() *FieldVal { + return new(FieldVal) } // TstNonceRFC6979 makes the nonceRFC6979 function available to the test package. 
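TstIsJacobianOnCurve above reduces the Jacobian form back to the affine secp256k1 equation y² = x³ + 7. That identity is simple to confirm for the generator with plain math/big (renamed import path assumed, as elsewhere):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/decred/dcrd/dcrec/secp256k1"
)

func main() {
	curve := secp256k1.S256()
	p := curve.P

	// y^2 mod p
	lhs := new(big.Int).Exp(curve.Gy, big.NewInt(2), p)

	// (x^3 + 7) mod p
	rhs := new(big.Int).Exp(curve.Gx, big.NewInt(3), p)
	rhs.Add(rhs, big.NewInt(7))
	rhs.Mod(rhs, p)

	fmt.Println(lhs.Cmp(rhs) == 0) // expected: true
}
```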
func TstNonceRFC6979(privkey *big.Int, hash []byte) *big.Int { - return nonceRFC6979(privkey, hash) + return NonceRFC6979(privkey, hash, nil, nil) } // TstRemovePKCSPadding makes the internal removePKCSPadding function available diff --git a/btcec/precompute.go b/dcrec/secp256k1/precompute.go similarity index 95% rename from btcec/precompute.go rename to dcrec/secp256k1/precompute.go index 034cd553..86fb43e8 100644 --- a/btcec/precompute.go +++ b/dcrec/secp256k1/precompute.go @@ -1,8 +1,9 @@ // Copyright 2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcec +package secp256k1 import ( "compress/zlib" @@ -41,7 +42,7 @@ func loadS256BytePoints() error { // Deserialize the precomputed byte points and set the curve to them. offset := 0 - var bytePoints [32][256][3]fieldVal + var bytePoints [32][256][3]FieldVal for byteNum := 0; byteNum < 32; byteNum++ { // All points in this window. for i := 0; i < 256; i++ { diff --git a/btcec/privkey.go b/dcrec/secp256k1/privkey.go similarity index 53% rename from btcec/privkey.go rename to dcrec/secp256k1/privkey.go index 438323e6..d7f522e8 100644 --- a/btcec/privkey.go +++ b/dcrec/secp256k1/privkey.go @@ -1,12 +1,14 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcec +package secp256k1 import ( "crypto/ecdsa" "crypto/rand" + "io" "math/big" ) @@ -15,6 +17,15 @@ import ( // package. type PrivateKey ecdsa.PrivateKey +// NewPrivateKey instantiates a new private key from a scalar encoded as a +// big integer. +func NewPrivateKey(curve *KoblitzCurve, d *big.Int) *PrivateKey { + b := make([]byte, 0, PrivKeyBytesLen) + dB := paddedAppend(PrivKeyBytesLen, b, d.Bytes()) + priv, _ := PrivKeyFromBytes(curve, dB) + return priv +} + // PrivKeyFromBytes returns a private and public key for `curve' based on the // private key passed as an argument as a byte slice. func PrivKeyFromBytes(curve *KoblitzCurve, pk []byte) (*PrivateKey, @@ -33,9 +44,15 @@ func PrivKeyFromBytes(curve *KoblitzCurve, pk []byte) (*PrivateKey, return (*PrivateKey)(priv), (*PublicKey)(&priv.PublicKey) } +// PrivKeyFromScalar is the same as PrivKeyFromBytes in secp256k1. +func PrivKeyFromScalar(curve *KoblitzCurve, s []byte) (*PrivateKey, + *PublicKey) { + return PrivKeyFromBytes(curve, s) +} + // NewPrivateKey is a wrapper for ecdsa.GenerateKey that returns a PrivateKey // instead of the normal ecdsa.PrivateKey. -func NewPrivateKey(curve *KoblitzCurve) (*PrivateKey, error) { +func GeneratePrivateKey(curve *KoblitzCurve) (*PrivateKey, error) { key, err := ecdsa.GenerateKey(curve, rand.Reader) if err != nil { return nil, err @@ -43,9 +60,21 @@ func NewPrivateKey(curve *KoblitzCurve) (*PrivateKey, error) { return (*PrivateKey)(key), nil } +// GenerateKey generates a key using a random number generator, returning +// the private scalar and the corresponding public key points. +func GenerateKey(curve *KoblitzCurve, rand io.Reader) (priv []byte, x, + y *big.Int, err error) { + key, err := ecdsa.GenerateKey(curve, rand) + priv = key.D.Bytes() + x = key.PublicKey.X + y = key.PublicKey.Y + + return +} + // PubKey returns the PublicKey corresponding to this private key. 
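The new NewPrivateKey constructor above builds a key from a big.Int scalar by padding it to 32 bytes and delegating to PrivKeyFromBytes, so Serialize should round-trip the scalar exactly. A sketch reusing the key bytes that TestCiphering feeds to PrivKeyFromBytes:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/decred/dcrd/dcrec/secp256k1"
)

func main() {
	curve := secp256k1.S256()

	// A 32-byte scalar (the same bytes used in TestCiphering above).
	d, _ := new(big.Int).SetString(
		"fe38240982f313ae5afb3e904fb8215fb11af1200592bfca26c96c4738e4bf8f", 16)

	priv := secp256k1.NewPrivateKey(curve, d)

	// Serialize pads the scalar back out to 32 big-endian bytes.
	fmt.Printf("%x\n", priv.Serialize())
	// expected: fe38240982f313ae5afb3e904fb8215fb11af1200592bfca26c96c4738e4bf8f
}
```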
-func (p *PrivateKey) PubKey() *PublicKey { - return (*PublicKey)(&p.PublicKey) +func (p PrivateKey) Public() (*big.Int, *big.Int) { + return p.PublicKey.X, p.PublicKey.Y } // ToECDSA returns the private key as a *ecdsa.PrivateKey. @@ -53,10 +82,10 @@ func (p *PrivateKey) ToECDSA() *ecdsa.PrivateKey { return (*ecdsa.PrivateKey)(p) } -// Sign generates an ECDSA signature for the provided hash (which should be the result -// of hashing a larger message) using the private key. Produced signature -// is deterministic (same message and same key yield the same signature) and canonical -// in accordance with RFC6979 and BIP0062. +// Sign generates an ECDSA signature for the provided hash (which should be the +// result of hashing a larger message) using the private key. Produced signature +// is deterministic (same message and same key yield the same signature) and +// canonical in accordance with RFC6979 and BIP0062. func (p *PrivateKey) Sign(hash []byte) (*Signature, error) { return signRFC6979(p, hash) } @@ -66,7 +95,22 @@ const PrivKeyBytesLen = 32 // Serialize returns the private key number d as a big-endian binary-encoded // number, padded to a length of 32 bytes. -func (p *PrivateKey) Serialize() []byte { +func (p PrivateKey) Serialize() []byte { b := make([]byte, 0, PrivKeyBytesLen) return paddedAppend(PrivKeyBytesLen, b, p.ToECDSA().D.Bytes()) } + +// SerializeSecret satisfies the chainec PrivateKey interface. +func (p PrivateKey) SerializeSecret() []byte { + return p.Serialize() +} + +// GetD satisfies the chainec PrivateKey interface. +func (p PrivateKey) GetD() *big.Int { + return p.D +} + +// GetType satisfies the chainec PrivateKey interface. +func (p PrivateKey) GetType() int { + return ecTypeSecp256k1 +} diff --git a/btcec/privkey_test.go b/dcrec/secp256k1/privkey_test.go similarity index 81% rename from btcec/privkey_test.go rename to dcrec/secp256k1/privkey_test.go index 6fd787e2..a8f23839 100644 --- a/btcec/privkey_test.go +++ b/dcrec/secp256k1/privkey_test.go @@ -1,14 +1,15 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcec_test +package secp256k1_test import ( "bytes" "testing" - "github.com/btcsuite/btcd/btcec" + "github.com/decred/dcrd/dcrec/secp256k1" ) func TestPrivKeys(t *testing.T) { @@ -28,10 +29,10 @@ func TestPrivKeys(t *testing.T) { } for _, test := range tests { - priv, pub := btcec.PrivKeyFromBytes(btcec.S256(), test.key) + priv, pub := secp256k1.PrivKeyFromBytes(secp256k1.S256(), test.key) - _, err := btcec.ParsePubKey( - pub.SerializeUncompressed(), btcec.S256()) + _, err := secp256k1.ParsePubKey( + pub.SerializeUncompressed(), secp256k1.S256()) if err != nil { t.Errorf("%s privkey: %v", test.name, err) continue diff --git a/btcec/pubkey.go b/dcrec/secp256k1/pubkey.go similarity index 75% rename from btcec/pubkey.go rename to dcrec/secp256k1/pubkey.go index d8b06bfd..b7bea2a5 100644 --- a/btcec/pubkey.go +++ b/dcrec/secp256k1/pubkey.go @@ -1,8 +1,9 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package btcec +package secp256k1 import ( "crypto/ecdsa" @@ -22,9 +23,9 @@ func isOdd(a *big.Int) bool { return a.Bit(0) == 1 } -// decompressPoint decompresses a point on the given curve given the X point and +// DecompressPoint decompresses a point on the given curve given the X point and // the solution to use. -func decompressPoint(curve *KoblitzCurve, x *big.Int, ybit bool) (*big.Int, error) { +func DecompressPoint(curve *KoblitzCurve, x *big.Int, ybit bool) (*big.Int, error) { // TODO(oga) This will probably only work for secp256k1 due to // optimisations. @@ -54,10 +55,16 @@ const ( pubkeyHybrid byte = 0x6 // y_bit + x coord + y coord ) +// NewPublicKey instantiates a new public key with the given X,Y coordinates. +func NewPublicKey(curve *KoblitzCurve, x *big.Int, y *big.Int) *PublicKey { + return &PublicKey{curve, x, y} +} + // ParsePubKey parses a public key for a koblitz curve from a bytestring into a // ecdsa.Publickey, verifying that it is valid. It supports compressed, // uncompressed and hybrid signature formats. -func ParsePubKey(pubKeyStr []byte, curve *KoblitzCurve) (key *PublicKey, err error) { +func ParsePubKey(pubKeyStr []byte, curve *KoblitzCurve) (key *PublicKey, + err error) { pubkey := PublicKey{} pubkey.Curve = curve @@ -91,7 +98,7 @@ func ParsePubKey(pubKeyStr []byte, curve *KoblitzCurve) (key *PublicKey, err err "pubkey string: %d", pubKeyStr[0]) } pubkey.X = new(big.Int).SetBytes(pubKeyStr[1:33]) - pubkey.Y, err = decompressPoint(curve, pubkey.X, ybit) + pubkey.Y, err = DecompressPoint(curve, pubkey.X, ybit) if err != nil { return nil, err } @@ -107,7 +114,8 @@ func ParsePubKey(pubKeyStr []byte, curve *KoblitzCurve) (key *PublicKey, err err return nil, fmt.Errorf("pubkey Y parameter is >= to P") } if !pubkey.Curve.IsOnCurve(pubkey.X, pubkey.Y) { - return nil, fmt.Errorf("pubkey isn't on secp256k1 curve") + return nil, fmt.Errorf("pubkey [%v,%v] isn't on secp256k1 curve", + pubkey.X, pubkey.Y) } return &pubkey, nil } @@ -117,13 +125,20 @@ func ParsePubKey(pubKeyStr []byte, curve *KoblitzCurve) (key *PublicKey, err err type PublicKey ecdsa.PublicKey // ToECDSA returns the public key as a *ecdsa.PublicKey. -func (p *PublicKey) ToECDSA() *ecdsa.PublicKey { - return (*ecdsa.PublicKey)(p) +func (p PublicKey) ToECDSA() *ecdsa.PublicKey { + ecpk := ecdsa.PublicKey(p) + return &ecpk +} + +// SerializeUncompressed serializes a public key in a 33-byte compressed format. +// It is the default serialization method. +func (p PublicKey) Serialize() []byte { + return p.SerializeCompressed() } // SerializeUncompressed serializes a public key in a 65-byte uncompressed // format. -func (p *PublicKey) SerializeUncompressed() []byte { +func (p PublicKey) SerializeUncompressed() []byte { b := make([]byte, 0, PubKeyBytesLenUncompressed) b = append(b, pubkeyUncompressed) b = paddedAppend(32, b, p.X.Bytes()) @@ -131,7 +146,7 @@ func (p *PublicKey) SerializeUncompressed() []byte { } // SerializeCompressed serializes a public key in a 33-byte compressed format. -func (p *PublicKey) SerializeCompressed() []byte { +func (p PublicKey) SerializeCompressed() []byte { b := make([]byte, 0, PubKeyBytesLenCompressed) format := pubkeyCompressed if isOdd(p.Y) { @@ -142,7 +157,7 @@ func (p *PublicKey) SerializeCompressed() []byte { } // SerializeHybrid serializes a public key in a 65-byte hybrid format. 
-func (p *PublicKey) SerializeHybrid() []byte { +func (p PublicKey) SerializeHybrid() []byte { b := make([]byte, 0, PubKeyBytesLenHybrid) format := pubkeyHybrid if isOdd(p.Y) { @@ -162,3 +177,23 @@ func paddedAppend(size uint, dst, src []byte) []byte { } return append(dst, src...) } + +// GetCurve satisfies the chainec PublicKey interface. +func (p PublicKey) GetCurve() interface{} { + return p.Curve +} + +// GetX satisfies the chainec PublicKey interface. +func (p PublicKey) GetX() *big.Int { + return p.X +} + +// GetY satisfies the chainec PublicKey interface. +func (p PublicKey) GetY() *big.Int { + return p.Y +} + +// GetType satisfies the chainec PublicKey interface. +func (p PublicKey) GetType() int { + return ecTypeSecp256k1 +} diff --git a/btcec/pubkey_test.go b/dcrec/secp256k1/pubkey_test.go similarity index 91% rename from btcec/pubkey_test.go rename to dcrec/secp256k1/pubkey_test.go index a499655d..662731f6 100644 --- a/btcec/pubkey_test.go +++ b/dcrec/secp256k1/pubkey_test.go @@ -1,15 +1,16 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcec_test +package secp256k1_test import ( "bytes" "testing" - "github.com/btcsuite/btcd/btcec" "github.com/davecgh/go-spew/spew" + "github.com/decred/dcrd/dcrec/secp256k1" ) type pubKeyTest struct { @@ -20,7 +21,7 @@ type pubKeyTest struct { } var pubKeyTests = []pubKeyTest{ - // pubkey from bitcoin blockchain tx + // pubkey from decred blockchain tx // 0437cd7f8525ceed2324359c2d0ba26006d92d85 { name: "uncompressed ok", @@ -34,7 +35,7 @@ var pubKeyTests = []pubKeyTest{ 0xb4, 0x12, 0xa3, }, isValid: true, - format: btcec.TstPubkeyUncompressed, + format: secp256k1.TstPubkeyUncompressed, }, { name: "uncompressed x changed", @@ -87,7 +88,7 @@ var pubKeyTests = []pubKeyTest{ 0xb4, 0x12, 0xa3, }, isValid: true, - format: btcec.TstPubkeyHybrid, + format: secp256k1.TstPubkeyHybrid, }, { name: "uncompressed as hybrid wrong", @@ -111,7 +112,7 @@ var pubKeyTests = []pubKeyTest{ 0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d, }, isValid: true, - format: btcec.TstPubkeyCompressed, + format: secp256k1.TstPubkeyCompressed, }, // from tx fdeb8e72524e8dab0da507ddbaf5f88fe4a933eb10a66bc4745bb0aa11ea393c { @@ -122,7 +123,7 @@ var pubKeyTests = []pubKeyTest{ 0x7f, 0x5b, 0x2a, 0x4b, 0x7d, 0x44, 0x8e, }, isValid: true, - format: btcec.TstPubkeyCompressed, + format: secp256k1.TstPubkeyCompressed, }, { name: "compressed claims uncompressed (ybit = 0)", @@ -210,14 +211,14 @@ var pubKeyTests = []pubKeyTest{ 0xa6, 0x85, 0x54, 0x19, 0x9c, 0x47, 0xd0, 0x8f, 0xfb, 0x10, 0xd4, 0xb8, }, - format: btcec.TstPubkeyHybrid, + format: secp256k1.TstPubkeyHybrid, isValid: true, }, } func TestPubKeys(t *testing.T) { for _, test := range pubKeyTests { - pk, err := btcec.ParsePubKey(test.key, btcec.S256()) + pk, err := secp256k1.ParsePubKey(test.key, secp256k1.S256()) if err != nil { if test.isValid { t.Errorf("%s pubkey failed when shouldn't %v", @@ -232,12 +233,12 @@ func TestPubKeys(t *testing.T) { } var pkStr []byte switch test.format { - case btcec.TstPubkeyUncompressed: - pkStr = (*btcec.PublicKey)(pk).SerializeUncompressed() - case btcec.TstPubkeyCompressed: - pkStr = (*btcec.PublicKey)(pk).SerializeCompressed() - case btcec.TstPubkeyHybrid: - pkStr = (*btcec.PublicKey)(pk).SerializeHybrid() + case secp256k1.TstPubkeyUncompressed: + pkStr = (*secp256k1.PublicKey)(pk).SerializeUncompressed() + case secp256k1.TstPubkeyCompressed: 
+ pkStr = (*secp256k1.PublicKey)(pk).SerializeCompressed() + case secp256k1.TstPubkeyHybrid: + pkStr = (*secp256k1.PublicKey)(pk).SerializeHybrid() } if !bytes.Equal(test.key, pkStr) { t.Errorf("%s pubkey: serialized keys do not match.", diff --git a/dcrec/secp256k1/schnorr/ecdsa.go b/dcrec/secp256k1/schnorr/ecdsa.go new file mode 100644 index 00000000..b7626e2e --- /dev/null +++ b/dcrec/secp256k1/schnorr/ecdsa.go @@ -0,0 +1,405 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package schnorr + +import ( + "bytes" + "crypto/rand" + "fmt" + "math/big" + + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/dcrec/secp256k1" +) + +// scalarSize is the size of an encoded big endian scalar. +const scalarSize = 32 + +var ( + // bigZero is the big representation of zero. + bigZero = new(big.Int).SetInt64(0) + + // ecTypeSecSchnorr is the ECDSA type for the chainec interface. + ecTypeSecSchnorr int = 2 +) + +// zeroArray zeroes the memory of a scalar array. +func zeroArray(a *[scalarSize]byte) { + for i := 0; i < scalarSize; i++ { + a[i] = 0x00 + } + + return +} + +// zeroSlice zeroes the memory of a scalar byte slice. +func zeroSlice(s []byte) { + for i := 0; i < scalarSize; i++ { + s[i] = 0x00 + } + + return +} + +// schnorrSign signs a Schnorr signature using a specified hash function +// and the given nonce, private key, message, and optional public nonce. +// CAVEAT: Lots of variable time algorithms using both the private key and +// k, which can expose the signer to constant time attacks. You have been +// warned! DO NOT use this algorithm where you might have the possibility +// of someone having EM field/cache/etc access. +// Memory management is also kind of sloppy and whether or not your keys +// or nonces can be found in memory later is likely a product of when the +// garbage collector runs. +// TODO Use field elements with constant time algorithms to prevent said +// attacks. 
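+// In short: the resulting signature is the pair (R_x, s), where R = kG
+// (committed to by its x coordinate only) and s = k - H(R_x || m)*x mod N,
+// with x the private scalar and H the supplied hash function.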
+// This is identical to the Schnorr signature function found in libsecp256k1:
+// https://github.com/bitcoin/secp256k1/tree/master/src/modules/schnorr
+func schnorrSign(curve *secp256k1.KoblitzCurve, msg []byte, ps []byte, k []byte,
+	pubNonceX *big.Int, pubNonceY *big.Int,
+	hashFunc func([]byte) []byte) (*Signature, error) {
+	if len(msg) != scalarSize {
+		str := fmt.Sprintf("wrong size for message (got %v, want %v)",
+			len(msg), scalarSize)
+		return nil, schnorrError(ErrBadInputSize, str)
+	}
+	if len(ps) != scalarSize {
+		str := fmt.Sprintf("wrong size for privkey (got %v, want %v)",
+			len(ps), scalarSize)
+		return nil, schnorrError(ErrBadInputSize, str)
+	}
+	if len(k) != scalarSize {
+		str := fmt.Sprintf("wrong size for nonce k (got %v, want %v)",
+			len(k), scalarSize)
+		return nil, schnorrError(ErrBadInputSize, str)
+	}
+
+	psBig := new(big.Int).SetBytes(ps)
+	kBig := new(big.Int).SetBytes(k)
+
+	if psBig.Cmp(bigZero) == 0 {
+		str := fmt.Sprintf("secret scalar is zero")
+		return nil, schnorrError(ErrInputValue, str)
+	}
+	if psBig.Cmp(curve.N) >= 0 {
+		str := fmt.Sprintf("secret scalar is out of bounds")
+		return nil, schnorrError(ErrInputValue, str)
+	}
+	if kBig.Cmp(bigZero) == 0 {
+		str := fmt.Sprintf("k scalar is zero")
+		return nil, schnorrError(ErrInputValue, str)
+	}
+	if kBig.Cmp(curve.N) >= 0 {
+		str := fmt.Sprintf("k scalar is out of bounds")
+		return nil, schnorrError(ErrInputValue, str)
+	}
+
+	// R = kG
+	var Rpx, Rpy *big.Int
+	Rpx, Rpy = curve.ScalarBaseMult(k)
+	if pubNonceX != nil && pubNonceY != nil {
+		// Optional: if k' exists then R = R+k'
+		Rpx, Rpy = curve.Add(Rpx, Rpy, pubNonceX, pubNonceY)
+	}
+
+	// Check if the field element that would be represented by Y is odd.
+	// If it is, just keep k in the group order.
+	if Rpy.Bit(0) == 1 {
+		kBig.Mod(kBig, curve.N)
+		kBig.Sub(curve.N, kBig)
+	}
+
+	// h = Hash(r || m)
+	Rpxb := BigIntToEncodedBytes(Rpx)
+	hashInput := make([]byte, 0, scalarSize*2)
+	hashInput = append(hashInput, Rpxb[:]...)
+	hashInput = append(hashInput, msg...)
+	h := hashFunc(hashInput)
+	hBig := new(big.Int).SetBytes(h)
+
+	// If the hash ends up larger than the order of the curve, abort.
+	if hBig.Cmp(curve.N) >= 0 {
+		str := fmt.Sprintf("hash of (R || m) too big")
+		return nil, schnorrError(ErrSchnorrHashValue, str)
+	}
+
+	// s = k - hx
+	// TODO Speed this up a bunch by using field elements, not
+	// big ints. That we multiply the private scalar using big
+	// ints is also probably bad because we can only assume the
+	// math isn't in constant time, thus opening us up to side
+	// channel attacks. Using a constant time field element
+	// implementation will fix this.
+	sBig := new(big.Int)
+	sBig.Mul(hBig, psBig)
+	sBig.Sub(kBig, sBig)
+	sBig.Mod(sBig, curve.N)
+
+	if sBig.Cmp(bigZero) == 0 {
+		str := fmt.Sprintf("sig s is zero")
+		return nil, schnorrError(ErrZeroSigS, str)
+	}
+
+	// Zero out the private key and nonce when we're done with it.
+	kBig.SetInt64(0)
+	zeroSlice(k)
+	psBig.SetInt64(0)
+	zeroSlice(ps)
+
+	return &Signature{Rpx, sBig}, nil
+}
+
+// Sign is the exported version of sign. It uses RFC6979 and Blake256 to
+// produce a Schnorr signature.
+func Sign(curve *secp256k1.KoblitzCurve, priv *secp256k1.PrivateKey,
+	hash []byte) (r, s *big.Int, err error) {
+	// Convert the private scalar to a 32 byte big endian number.
+	pA := BigIntToEncodedBytes(priv.GetD())
+	defer zeroArray(pA)
+
+	// Generate a 32-byte scalar to use as a nonce. Try RFC6979
+	// first.
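+	// The nonce is derived deterministically from the serialized private
+	// key and the message hash, so signing the same message with the same
+	// key always yields the same signature. If the derived nonce produces
+	// a hash of (R || m) outside the group order, the loop below retries
+	// with a random nonce instead.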
+ kB := nonceRFC6979(priv.Serialize(), hash, nil, nil) + + for { + sig, err := schnorrSign(curve, hash, pA[:], kB, nil, nil, + chainhash.HashFuncB) + if err == nil { + r = sig.GetR() + s = sig.GetS() + break + } + + errTyped, ok := err.(SchnorrError) + if !ok { + return nil, nil, fmt.Errorf("unknown error type") + } + if errTyped.GetCode() != ErrSchnorrHashValue { + return nil, nil, err + } + + // We need to compute a new nonce, because the one we used + // didn't work. Compute a random nonce. + _, err = rand.Read(kB) + if err != nil { + return nil, nil, err + } + } + + return r, s, nil +} + +// schnorrVerify is the internal function for verification of a secp256k1 +// Schnorr signature. A secure hash function may be passed for the calculation +// of r. +// This is identical to the Schnorr verification function found in libsecp256k1: +// https://github.com/bitcoin/secp256k1/tree/master/src/modules/schnorr +func schnorrVerify(curve *secp256k1.KoblitzCurve, sig []byte, + pubkey *secp256k1.PublicKey, msg []byte, hashFunc func([]byte) []byte) (bool, + error) { + if len(msg) != scalarSize { + str := fmt.Sprintf("wrong size for message (got %v, want %v)", + len(msg), scalarSize) + return false, schnorrError(ErrBadInputSize, str) + } + + if len(sig) != SignatureSize { + str := fmt.Sprintf("wrong size for signature (got %v, want %v)", + len(sig), SignatureSize) + return false, schnorrError(ErrBadInputSize, str) + } + if pubkey == nil { + str := fmt.Sprintf("nil pubkey") + return false, schnorrError(ErrInputValue, str) + } + + if !curve.IsOnCurve(pubkey.GetX(), pubkey.GetY()) { + str := fmt.Sprintf("pubkey point is not on curve") + return false, schnorrError(ErrPointNotOnCurve, str) + } + + sigR := sig[:32] + sigS := sig[32:] + sigRCopy := make([]byte, scalarSize, scalarSize) + copy(sigRCopy, sigR) + toHash := append(sigRCopy, msg...) + h := hashFunc(toHash) + hBig := new(big.Int).SetBytes(h) + + // If the hash ends up larger than the order of the curve, abort. + // Same thing for hash == 0 (as unlikely as that is...). + if hBig.Cmp(curve.N) >= 0 { + str := fmt.Sprintf("hash of (R || m) too big") + return false, schnorrError(ErrSchnorrHashValue, str) + } + if hBig.Cmp(bigZero) == 0 { + str := fmt.Sprintf("hash of (R || m) is zero value") + return false, schnorrError(ErrSchnorrHashValue, str) + } + + // Convert s to big int. + sBig := EncodedBytesToBigInt(copyBytes(sigS)) + + // We also can't have s greater than the order of the curve. + if sBig.Cmp(curve.N) >= 0 { + str := fmt.Sprintf("s value is too big") + return false, schnorrError(ErrInputValue, str) + } + + // r can't be larger than the curve prime. 
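+	// (r is the affine x coordinate of the point R, i.e. a field element,
+	// so any valid value must already be reduced modulo the field prime P.)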
+ rBig := EncodedBytesToBigInt(copyBytes(sigR)) + if rBig.Cmp(curve.P) == 1 { + str := fmt.Sprintf("given R was greater than curve prime") + return false, schnorrError(ErrBadSigRNotOnCurve, str) + } + + // r' = hQ + sG + lx, ly := curve.ScalarMult(pubkey.GetX(), pubkey.GetY(), h) + rx, ry := curve.ScalarBaseMult(sigS) + rlx, rly := curve.Add(lx, ly, rx, ry) + + if rly.Bit(0) == 1 { + str := fmt.Sprintf("calculated R y-value was odd") + return false, schnorrError(ErrBadSigRYValue, str) + } + if !curve.IsOnCurve(rlx, rly) { + str := fmt.Sprintf("calculated R point was not on curve") + return false, schnorrError(ErrBadSigRNotOnCurve, str) + } + rlxB := BigIntToEncodedBytes(rlx) + + // r == r' --> valid signature + if !bytes.Equal(sigR, rlxB[:]) { + str := fmt.Sprintf("calculated R point was not given R") + return false, schnorrError(ErrUnequalRValues, str) + } + + return true, nil +} + +// Verify is the generalized and exported function for the verification of a +// secp256k1 Schnorr signature. BLAKE256 is used as the hashing function. +func Verify(curve *secp256k1.KoblitzCurve, pubkey *secp256k1.PublicKey, + msg []byte, r *big.Int, s *big.Int) bool { + sig := NewSignature(r, s) + ok, _ := schnorrVerify(curve, sig.Serialize(), pubkey, msg, + chainhash.HashFuncB) + + return ok +} + +// schnorrRecover recovers a public key using a signature, hash function, +// and message. It also attempts to verify the signature against the +// regenerated public key. +func schnorrRecover(curve *secp256k1.KoblitzCurve, sig, msg []byte, + hashFunc func([]byte) []byte) (*secp256k1.PublicKey, bool, error) { + if len(msg) != scalarSize { + str := fmt.Sprintf("wrong size for message (got %v, want %v)", + len(msg), scalarSize) + return nil, false, schnorrError(ErrBadInputSize, str) + } + + if len(sig) != SignatureSize { + str := fmt.Sprintf("wrong size for signature (got %v, want %v)", + len(sig), SignatureSize) + return nil, false, schnorrError(ErrBadInputSize, str) + } + + sigR := sig[:32] + sigS := sig[32:] + sigRCopy := make([]byte, scalarSize, scalarSize) + copy(sigRCopy, sigR) + toHash := append(sigRCopy, msg...) + h := hashFunc(toHash) + hBig := new(big.Int).SetBytes(h) + + // If the hash ends up larger than the order of the curve, abort. + // Same thing for hash == 0 (as unlikely as that is...). + if hBig.Cmp(curve.N) >= 0 { + str := fmt.Sprintf("hash of (R || m) too big") + return nil, false, schnorrError(ErrSchnorrHashValue, str) + } + if hBig.Cmp(bigZero) == 0 { + str := fmt.Sprintf("hash of (R || m) is zero value") + return nil, false, schnorrError(ErrSchnorrHashValue, str) + } + + // Convert s to big int. + sBig := EncodedBytesToBigInt(copyBytes(sigS)) + + // We also can't have s greater than the order of the curve. + if sBig.Cmp(curve.N) >= 0 { + str := fmt.Sprintf("s value is too big") + return nil, false, schnorrError(ErrInputValue, str) + } + + // r can't be larger than the curve prime. + rBig := EncodedBytesToBigInt(copyBytes(sigR)) + if rBig.Cmp(curve.P) == 1 { + str := fmt.Sprintf("given R was greater than curve prime") + return nil, false, schnorrError(ErrBadSigRNotOnCurve, str) + } + + // Decompress the Y value. We know that the first bit must + // be even. Use the PublicKey struct to make it easier. 
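+	// (Signing negates k whenever kG has an odd y coordinate, so the R
+	// point consistent with a valid signature always has an even y value;
+	// hence the compressed 0x02 prefix used below.)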
+ compressedPoint := make([]byte, PubKeyBytesLen, PubKeyBytesLen) + compressedPoint[0] = pubkeyCompressed + copy(compressedPoint[1:], sigR) + rPoint, err := secp256k1.ParsePubKey(compressedPoint, curve) + if err != nil { + str := fmt.Sprintf("bad r point") + return nil, false, schnorrError(ErrRegenerateRPoint, str) + } + + // Get the inverse of the hash. + hInv := new(big.Int).ModInverse(hBig, curve.N) + hInv.Mod(hInv, curve.N) + + // Negate s. + sBig.Sub(curve.N, sBig) + sBig.Mod(sBig, curve.N) + + // s' = -s * inverse(h). + sBig.Mul(sBig, hInv) + sBig.Mod(sBig, curve.N) + + // Q = h^(-1)R + s'G + lx, ly := curve.ScalarMult(rPoint.GetX(), rPoint.GetY(), hInv.Bytes()) + rx, ry := curve.ScalarBaseMult(sBig.Bytes()) + pkx, pky := curve.Add(lx, ly, rx, ry) + + // Check if the public key is on the curve. + if !curve.IsOnCurve(pkx, pky) { + str := fmt.Sprintf("pubkey not on curve") + return nil, false, schnorrError(ErrPubKeyOffCurve, str) + } + pubkey := secp256k1.NewPublicKey(curve, pkx, pky) + + // Verify this signature. Slow, lots of double checks, could be more + // cheaply implemented as + // hQ + sG - R == 0 + // which this function checks. + // This will sometimes pass even for corrupted signatures, but + // this shouldn't be a concern because whoever is using the + // results should be checking the returned public key against + // some known one anyway. In the case of these Schnorr signatures, + // relatively high numbers of corrupted signatures (50-70%) + // seem to produce valid pubkeys and valid signatures. + _, err = schnorrVerify(curve, sig, pubkey, msg, hashFunc) + if err != nil { + str := fmt.Sprintf("pubkey/sig pair could not be validated") + return nil, false, schnorrError(ErrRegenSig, str) + } + + return pubkey, true, nil +} + +// RecoverPubkey is the exported and generalized version of schnorrRecover. +// It recovers a public key given a signature and a message, using BLAKE256 +// as the hashing function. +func RecoverPubkey(curve *secp256k1.KoblitzCurve, sig, + msg []byte) (*secp256k1.PublicKey, bool, error) { + + return schnorrRecover(curve, sig, msg, chainhash.HashFuncB) +} diff --git a/dcrec/secp256k1/schnorr/ecdsa_test.go b/dcrec/secp256k1/schnorr/ecdsa_test.go new file mode 100644 index 00000000..99e7c91b --- /dev/null +++ b/dcrec/secp256k1/schnorr/ecdsa_test.go @@ -0,0 +1,371 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package schnorr + +import ( + "bytes" + "encoding/hex" + "math/rand" + "testing" + + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/dcrec/secp256k1" + "github.com/stretchr/testify/assert" +) + +type SchorrSigningTestVectorHex struct { + msg string + nonce string + priv string + sig string +} + +// schnorrSigningTestVectors were produced using the testing functions +// implemented in libsecp256k1. 
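// Illustrative sketch (not part of this patch): signing and verifying a
// 32-byte message hash with the schnorr package introduced above. It assumes
// the exported Sign, Verify and RecoverPubkey functions shown in this diff and
// BLAKE256 hashing via chainhash.HashFuncB.
package main

import (
	"fmt"

	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/dcrec/secp256k1"
	"github.com/decred/dcrd/dcrec/secp256k1/schnorr"
)

func main() {
	curve := secp256k1.S256()
	priv, err := secp256k1.GeneratePrivateKey(curve)
	if err != nil {
		panic(err)
	}
	x, y := priv.Public()
	pub := secp256k1.NewPublicKey(curve, x, y)

	// Schnorr signatures here sign a 32-byte hash, not the raw message.
	msgHash := chainhash.HashFuncB([]byte("some message"))

	r, s, err := schnorr.Sign(curve, priv, msgHash)
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", schnorr.Verify(curve, pub, msgHash, r, s))

	// The signer's public key can also be recovered from the signature.
	sig := schnorr.NewSignature(r, s)
	recovered, ok, _ := schnorr.RecoverPubkey(curve, sig.Serialize(), msgHash)
	fmt.Println("recovered:", ok && recovered.GetX().Cmp(pub.GetX()) == 0)
}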
+// https://github.com/bitcoin/secp256k1/blob/258720851e24e23c1036b4802a185850e258a105/src/modules/schnorr/tests_impl.h +var schnorrSigningTestVectors = []SchorrSigningTestVectorHex{ + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "D1C4C30F60582323A609B56B92270181EB05C3E5AB3E19AE1F768C65C6D09A29", "714D90C991E5D26CBF5771D8A84D087200AAA3197C3217A702ED8D69EA714CAB", "0A3E13BFD0B64C120AA25D27E3CD87678154A4461CE0AD471273927A6459F0C6" + "B9A36629C110ECEEEBBD52E7A5D491BB10AF59C3C73285B9427D1254F28DC460"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "86D9A69D76C1435EDC35347B50B4F944D30EDF8B5CB8E897E95F2C1F1B72D3C3", "60A30BC3BC7CDED4F13C9E3F20F69B8F7B4AB70E60825AE053FC88A2E7046C1F", "D60EFA079B194592A5200C60438A3617691FDE1B5FBCF788D0943A4BB69592F1" + "66D469F48267AA71DAF4BA996BA2BF3A99858C4BF854E2CDFC8AB7E6571D6A8C"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "499FE87D281A8EDEC40D7C29202CA93F9E612760C689543897255CC3B543F2E9", "0C23A2A854DD57AC4773533E84039BA165CA1F79BE8019BDF9EA3173741C67E9", "3C3483E5CDAAF894261071A948B1E21906CEF0293D10A3D20325EA84CC129B32" + "FF07618FAD7BE485A5A1C15DD6EE5485058D03514259714E724879AABCD70C5D"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "258E4C1130CACA9D1CF5DDB589551374DC06491BED02A72CBB8FC7D211E1ED20", "D4045E55A0FED015E1E90934C092C146680090F5538752152F35DAD6ECE70A45", "5EC936DC757F57473A84383F11511B78DB25ABEC5D0DEAE76ECFB30B7A006D9E" + "073D1EFC0C026F41BFBAF9E98FCA42CC6E1946123BD30B5BD27039FFF2D5FC48"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "8E4E9709D63F40167327325CDB6595AD806CABA520FC678993EA872151F6F520", "FD986300F2EB2F3F004D878B8E05C0E8D423DD4B7F112E1396406A0180F14956", "6025A47D8E92D32C417AF55414E6DB6FDEA98D64271C98C5C7FE4C4A6AF75727" + "4106736F0AC7C8782223BCFCDDB2D9396EF5169AE74E81EBE66F1EF3B5982A77"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "F6945B4B5A036BD85915F617054066E16B95392002CE7E7A4B1CD8C2190191A2", "80B060ACE5CB0D9971FB6A8E9A1342D88D144B56FE21A24B9183ED4DACADD30B", "7CF3258EEFE3B837916C21D7E13E0A9363FF6F82444D849D2607DB805B19BF68" + "1B78E1E9C509076E1361A0CE3CE46F4E155EC269D19EDCA9685788728EDD9269"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "1A25DEBF234C081CBFA24EF6A61F59ABE76EA58A91B5D749C31B5FBDFA7D80E9", "AF32FAC33D85C1BFE52C29CE47A9E62CF9E4B7EB66E94DACEDBB61AE0733F826", "31D285DA09369C500E4ADA47D868720852176B813AB25A998E7518855780FF08" + "2C04CE5E1BB33A951496475B594A3B42E740DAC3A0EBED07766CE79FA53DFB56"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "1C158A9FFB6CA28F2C2B4DE2680CCFD166D7FE24ED4E46125FDA392B1B547B24", "DEB515BB6074FBDD04EF7F17F055C82C2BE78C21639AED5F80663A0E89C4C9FE", "77383212DABB1F13B23EEE672C3D2D0CA84A58A148CAD023BC7D27F4C2A1BBD0" + "E3A2DB11E8DDC5D8DA1C55C14214FCA9C876EA0635DC260EDD47C8EE6C4B0F11"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "A11FBCD2EFB92CD714C088A5DAA8BEEE5645B7B70A253EB49F6A08E8E64B2F6F", "C57B3F9CB4E72C757BF477BCC5C6CBFEE8F9BFF35FCDE781F647648955F89D70", "180997C4BC9C6BDA81580330CDC4826B15A6A0B591023F84E5A5CCD8503D0E0D" + "172EA8B127F3BEAE03FBB6E3E20A8324A270905854B4EC4F6734C10A0A87DEE3"}, + 
SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "98BFCC52C846EB49CD728C566F58A62FF2D046D7B4A61CBDE8A2A3607AF5484D", "9A77F52ECFCF560FD9DE0B353CDE562FB1674B5ADD569832808C7015197F2600", "B9CD85998407F3E190BE5144BDF15ADDA8178C1F38C5168A2567C7895698DA40" + "E86BA74E0ACB2C7B9045826921265DAAF31FA05DAE8B83A6F3354B6CB493E52F"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "48DEFD73566CCAC57B1CB1FE7143767646B045A57D84C6C43B68958EDEED0439", "18F462C1C3EC3492613BA5E6C2D12C7A271948DB943A6A7081C8F9E58C308BB2", "EFC76E59819889BAEC540F5D89D28A4404B7EB98D34F941A7CA1D8C525256876" + "679BA0F14E5B2C1F69E1010D1B4FD0265AF34A583059805D5A3D13BBD4C8C83E"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "54415DD3EA817C1338D3AFDBBB142A25C2658B7A0E002BEA64E8DB88F5D62AC0", "56469DE2D40B2D7F0012042EFC7647D265D62254500B3D2864D9830D06C0536C", "DAD150AD11E5814E5DA4451A5D6EEE409779BD47D57C4AE7A788536E7E9426A3" + "DE5F61C9E54CE432BAB0FB8A4D6AF3D81A54062108DC2D23B397B7566AAB3A7A"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "7AC0AD497AD4F45285970132B2F15F48A61D2102EE60630A0345D20577070995", "2B72777C7784D50D538810A30AE3BF430A8A9C15E8658ACAED4D41207BA804D1", "827560F2F92A0905097E381BD0E962421A9E43E105585F9CFC3FB2D321369293" + "E16F37244D106FB0FFF2CF4A239F65C2A01D8C3FAAD187E39509AB0B16C7B72F"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "D7B06ED329566E59C551D71C52E9878DC49A2955159C30C5F373B29858A7CB89", "E76806E6BFDDC81B642A0A880313F8A552F448035B82CB2A91DD6175A497147C", "B772E3BFADC01A5088B9637E2E3D7D3269531091B79FC48ABCEE5A887BD3A11B" + "8BFA61100172D404D4938E097EEFFB0508320657BFE54699CD7490F5DC939333"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "23154117CD21BD4FCCB706F32D0945C7C835C632F02C6AC350B4E6BFDAA7F5BC", "96B0E0189F88AB41C486A65957CE6DEEEEBF2382F5393F9DAD1E8C65ADABEF25", "4D46A811696D6ADFCEDB303C4CA5474912502278D17BDB991C90C7EF54943B14" + "B2AF61F481E885C0B73033730AA78C3E59E7BB01B976D3ADA6EC5620FF9C279C"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "38B077D9A2448A2103CEA6F273C7F3720296A9A1814D0B71947D8AC83D00A86F", "836E462F675F2C9FCBF338F4F11494FF3A1BCA5221F6914B8219C5B189D77D40", "869A3BD110A8D32B6BFD8EF14B914C58276462EFF9CF8CE08279DF8BDAD24593" + "E34709964F528A1F0DFC112CAA2C3D26FE51E875CB6A495B57DCAB6876193511"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "FBD4EC7D96CB1F88BCBFEAF6FB7DDEE0898A3E4246C60FD688784B0F3C86444B", "8C5762E2019C91F24C80F1B4B7382912B573B3E92A8B9BDE3FCF4F902D84874C", "C2A63FE9E15D19D5ED6232A5641C225B7E0F06B4D4F0C53750E7BD889DAE0B36" + "50C13CFD562D104DEFD08C3CB208A4EFDF2EA405795E61D37162BF374E1F9CFF"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "76598BC70790E15BD717E9044BCDBD1B02EE09B9A7359CB77DFD1BC324C6AC54", "0CA72F2D1207B08162535A2C57282FB9B73C842818BB14378DB2F0C202F44BDA", "058A67F66CFFB0C7F14A5F8D98B068264E02DF7D372B0A1658308D6694F3E2D8" + "3B1014276872CD2ED9D19D3CE03AA0AE87F86FF1AC643D2179947552F363FA31"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "AAFEBB955B36B528034142464309C23C2F252F1A1BA97AB34D54FBD35899D896", 
"6A64A71C9BA0B2C5AD94F490CD76682B47C5EA7EE3B3B43208ADBB855DA9629E", "75541ADBACFE1483A27B2C261FFAD609BD38BDC9FE2C951B1B1A53C0929973C9" + "AC5B33E168B832CD2B65E1A164BBBB45939F26696367461A41EBA76984FD70A9"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "A19FDD9C1A161DC1B272E9484FD60B80374765D89C60928230BA51C671986262", "7B82469A8B8D1E9A12756869E255B70DB0404DB187565D88DFD78618077826F3", "6E5221F9AB9634644B3447C56504BE4B2C6DEC1024DC5E486F0D11E8ABFF7266" + "7096BD1F7FC33C5E1F7AAD7DF06C43FAF8B7A05F312ADFABF586D516B988E880"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "D463F98114D5EFBF206441C3BCB07B5D0358B9ABCD7212700D1E85A3B59B4B14", "7507E3E1CD0CAF7E951B8F46C7A52717203AA6922F5B3AA4B813AE05B3BED616", "8CEE361B3E0FB92B24CBC9E239E5D80DA558354E0108715286EFEFDEF55275B0" + "A68CAFB8E5C8499EE3895F24C007AA9A43E708B8A2959F43A8A9F8C14E4B2B8C"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "F15B442705E64A36688564BB3E5DC9ED8469AC73CE1794E549D31C34A8CF7367", "2C755FB91D25C48F0340519A54DF0026F7F6DC06B8D9DB3BC50BBF6A82344CCA", "45E262112967F578ABC595C6D12A887D60BCEB91FEA1F83EE6488F89159451ED" + "944891E90094B663718799A3729D0423FA95D7D327E480BE1556DE1F9D8C29DB"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "27261595EF702F48336F47DD2317461C06157C0868BD0E18A27BFA393551E5EE", "3DA9A1F356C519B7E4B7ACD05A648D4BEE8539ACF4B60D291E68D76B357D308B", "019C854E1D900122F706721816DB3895C6772B9EE254F39B326895D299910AF6" + "F58A6281C7DECCD1104094E1D84A5C182D549B55F834DB610F7CA0B5CD74A718"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "FF553D981379C5046D1927B50DB2B8D24A0274C09F7F4FBDDF100C59398DCC0E", "6470BE1A8EA8E9328B5F5F83B64677F4E346AEFCFCE71D9725E062998E0DCF42", "CE38C701700CBFA7442BB4E51C0C6F22FCCE1F39D4B22ADF5D5A910CD8E22CDD" + "55DC9688ED2F2ABFCEEB40C8FD217F274F9C369C8ADEF703AF7772C967C06F84"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "F38B04C155684AF2C717223DCC91EBC828B2D5B09EE807AE45C131693EC63637", "A8A1BEF67F7A3847DB2A57F3724467EDCC6454D29CA0E399F2A492898E69E497", "3F95D1916590BC74B6FDC4F3E412ED29E040358F17DFDD209C0F4AE04C9E94EA" + "D8281CCD545F57D80EA420CFDE5F81E350333BED9A39E116F72B17635AB57A92"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "C97757D41207BECB21EC0B4EB4AF7450F06124F43701C0260A46734F7C3D67B0", "D585B47F24BB9BC50A555A7FF2E706140404BCE393B2ADBBD927D3B4608294E9", "66BD8B3E23E11BE273AFA3257E9EF2D979A957E5C6B97A7966CFF11B4B581E67" + "EECA4295F554E20DB9F59793130595AAD73641C813AA15028190130592D21E95"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "2B58BCED7852B0F4198A0802CA0887DFA819BD73AE54E8EEE55FA4C76B28BE29", "638E732221DF5962F9ABD1FC69375058D20215F3ED226F6CCEED2C6F9D91B56B", "8AE5EFDF614C21FD637A3B26BB5655E4922F2598826A10DE2218B8BF4B7328F2" + "611C67385E260B9DD75C59623A003ECAD7F73811BE8BC7F015574ECFEBE3A9DD"}, + SchorrSigningTestVectorHex{"304502210088BE0644191B935DB1CD786B43FF27798006578D8C908906B49E89", "0371B3651E7ABC533801290E1C7E1D91D9AEB000ECF44FD0500773699A1C95C7", "6B7439B47606111ABBDE3429B6BD2938F0CEE87E7507A265655BAEFE25B48A16", "F3353583290AF1CC35AD4929633A0044EE4E7422E260AF341F597D240257C51D" + "F2813DA1E04C64A03FBBD57834897D6DEC8D441D1DECF80746502081355BAA3C"}, + 
SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "FFFFFFFFFF03000000FCFFFFFFFFFFFFFF030000000000007CE0FFFFFFFFFFFF", "E0FFFFFFFF1F000000E0FFFFFFFFFFFFFFFFFFFFFFFFFFFF1F00E0FFFFFFFFFF", "D1637C2FFDBAF642250F8B54FEE34A98CB7DE641BA1E9EFA9A20EA874A5FDCFA" + "F0A7B370F9458C0562C0AEF18BBBB84D50BAA6533E05F2C11DDEFC1E4BB1A6C5"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "0CF8FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFF070000F8FF3F0000", "FFFFFFFFFF07000000000000C001000000E0FF3F00F0FFFFFFFFFFFFFF1F0000", "C0DB1A31A4E93F49E53C48CC1766CCC51B9A214400CC6A784090E7E0409D49BE" + "A0CAE6C3073B305E89A810F64558CC636F1F59049D00D4D7824A8BC1CB0FBEC4"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "E1FFFFFFFFFFFF00F07F0000000000000000E0FFFFFF3F0000FEFF0FF8FFFFFF", "000000000000000000000000F0FFFFFF1F3000E0FF7FF8FFFFFFFFFF0F000000", "AE12E3F8D79D596E023A250D9DDDB150ED35509F6F4B80488551CD46F1CA9D8A" + "78ED5DB488348DA811E1E9A192AF4E250F1EE3DC46C2A035E19D39D8FEAF9FEB"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "9FFFFFFFFFFFFFFFFFFFFFFFDF00FCC1FFFFFFFFFFFFFFFFFFFFFFFFFFFF0F00", "FFFFFFFFFFFFFFFFFFFF1F0000000000000000F8FFFF0F000000E0FFFFFFFFFF", "FE984A34D9D7B673DFCA1BAADC1F39C546BAA222E253E66726CA3045CCCC948B" + "A1DF6CBA13DD65FDBAAD8017A5C1C95331DEE2C07FFCDFE204ACD3E70028CA11"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "000000C03FFCFFFF03000000FFFF7FF8FFFF1F000000FFFFFFFF0F0000FC1F00", "FEFFFFFFFF1F00000000000000000000000000FFFF3F00E0070000FCFFFFFF03", "4BF5AD0E0C37DCE99BC56CD2D3AB0FDDF1321AE371E4F7D5E113D1D26793C780" + "DF0E1E0B5D69D151DD08639E25171A73C0CEA6F983F6B93ABCD5CD187A1853D8"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "FFFFFCFFFFDF010000000000F0FFFF3F000000FC1F007800FEFFFFFFFFFF0700", "00000000C0FFFFFFFFFFFFFFFFFFFFFF00FEFF03000000000080007E00000000", "3CCCCA01404CF09A1E906A00F61C1559FC3C8F1D29516D24D6BBD94A51664E0D" + "44B9CBA23BEC4D7DDA36DEA03F8D6077057C01ED9E33105629A9CC776DBE8985"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "00E0FFFFFFFFFFFFFFFF03FE3F000000000000000000FF1F0000000000000000", "FF3F00000000FEFFFFFF3F0000F8FFFFFF0F00C0FFFFFFFFFFFF0700000000C0", "2E4B97107F07E264BB3E7EBA6E019DE8B5FF11F2E2F42C92EFEB50DE38E9EC5D" + "2B3912BE0B0035FC5C8EC889312E4B47AAF460AD4A16752CE5266F395622D8BD"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "000000F8FFFFFFFFFF010000000000300000000000F8FF00000080FFFFFFFFFF", "FFFFFF07C0FF07000000980FE0FFFFFF0100000000000FE0FF0F0000FCFFFFFF", "52CC053205817B8727F8AEB8508345B77FC22F3C5CBC0A473D5AF2AD27F6C155" + "1DA91F27C36FED4DD90F61DD36457014F335C0773A0FBF423416294AE78E43FB"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "FFFFFFFFFFFFFFFFFFFFFCFFFFFFE3FFFFFFFFFFFF0F00E0FFFFFF0000F0FF1F", "FFFFFFFFFF3F0000C0FFFFFFFFFFFFFF0F0000F0FF7F1E00000080FFFFFF07FC", "95F2E4ACECBF9603303FDB868FD3F9DCB1D4DFD6C311FE00EA26E65B740C2B9E" + "BD6038044DBE6EF5791991EE1E968BF9D25402EAD2497C212EC526EF4C6EEF3A"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "0F0080FFFFFFFF0F00000000000000C0FFFFFFFFFFFFFF07000000C0FFFFFFFF", 
"FFFFFFFFFFFF01E0FFFFFFFFFFFFFFFF03F8FF7F00F0FFFF0700000000000000", "D163552127BF351D7918F66741435865FC04694090A4A9B3FDB2BAC13462A05C" + "8457E0464AEE6B27036C39066F0E1DF3DF952DF258FED6A42B9450F0FF27D9EB"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "003C803F00FFFFFFFFFFFFFFFF7F0000000000FE0700000000000000000000FF", "FF01000000C0FFFFFFFFFF3F00E00300003C000000E0FFFFFFFFFFFFFFFFFFFF", "00846EE6532D8B30C8912117FCBEF293EE79E212BA4507F33C8B90AF8DE35C01" + "8450F88A1B0F7EBCCBC155C30A68D51E60FFF9DC769B36E47C1AE34EDE9FA37B"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "FFFFFFFFFFFFFFFFFFFFFFC1FFFFFFFFFF010000000080FFFFFFFF1F00C0FFFF", "FFFFFFFFFFFFFFFF010000C0FF81FF81FF100000000000000000000F0000F00F", "0F4C75E1A7EBA72A2D0F014F477FCC765E6EC36350F92EF75F08EEB4C1C3195F" + "18368C0E7B4510534443B8279C741D3F2D1B01766B051B452C3D0698D8E8D08D"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "FFFFFF0100FCFFFF7F0000000000003F0000000000F0FFFFFFFF7FE0FFFFFFFF", "FFFFFFFFFF7FFFFFFFFFFF070000000000000000E0FFFFFFFFFF030000FCFFC3", "C05CFF7CE3C072431F05213A27ECD81288C83CBE48C39C43633B54B9AC0E6890" + "BC580EC26EFDEB276062CFE6B978F55430F757438183AA63B5F4B5226AAF675F"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "FFFF000000FFFFFFFFE1FFFFFFFFFFFFFF030000000000000000F0FFFF00FFFF", "FFFF0700000000C0FFFFFFFFFFFFFFFFFF1F00C0FFFFFFFF0700FFFF03000000", "8CE4A5F3CD68B81548A2F7D59CB610FAE94AC52FA07ED40A769F08552E5C5BE0" + "54C46CAE9374DC4A10F8B6D36A1A9D7C4B5AB09415DDF7D229193D9349D511D1"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "0000000000FFFFFFFFFFFF030000000000000000F81FE0FF3F00FEFF0F0000FE", "000000C0FFFF7F0000F8FFFFFFFFFF07000000F0FFFFFFFFFFFFFFFFFFFFFF0F", "A338C9A7F05E31D8D6870232B88E57B5EDD92A85423A9C256BE52B0DA92579E5" + "6467E2D50FAA93D2A56150ABC03D9DF69590E3AD72C4CC7D0C2FAC5AFEFE9085"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "0000000000000000E0FFFF0700F8FFFFFFFF1F00000000000000000000000000", "FFFF0F00E0FF1F00000000000000F0FFFFFFFFFFFF010000000080FFFFFFFFFF", "FFC45EC42040A6F58B402AEA3C18E40614842BA7EFA73FF843B896FB426CCFA7" + "AF9A01F69B5CC37977014A83BCB28C1E8847B897159B6069A6F988DE634CC2AB"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "07000000000000000000000000F8FF7F00FFFFFFFFFF00000000C0FFFFFFFFFF", "000000000000FC0F00000000000000000000000000FEFF0300C4FFFFFF3F80FF", "F25FD62A0081100E01A501FBF1EC42678C6B9BAE722ABD4F4979E99E8CC06E25" + "8FB87682E70ABC22E0EFE457C76D78BF293BCE128381AE5EDD03A4195CA8AD40"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "80FFF8FF3FFFFFFFFFFFFFFFFFFFFFFFFFFFF1FFFFFFFF1F0000FEFFFFFFFF3F", "F8FFFFFFFFFFFFFF0FFFFF3F00000000000000000000000000C0FF0700000000", "075A4EB0F330215AEECA9E9180F129BC6E2F0A53A49FFDE5A272C3F04DEDA081" + "8BEEBDC42915B1EE2106DB38D310E70B1CD1CCF228B0B3D3640B65289988DEDE"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "0100FEFF0100FFFFFFFF03000000FCFFFFFFFF3F0000E0FFFF01000000000000", "F8FF000000C0FFFF07000000000000000000FCFFFFFFFF03E001E0FFFFFFFFFF", "D1E8298DA8B147ABB128536F773BF456A3628719A237026BF5B458C8776F325B" + "85A448D3181B429C8AAC59343DD3E4DEBDBE01C44B8E6CE03C4F5FB4020DEBB9"}, + 
SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "00FEFFFFFF7F00FFFFFFFFFFFFFFFFFFFFFF0700FCFFFF00000000000080FF80", "0100000000000000000000000080FF1F0000C00300F0FFFF0700000000000000", "144B8536728044AEF88AF33D3087CCB1D1F50C41A5C972361469818A6FA498D4" + "9A536497E36544F704560801CE46DA59713B9B7287E100F77A69B9E88BE95B9F"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "00000000E0FFFFFFFF0100000000FFFFFF7F0000C0FFFFFF3F0080FFFFFFFFFF", "0000000000F0FFFF0FF8FF0300000000000000C0FFFFFFFF010000F8FFFF1F00", "ACAA7BE8ED867701F671DB71020483705480618CEF52BD96C0306ECED854702B" + "BF8CB65D50E21B0FAB21D6AD11DDAA0183CFE29F2FE348DF4A3E73A07111E313"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "000000000000000000C0FF0FFCFFFFFFFFFFFFFFFFFFFF1F0000000000000000", "0080FFFFFFFFFF000000C0FFFFFFFF1F00FCFFFFFFFFFF7F000000E0FFFF00E0", "F71708DB249A209F44771F98E3D659F5139823835B188982EE195A50D3D860B4" + "5226533153B14AA89D5F7958B9799BB893BFADC9528C871087AC795ADE224595"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "00000000E0FF7F0000000000000080FF1FF8FFFFC1FFFFFF3F00FFFF03000000", "01000000000000000080FFFFFFFFFFFF3F0000FEFFFFFFFFFF0F0000F8FF1F00", "54A25E83137863A94778D33F031BAC8DC10F39913A8B3FD792660F1034C70DBD" + "2B62E4B73DBFF01B97401ECD2F03DF81090FEC1A59FA161687A2BAAAEEE514DD"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "80FFFFFFFFFFFF0300F8FFFFFFFF1F00000000C0FF0300001C0080FF1F7F0000", "FF7FC0FFFFFFFFFFFFFFFFFF030000FCFF3F0000F0FFFFFFFFFFFF0F00060000", "75161FC21626DD84BD58778207FC7F58506E11204A95A603D5E8E6CA575CE2F1" + "F5984BFA2C970641936FACD9C9778AAE02EC89483AF879D0BC063FB516744E62"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "000000FCFFFF0100000000000000C0FFFFFFFFFFFFFFFFFFF7FFFFFF1F000000", "00FCFF3FC0FFFF0100000000000000004000000000FF7F00000000E0FFFF3F00", "971FA2BC49EBA95D1CCC10706D34C809ECB6B6521B4B028B3029E104404E7DE8" + "1833F25C71C6A100CDCACB2E154A1AA2B140671A79D001913F4AFDF830C255C2"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "FF030000E0FFFFFF3F80FFFF017E00FCFF0100FCFFFFFF010000F0FF07FCFFFF", "000000000080FFFFFFFF03FCFFFFFFFF0700000000E0FF0F0000000000F8FFF0", "7655EEC4926FEC2B7D0ABFB1A63333E9B9CB893A9366CC597090F55CD0D2602F" + "2827D561883C5D4924712D0EE305891E5869132F254F4AAD8E553E10A85975AE"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "000000FEFFFFFF7F0000008003000000000000000080FFFFFF07FEFFFFFF7F00", "FFFFFFFFFFFF3FF8FFFFFFFFFFFF0100000000FCFFFFFFFFFFFF0700E0FF0100", "EDE211DEE2487804585D84DAE822BF376C318CB44D6EF6EA9526BF1C26C6AB6A" + "5BF3BD71AA4A9D8990055BF9AA49AD82BF549B86D0E06B7D90F601E45C167F39"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "0700000000000000FCFFFFFFFFFFFF0084FFFF0000F8FFFFFF0700FEFFFFFFFF", "00001C00FFFF070000C0FF0100F0FF030000C0FFFFFFFF00FEFFFFFFFFFF0100", "2836BD0AD158AFA88ACAF13A0EC8DCE4414EEC0D282B2BA18820A712E252493B" + "E03F8FB67706EF558F2356B4385A25511613BB8D5132C91CD5084CAB5E56323B"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "0000000000FEFF0F0000000080FFFFFFFF0F000000FFFFFFFF0100F0FFFF0700", 
"000000000000000000FFFF03809FFFFFFFFFFFFF0F0000000000000000000000", "2A5837844FDEDC8C5FD40350737EB0412DFBEA43AF75736D2669D9E7A4341917" + "0EECFF456B3C3BA4AD1B74DC45C7FA0D77DDC52A60943687008C768839149E7C"}, + SchorrSigningTestVectorHex{"304402207C7BC9E2D115C4C5C3E50950E69B30A9810BD73946A6D23C4ACBFF2E", "FFFF01000000FEFFFFFFFFF3FFFFFFFFFFFF7F000000000000F07FF8FFFFFFFF", "FFFFFFFF80FFFF07FEFF7F00000000000000FC07FCFFFF0700000000E0FFFFFF", "93E083E71C14BA94479CBE92213A56FF2ECFF8F2B085B2B3AA5CC6E8FEFAEAC0" + "76444710354091BB4FE9A218E875885F81DD787241A766C4E0422C5C1D7AD271"}, +} + +type SchorrSigningTestVector struct { + msg []byte + nonce []byte + priv []byte + sig []byte +} + +func GetSigningTestVectors() []*SchorrSigningTestVector { + tvs := make([]*SchorrSigningTestVector, 0) + for _, v := range schnorrSigningTestVectors { + msg, _ := hex.DecodeString(v.msg) + nonce, _ := hex.DecodeString(v.nonce) + priv, _ := hex.DecodeString(v.priv) + sig, _ := hex.DecodeString(v.sig) + lv := SchorrSigningTestVector{msg, nonce, priv, sig} + tvs = append(tvs, &lv) + } + + return tvs +} + +// Horribly broken hash function. Do not use for anything but tests. +func testSchnorrHash(msg []byte) []byte { + h32 := make([]byte, scalarSize, scalarSize) + + j := 32 + for i := 0; i < 32; i++ { + h32[i] = msg[i] ^ msg[j] + j++ + } + + return h32 +} + +func TestSchnorrSigning(t *testing.T) { + tRand := rand.New(rand.NewSource(54321)) + curve := secp256k1.S256() + tvs := GetSigningTestVectors() + for _, tv := range tvs { + _, pubkey := secp256k1.PrivKeyFromBytes(curve, tv.priv) + + sig, err := + schnorrSign(curve, tv.msg, tv.priv, tv.nonce, nil, nil, + testSchnorrHash) + + assert.NoError(t, err) + assert.Equal(t, sig.Serialize(), tv.sig) + + // Make sure they verify too while we're at it. + _, err = schnorrVerify(curve, sig.Serialize(), pubkey, tv.msg, + testSchnorrHash) + assert.NoError(t, err) + + // See if we can recover the public keys OK. + var pkRecover *secp256k1.PublicKey + pkRecover, _, err = schnorrRecover(curve, sig.Serialize(), tv.msg, + testSchnorrHash) + assert.NoError(t, err) + if err == nil { + assert.Equal(t, pubkey.Serialize(), pkRecover.Serialize()) + } + + // Screw up the signature at a random bit and make sure that breaks it. + sigBad := sig.Serialize() + pos := tRand.Intn(63) + bitPos := tRand.Intn(7) + sigBad[pos] ^= 1 << uint8(bitPos) + _, err = schnorrVerify(curve, sigBad, pubkey, tv.msg, + testSchnorrHash) + assert.Error(t, err) + + // Make sure it breaks pubkey recovery too. + valid := false + pkRecover, valid, err = schnorrRecover(curve, sigBad, tv.msg, + testSchnorrHash) + if valid { + assert.NotEqual(t, pubkey.Serialize(), pkRecover.Serialize()) + } else { + assert.Error(t, err) + } + } +} + +func randPrivKeyList(curve *secp256k1.KoblitzCurve, + i int) []*secp256k1.PrivateKey { + r := rand.New(rand.NewSource(54321)) + + privKeyList := make([]*secp256k1.PrivateKey, i, i) + for j := 0; j < i; j++ { + for { + bIn := new([32]byte) + for k := 0; k < scalarSize; k++ { + randByte := r.Intn(255) + bIn[k] = uint8(randByte) + } + + pks, _ := secp256k1.PrivKeyFromBytes(curve, bIn[:]) + if pks == nil { + continue + } + + // No duplicates allowed. 
+ if j > 0 && + (bytes.Equal(pks.Serialize(), privKeyList[j-1].Serialize())) { + r.Seed(int64(j) + r.Int63n(12345)) + continue + } + privKeyList[j] = pks + r.Seed(int64(j) + 54321) + break + } + } + + return privKeyList +} + +type SignatureVerParams struct { + pubkey *secp256k1.PublicKey + msg []byte + sig *Signature +} + +func randSigList(curve *secp256k1.KoblitzCurve, i int) []*SignatureVerParams { + r := rand.New(rand.NewSource(54321)) + + privKeyList := make([]*secp256k1.PrivateKey, i, i) + for j := 0; j < i; j++ { + for { + bIn := new([32]byte) + for k := 0; k < scalarSize; k++ { + randByte := r.Intn(255) + bIn[k] = uint8(randByte) + } + + pks, _ := secp256k1.PrivKeyFromBytes(curve, bIn[:]) + if pks == nil { + continue + } + privKeyList[j] = pks + r.Seed(int64(j) + 54321) + break + } + } + + msgList := make([][]byte, i, i) + for j := 0; j < i; j++ { + m := make([]byte, 32, 32) + for k := 0; k < scalarSize; k++ { + randByte := r.Intn(255) + m[k] = uint8(randByte) + } + msgList[j] = m + r.Seed(int64(j) + 54321) + } + + sigsList := make([]*Signature, i, i) + for j := 0; j < i; j++ { + r, s, err := Sign(curve, privKeyList[j], msgList[j]) + if err != nil { + panic("sign failure") + } + sig := &Signature{r, s} + sigsList[j] = sig + } + + sigStructList := make([]*SignatureVerParams, i, i) + for j := 0; j < i; j++ { + ss := new(SignatureVerParams) + pkx, pky := privKeyList[j].Public() + ss.pubkey = secp256k1.NewPublicKey(curve, pkx, pky) + ss.msg = msgList[j] + ss.sig = sigsList[j] + sigStructList[j] = ss + } + + return sigStructList +} + +// Use our actual hashing algorithm here. +func TestSignaturesAndRecovery(t *testing.T) { + curve := secp256k1.S256() + r := rand.New(rand.NewSource(54321)) + + numSigs := 128 + sigList := randSigList(curve, numSigs) + + for _, tv := range sigList { + pubkey := tv.pubkey + sig := tv.sig + + // Make sure we can verify the original signature. + _, err := schnorrVerify(curve, sig.Serialize(), pubkey, tv.msg, + chainhash.HashFuncB) + assert.NoError(t, err) + + ok := Verify(curve, pubkey, tv.msg, sig.R, sig.S) + assert.Equal(t, true, ok) + + // See if we can recover the public keys OK. + var pkRecover *secp256k1.PublicKey + pkRecover, _, err = schnorrRecover(curve, sig.Serialize(), tv.msg, + chainhash.HashFuncB) + assert.NoError(t, err) + if err == nil { + assert.Equal(t, pubkey.Serialize(), pkRecover.Serialize()) + } + + // Screw up the signature at some random bits and make sure + // that breaks it. + numBadBits := r.Intn(2) + sigBad := sig.Serialize() + // (numBadBits*2)+1 --> always odd so at least one bit is different + for i := 0; i < (numBadBits*2)+1; i++ { + pos := r.Intn(63) + bitPos := r.Intn(7) + sigBad[pos] ^= 1 << uint8(bitPos) + } + _, err = schnorrVerify(curve, sigBad, pubkey, tv.msg, + chainhash.HashFuncB) + assert.Error(t, err) + + // Make sure it breaks pubkey recovery too. 
+ valid := false + pkRecover, valid, err = schnorrRecover(curve, sigBad, tv.msg, + testSchnorrHash) + if valid { + assert.NotEqual(t, pubkey.Serialize(), pkRecover.Serialize()) + } else { + assert.Error(t, err) + } + } +} + +func benchmarkSigning(b *testing.B) { + curve := secp256k1.S256() + + r := rand.New(rand.NewSource(54321)) + msg := []byte{ + 0xbe, 0x13, 0xae, 0xf4, + 0xe8, 0xa2, 0x00, 0xb6, + 0x45, 0x81, 0xc4, 0xd1, + 0x0c, 0xf4, 0x1b, 0x5b, + 0xe1, 0xd1, 0x81, 0xa7, + 0xd3, 0xdc, 0x37, 0x55, + 0x58, 0xc1, 0xbd, 0xa2, + 0x98, 0x2b, 0xd9, 0xfb, + } + + numKeys := 1024 + privKeyList := randPrivKeyList(curve, numKeys) + + for n := 0; n < b.N; n++ { + randIndex := r.Intn(numKeys - 1) + _, _, err := Sign(curve, privKeyList[randIndex], msg) + if err != nil { + panic("sign failure") + } + } +} + +func BenchmarkSigning(b *testing.B) { benchmarkSigning(b) } + +func benchmarkVerification(b *testing.B) { + curve := secp256k1.S256() + r := rand.New(rand.NewSource(54321)) + + numSigs := 1024 + sigList := randSigList(curve, numSigs) + + for n := 0; n < b.N; n++ { + randIndex := r.Intn(numSigs - 1) + ver := Verify(curve, + sigList[randIndex].pubkey, + sigList[randIndex].msg, + sigList[randIndex].sig.R, + sigList[randIndex].sig.S) + if ver != true { + panic("made invalid sig") + } + } +} + +func BenchmarkVerification(b *testing.B) { benchmarkVerification(b) } diff --git a/dcrec/secp256k1/schnorr/error.go b/dcrec/secp256k1/schnorr/error.go new file mode 100644 index 00000000..318c9428 --- /dev/null +++ b/dcrec/secp256k1/schnorr/error.go @@ -0,0 +1,109 @@ +// Copyright (c) 2014 Conformal Systems LLC. +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package schnorr + +import ( + "fmt" +) + +// ErrorCode identifies a kind of error. +type ErrorCode int + +// These constants are used to identify a specific RuleError. +const ( + // ErrBadInputSize indicates that input to a signature was of the wrong size. + ErrBadInputSize = iota + + // ErrInputValue indicates that the value of an input was wrong (e.g. zero). + ErrInputValue + + // ErrSchnorrHashValue indicates that the hash of (R || m) was too large + // and so a new k value (nonce) should be used. + ErrSchnorrHashValue + + // ErrPointNotOnCurve indicates that a point was not on the given + // elliptic curve. + ErrPointNotOnCurve + + // ErrBadSigRYValue indicates that the calculated Y value of R was odd, + // which is not allowed. + ErrBadSigRYValue + + // ErrBadSigRNotOnCurve indicates that the calculated or given point R for some + // signature was not on the curve. + ErrBadSigRNotOnCurve + + // ErrUnequalRValues indicates that the calculated point R for some + // signature was not the same as the given R value for the signature. + ErrUnequalRValues + + // ErrRegenerateRPoint indicates that a point could not be regenerated + // from r. + ErrRegenerateRPoint + + // ErrPubKeyOffCurve indicates that a regenerated pubkey was off the curve. + ErrPubKeyOffCurve + + // ErrRegenSig indicates that a regenerated pubkey could not be validated + // against the signature. + ErrRegenSig + + // ErrBadNonce indicates that a generated nonce from some algorithm was + // unusable. + ErrBadNonce + + // ErrZeroSigS indates a zero signature S value, which is invalid. + ErrZeroSigS + + // ErrNonmatchingR indicates that all signatures to be combined in a + // threshold signature failed to have a matching R value. 
+	ErrNonmatchingR
+)
+
+// Map of ErrorCode values back to their constant names for pretty printing.
+var errorCodeStrings = map[ErrorCode]string{
+	ErrBadInputSize:      "BadInputSize",
+	ErrInputValue:        "ErrInputValue",
+	ErrSchnorrHashValue:  "ErrSchnorrHashValue",
+	ErrPointNotOnCurve:   "ErrPointNotOnCurve",
+	ErrBadSigRYValue:     "ErrBadSigRYValue",
+	ErrBadSigRNotOnCurve: "ErrBadSigRNotOnCurve",
+	ErrRegenerateRPoint:  "ErrRegenerateRPoint",
+	ErrPubKeyOffCurve:    "ErrPubKeyOffCurve",
+	ErrRegenSig:          "ErrRegenSig",
+	ErrBadNonce:          "ErrBadNonce",
+	ErrZeroSigS:          "ErrZeroSigS",
+	ErrNonmatchingR:      "ErrNonmatchingR",
+}
+
+// String returns the ErrorCode as a human-readable name.
+func (e ErrorCode) String() string {
+	if s := errorCodeStrings[e]; s != "" {
+		return s
+	}
+	return fmt.Sprintf("Unknown ErrorCode (%d)", int(e))
+}
+
+// SchnorrError identifies a violation.
+type SchnorrError struct {
+	ErrorCode   ErrorCode // Describes the kind of error
+	Description string    // Human readable description of the issue
+}
+
+// Error satisfies the error interface and prints human-readable errors.
+func (e SchnorrError) Error() string {
+	return e.Description
+}
+
+// GetCode returns the ErrorCode describing the kind of error.
+func (e SchnorrError) GetCode() ErrorCode {
+	return e.ErrorCode
+}
+
+// schnorrError creates a SchnorrError given a set of arguments.
+func schnorrError(c ErrorCode, desc string) SchnorrError {
+	return SchnorrError{ErrorCode: c, Description: desc}
+}
diff --git a/dcrec/secp256k1/schnorr/primitives.go b/dcrec/secp256k1/schnorr/primitives.go
new file mode 100644
index 00000000..c5df61ef
--- /dev/null
+++ b/dcrec/secp256k1/schnorr/primitives.go
@@ -0,0 +1,100 @@
+// Copyright (c) 2015 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package schnorr
+
+import (
+	"math/big"
+)
+
+// copyBytes copies a byte slice to a 32 byte array.
+func copyBytes(aB []byte) *[32]byte {
+	if aB == nil {
+		return nil
+	}
+	s := new([32]byte)
+
+	// If we have a short byte string, expand
+	// it so that it's long enough.
+	aBLen := len(aB)
+	if aBLen < scalarSize {
+		diff := scalarSize - aBLen
+		for i := 0; i < diff; i++ {
+			aB = append([]byte{0x00}, aB...)
+		}
+	}
+
+	for i := 0; i < scalarSize; i++ {
+		s[i] = aB[i]
+	}
+
+	return s
+}
+
+// copyBytes64 copies a byte slice to a 64 byte array.
+func copyBytes64(aB []byte) *[64]byte {
+	if aB == nil {
+		return nil
+	}
+
+	s := new([64]byte)
+
+	// If we have a short byte string, expand
+	// it so that it's long enough.
+	aBLen := len(aB)
+	if aBLen < 64 {
+		diff := 64 - aBLen
+		for i := 0; i < diff; i++ {
+			aB = append([]byte{0x00}, aB...)
+		}
+	}
+
+	for i := 0; i < 64; i++ {
+		s[i] = aB[i]
+	}
+
+	return s
+}
+
+// BigIntToEncodedBytes converts a big integer into its corresponding
+// 32 byte big endian representation.
+func BigIntToEncodedBytes(a *big.Int) *[32]byte {
+	s := new([32]byte)
+	if a == nil {
+		return s
+	}
+	// Caveat: a can be longer than 32 bytes.
+	aB := a.Bytes()
+
+	// If we have a short byte string, expand
+	// it so that it's long enough.
+	aBLen := len(aB)
+	if aBLen < scalarSize {
+		diff := scalarSize - aBLen
+		for i := 0; i < diff; i++ {
+			aB = append([]byte{0x00}, aB...)
+		}
+	}
+
+	for i := 0; i < scalarSize; i++ {
+		s[i] = aB[i]
+	}
+
+	return s
+}
+
+// EncodedBytesToBigInt converts a 32 byte big endian representation of
+// an integer into a big integer.
+func EncodedBytesToBigInt(s *[32]byte) *big.Int { + // Use a copy so we don't screw up our original + // memory. + sCopy := new([32]byte) + for i := 0; i < scalarSize; i++ { + sCopy[i] = s[i] + } + + bi := new(big.Int).SetBytes(sCopy[:]) + + return bi +} diff --git a/dcrec/secp256k1/schnorr/pubkey.go b/dcrec/secp256k1/schnorr/pubkey.go new file mode 100644 index 00000000..49ea4008 --- /dev/null +++ b/dcrec/secp256k1/schnorr/pubkey.go @@ -0,0 +1,46 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package schnorr + +import ( + "fmt" + + "github.com/decred/dcrd/dcrec/secp256k1" +) + +// These constants define the lengths of serialized public keys. +const ( + PubKeyBytesLen = 33 +) + +const ( + // pubkeyCompressed is the header byte for a compressed secp256k1 pubkey. + pubkeyCompressed byte = 0x2 // y_bit + x coord +) + +// ParsePubKey parses a public key for a koblitz curve from a bytestring into a +// ecdsa.Publickey, verifying that it is valid. It supports compressed, +// uncompressed and hybrid signature formats. +func ParsePubKey(curve *secp256k1.KoblitzCurve, + pubKeyStr []byte) (key *secp256k1.PublicKey, err error) { + if pubKeyStr == nil { + err = fmt.Errorf("nil pubkey byte string") + return + } + if len(pubKeyStr) != PubKeyBytesLen { + err = fmt.Errorf("bad pubkey byte string size (want %v, have %v)", + PubKeyBytesLen, len(pubKeyStr)) + return + } + format := pubKeyStr[0] + format &= ^byte(0x1) + if format != pubkeyCompressed { + err = fmt.Errorf("wrong pubkey type (not compressed)") + return + } + + return secp256k1.ParsePubKey(pubKeyStr, curve) +} diff --git a/dcrec/secp256k1/schnorr/signature.go b/dcrec/secp256k1/schnorr/signature.go new file mode 100644 index 00000000..045e686d --- /dev/null +++ b/dcrec/secp256k1/schnorr/signature.go @@ -0,0 +1,75 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package schnorr + +import ( + "fmt" + "math/big" +) + +// Signature is a type representing a Schnorr signature. +type Signature struct { + R *big.Int + S *big.Int +} + +// SignatureSize is the size of an encoded Schnorr signature. +const SignatureSize = 64 + +// NewSignature instantiates a new signature given some R,S values. +func NewSignature(r, s *big.Int) *Signature { + return &Signature{r, s} +} + +// Serialize returns the Schnorr signature in the more strict format. +// +// The signatures are encoded as +// sig[0:32] R, a point encoded as big endian +// sig[32:64] S, scalar multiplication/addition results = (ab+c) mod l +// encoded also as big endian +func (sig Signature) Serialize() []byte { + rBytes := BigIntToEncodedBytes(sig.R) + sBytes := BigIntToEncodedBytes(sig.S) + + all := append(rBytes[:], sBytes[:]...) + + return all +} + +func parseSig(sigStr []byte) (*Signature, error) { + if len(sigStr) != SignatureSize { + return nil, fmt.Errorf("bad signature size; have %v, want %v", + len(sigStr), SignatureSize) + } + + rBytes := copyBytes(sigStr[0:32]) + r := EncodedBytesToBigInt(rBytes) + sBytes := copyBytes(sigStr[32:64]) + s := EncodedBytesToBigInt(sBytes) + + return &Signature{r, s}, nil +} + +// ParseSignature parses a signature in BER format for the curve type `curve' +// into a Signature type, perfoming some basic sanity checks. 
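+// (In practice the expected encoding is the fixed 64-byte R || S form
+// produced by Serialize above.)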
+func ParseSignature(sigStr []byte) (*Signature, error) {
+	return parseSig(sigStr)
+}
+
+// GetR satisfies the chainec Signature interface.
+func (s Signature) GetR() *big.Int {
+	return s.R
+}
+
+// GetS satisfies the chainec Signature interface.
+func (s Signature) GetS() *big.Int {
+	return s.S
+}
+
+// GetType satisfies the chainec Signature interface.
+func (s Signature) GetType() int {
+	return ecTypeSecSchnorr
+}
diff --git a/dcrec/secp256k1/schnorr/threshold.go b/dcrec/secp256k1/schnorr/threshold.go
new file mode 100644
index 00000000..7be83558
--- /dev/null
+++ b/dcrec/secp256k1/schnorr/threshold.go
@@ -0,0 +1,240 @@
+// Copyright (c) 2015 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package schnorr
+
+import (
+	"fmt"
+	"math/big"
+
+	"github.com/decred/dcrd/chaincfg/chainhash"
+	"github.com/decred/dcrd/dcrec/secp256k1"
+)
+
+// Sha256VersionStringRFC6979 is the RFC6979 nonce version for a Schnorr signature
+// over the secp256k1 curve using SHA256 as the hash function.
+var Sha256VersionStringRFC6979 = []byte("Schnorr+SHA256  ")
+
+// BlakeVersionStringRFC6979 is the RFC6979 nonce version for a Schnorr signature
+// over the secp256k1 curve using BLAKE256 as the hash function.
+var BlakeVersionStringRFC6979 = []byte("Schnorr+BLAKE256")
+
+// CombinePubkeys combines a slice of public keys into a single public key
+// by adding them together with point addition.
+func CombinePubkeys(curve *secp256k1.KoblitzCurve,
+	pks []*secp256k1.PublicKey) *secp256k1.PublicKey {
+	numPubKeys := len(pks)
+
+	// Have to have at least one pubkey; a single pubkey is returned as-is.
+	if numPubKeys < 1 {
+		return nil
+	}
+	if numPubKeys == 1 {
+		return pks[0]
+	}
+	if pks[0] == nil || pks[1] == nil {
+		return nil
+	}
+
+	var pkSumX *big.Int
+	var pkSumY *big.Int
+
+	pkSumX, pkSumY = curve.Add(pks[0].GetX(), pks[0].GetY(),
+		pks[1].GetX(), pks[1].GetY())
+
+	if numPubKeys > 2 {
+		for i := 2; i < numPubKeys; i++ {
+			pkSumX, pkSumY = curve.Add(pkSumX, pkSumY,
+				pks[i].GetX(), pks[i].GetY())
+		}
+	}
+
+	if !curve.IsOnCurve(pkSumX, pkSumY) {
+		return nil
+	}
+
+	return secp256k1.NewPublicKey(curve, pkSumX, pkSumY)
+}
+
+// nonceRFC6979 is a local instantiation of deterministic nonce generation
+// by the standards of RFC6979.
+func nonceRFC6979(privkey []byte, hash []byte, extra []byte,
+	version []byte) []byte {
+	pkD := new(big.Int).SetBytes(privkey)
+	defer pkD.SetInt64(0)
+	kBig := secp256k1.NonceRFC6979(pkD, hash, extra, version)
+	defer kBig.SetInt64(0)
+	k := BigIntToEncodedBytes(kBig)
+	return k[:]
+}
+
+// generateNoncePair deterministically generates a nonce pair for use in
+// partial signing of a message. Returns a public key (nonce to disseminate)
+// and a private nonce to keep as a secret for the signer.
+func generateNoncePair(curve *secp256k1.KoblitzCurve, msg []byte, priv []byte,
+	nonceFunction func([]byte, []byte, []byte, []byte) []byte, extra []byte,
+	version []byte) ([]byte, *secp256k1.PublicKey, error) {
+	k := nonceFunction(priv, msg, extra, version)
+	kBig := new(big.Int).SetBytes(k)
+
+	// k scalar sanity checks.
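+	// The nonce must be a nonzero scalar strictly less than the curve
+	// order N; anything outside that range is rejected rather than reduced.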
+	if kBig.Cmp(bigZero) == 0 {
+		str := fmt.Sprintf("k scalar is zero")
+		return nil, nil, schnorrError(ErrBadNonce, str)
+	}
+	if kBig.Cmp(curve.N) >= 0 {
+		str := fmt.Sprintf("k scalar is >= curve.N")
+		return nil, nil, schnorrError(ErrBadNonce, str)
+	}
+	kBig.SetInt64(0)
+
+	pubx, puby := curve.ScalarBaseMult(k)
+	pubnonce := secp256k1.NewPublicKey(curve, pubx, puby)
+
+	return k, pubnonce, nil
+}
+
+// GenerateNoncePair is the generalized and exported version of generateNoncePair.
+func GenerateNoncePair(curve *secp256k1.KoblitzCurve, msg []byte,
+	privkey *secp256k1.PrivateKey, extra []byte,
+	version []byte) (*secp256k1.PrivateKey, *secp256k1.PublicKey, error) {
+	priv, pubNonce, err := generateNoncePair(curve, msg, privkey.Serialize(),
+		nonceRFC6979, extra, version)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	privNonce := secp256k1.NewPrivateKey(curve,
+		EncodedBytesToBigInt(copyBytes(priv)))
+	return privNonce, pubNonce, nil
+}
+
+// schnorrPartialSign creates a partial Schnorr signature which may be combined
+// with other Schnorr signatures to create a valid signature for a group pubkey.
+func schnorrPartialSign(curve *secp256k1.KoblitzCurve, msg []byte, priv []byte,
+	privNonce []byte, pubSum *secp256k1.PublicKey,
+	hashFunc func([]byte) []byte) (*Signature, error) {
+	// Sanity checks.
+	if len(msg) != scalarSize {
+		str := fmt.Sprintf("wrong size for message (got %v, want %v)",
+			len(msg), scalarSize)
+		return nil, schnorrError(ErrBadInputSize, str)
+	}
+	if len(priv) != scalarSize {
+		str := fmt.Sprintf("wrong size for privkey (got %v, want %v)",
+			len(priv), scalarSize)
+		return nil, schnorrError(ErrBadInputSize, str)
+	}
+	if len(privNonce) != scalarSize {
+		str := fmt.Sprintf("wrong size for privnonce (got %v, want %v)",
+			len(privNonce), scalarSize)
+		return nil, schnorrError(ErrBadInputSize, str)
+	}
+	if pubSum == nil {
+		str := fmt.Sprintf("nil pubkey")
+		return nil, schnorrError(ErrInputValue, str)
+	}
+
+	privBig := new(big.Int).SetBytes(priv)
+	if privBig.Cmp(bigZero) == 0 {
+		str := fmt.Sprintf("priv scalar is zero")
+		return nil, schnorrError(ErrInputValue, str)
+	}
+	if privBig.Cmp(curve.N) >= 0 {
+		str := fmt.Sprintf("priv scalar is out of bounds")
+		return nil, schnorrError(ErrInputValue, str)
+	}
+	privBig.SetInt64(0)
+
+	privNonceBig := new(big.Int).SetBytes(privNonce)
+	if privNonceBig.Cmp(bigZero) == 0 {
+		str := fmt.Sprintf("privNonce scalar is zero")
+		return nil, schnorrError(ErrInputValue, str)
+	}
+	if privNonceBig.Cmp(curve.N) >= 0 {
+		str := fmt.Sprintf("privNonce scalar is out of bounds")
+		return nil, schnorrError(ErrInputValue, str)
+	}
+	privNonceBig.SetInt64(0)
+
+	if !curve.IsOnCurve(pubSum.GetX(), pubSum.GetY()) {
+		str := fmt.Sprintf("public key sum is off curve")
+		return nil, schnorrError(ErrInputValue, str)
+	}
+
+	return schnorrSign(curve, msg, priv, privNonce, pubSum.GetX(),
+		pubSum.GetY(), hashFunc)
+}
+
+// SchnorrPartialSign is the generalized and exported version of
+// schnorrPartialSign.
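+// The serialized private key and nonce are zeroed when the function returns.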
+func SchnorrPartialSign(curve *secp256k1.KoblitzCurve, msg []byte,
+	priv *secp256k1.PrivateKey, privNonce *secp256k1.PrivateKey,
+	pubSum *secp256k1.PublicKey) (*Signature, error) {
+	privBytes := priv.Serialize()
+	defer zeroSlice(privBytes)
+	privNonceBytes := privNonce.Serialize()
+	defer zeroSlice(privNonceBytes)
+
+	return schnorrPartialSign(curve, msg, privBytes, privNonceBytes, pubSum,
+		chainhash.HashFuncB)
+}
+
+// schnorrCombineSigs combines a list of partial Schnorr signatures s values
+// into a complete signature s for some group public key. This is achieved
+// by simply adding the s values of the partial signatures as scalars.
+func schnorrCombineSigs(curve *secp256k1.KoblitzCurve, sigss [][]byte) (*big.Int,
+	error) {
+	combinedSigS := new(big.Int).SetInt64(0)
+	for i, sigs := range sigss {
+		sigsBI := EncodedBytesToBigInt(copyBytes(sigs))
+		if sigsBI.Cmp(bigZero) == 0 {
+			str := fmt.Sprintf("sig s %v is zero", i)
+			return nil, schnorrError(ErrInputValue, str)
+		}
+		if sigsBI.Cmp(curve.N) >= 0 {
+			str := fmt.Sprintf("sig s %v is out of bounds", i)
+			return nil, schnorrError(ErrInputValue, str)
+		}
+
+		combinedSigS.Add(combinedSigS, sigsBI)
+		combinedSigS.Mod(combinedSigS, curve.N)
+	}
+
+	if combinedSigS.Cmp(bigZero) == 0 {
+		str := fmt.Sprintf("combined sig s is zero")
+		return nil, schnorrError(ErrZeroSigS, str)
+	}
+
+	return combinedSigS, nil
+}
+
+// SchnorrCombineSigs is the generalized and exported version of
+// schnorrCombineSigs.
+func SchnorrCombineSigs(curve *secp256k1.KoblitzCurve,
+	sigs []*Signature) (*Signature, error) {
+	sigss := make([][]byte, len(sigs), len(sigs))
+	for i, sig := range sigs {
+		if sig == nil {
+			return nil, fmt.Errorf("nil signature")
+		}
+
+		if i > 0 {
+			if sigs[i-1].GetR().Cmp(sig.GetR()) != 0 {
+				str := fmt.Sprintf("nonmatching r values for idx %v, %v",
+					i, i-1)
+				return nil, schnorrError(ErrNonmatchingR, str)
+			}
+		}
+
+		sigss[i] = BigIntToEncodedBytes(sig.GetS())[:]
+	}
+
+	combinedSigS, err := schnorrCombineSigs(curve, sigss)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewSignature(sigs[0].R, combinedSigS), nil
+}
diff --git a/dcrec/secp256k1/schnorr/threshold_test.go b/dcrec/secp256k1/schnorr/threshold_test.go
new file mode 100644
index 00000000..f243b0a5
--- /dev/null
+++ b/dcrec/secp256k1/schnorr/threshold_test.go
@@ -0,0 +1,437 @@
+// Copyright (c) 2015 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package schnorr
+
+import (
+	"bytes"
+	"encoding/hex"
+	"math/rand"
+	"testing"
+
+	"github.com/btcsuite/fastsha256"
+	"github.com/decred/dcrd/chaincfg/chainhash"
+	"github.com/decred/dcrd/dcrec/secp256k1"
+	"github.com/stretchr/testify/assert"
+)
+
+type signerHex struct {
+	privkey          string
+	privateNonce     string
+	pubKeySumLocal   string
+	partialSignature string
+}
+
+// testSchnorrSha256Hash hashes a message with SHA256. The reference tests
+// from libsecp256k1 use this hash function for partial signatures.
+func testSchnorrSha256Hash(msg []byte) []byte {
+	sha := fastsha256.Sum256(msg)
+	return sha[:]
+}
+
+type ThresholdTestVectorHex struct {
+	msg               string
+	signersHex        []signerHex
+	combinedSignature string
+}
+
+// thresholdTestVectorsHex were produced using the testing functions
+// implemented in libsecp256k1.
+// https://github.com/bitcoin/secp256k1/blob/258720851e24e23c1036b4802a185850e258a105/src/modules/schnorr/tests_impl.h +var thresholdTestVectorsHex = []*ThresholdTestVectorHex{ + &ThresholdTestVectorHex{ + msg: "0000000000F8FFFFFFFFFF0000000000F0FF3F0000000000C0FFFFFFFF000000", + signersHex: []signerHex{ + signerHex{ + privkey: "FFFFFFFFFFFFFF0300007800000000000000000000000000000000C0FF010000", + privateNonce: "2C36EFC7F20528A4BA3EF4908786A68B1C5CE4C8BD583F430EBC7E0D8DA853E1", + pubKeySumLocal: "031A763FD2C84354C4BD2B63487952A34DD0E2B9D1E44CA226DC7BAD086D40C29F", + partialSignature: "22E0613D5D406CBAE5B5EFBC39B974E95EFC0DAEF946469F5F68B8E416F398D3B741EED58262E511A6D4BBAD386D6F344AEC2E6A383DA991384F7083B1CE5393", + }, + signerHex{ + privkey: "FFFFFFFFFFFFFF0F00000000F8FF0700F0FFFFFFFFFF0300FEFFFFFFFF070000", + privateNonce: "52347F775C44520F7F3E60ADDAA4E1CBA89E2BE3A82C89B9E11556B081A3D3EF", + pubKeySumLocal: "037C1D54CF0E2C3947AC9D0C046325A441D2C49FE866423E7FE2A088B2AAA57BC6", + partialSignature: "22E0613D5D406CBAE5B5EFBC39B974E95EFC0DAEF946469F5F68B8E416F398D3D6BB539760D9C5965204FEB2F2399D50242E112C74D11642B2468AA82245A178", + }, + signerHex{ + privkey: "0000000000000000000000FEFFFFFF7F00F8FFFFFFFFFF0F000080FF0FE0FFFF", + privateNonce: "6F3E98F7B477BAD4D2F7172925EC495BDF1E80A7D4D6E67C180D8577E7D71E9C", + pubKeySumLocal: "033C9E82854DCA364A32295BCA2C8C3CC19633FDB1A51D5E1C6B41A3E19E8468B3", + partialSignature: "22E0613D5D406CBAE5B5EFBC39B974E95EFC0DAEF946469F5F68B8E416F398D36FB69AE37DDBF2CCB254CE6D936D285E549A1225AF267AED8F121C84BC68CA29", + }, + }, + combinedSignature: "22E0613D5D406CBAE5B5EFBC39B974E95EFC0DAEF946469F5F68B8E416F398D3FDB3DD5061189D74AB2E88CDBE1434E4090574D5ACEC9A85B9D5B923C0467DF3", + }, + &ThresholdTestVectorHex{ + msg: "FFFFE30000F0FFFFFFFFFF0700000000000000000000040000000080FFFFFFFF", + signersHex: []signerHex{ + signerHex{ + privkey: "FFFFFFFFFFFFFFFFFFFFFFFF1F000000E0FFFFFFFFFFFF7F0000FFFFFFFFFFFF", + privateNonce: "A2880E3B8C3484EAC0B48D440C866480DB46EBD2E2171D9E72C2168BA090E882", + pubKeySumLocal: "022D3E998D4C45E497507F9181DB7638303607F70DCDDCF34AF6189BCA1E02B765", + partialSignature: "1B7510DAF07385B967325B00C2D38C2E510DE7B7E4DD3FA21BFB3F9BDC67C8C63C527290CD1A19C9F84BDF57C5E241C15FAE84BB3A8C7394A2BF67CF4D5CD2F7", + }, + signerHex{ + privkey: "000000000000C0FFFF0100FFFFFFFFFFFFFFFF0000000000000000000000F8CF", + privateNonce: "5527AE1CF2355A01477A672F6538C8F45BBE0B24BD67C0C97B70EE68CF92D169", + pubKeySumLocal: "0391076906B5293F3061CF4C54E18241156B37CC49087C5FF8A2B898AD020C21C6", + partialSignature: "1B7510DAF07385B967325B00C2D38C2E510DE7B7E4DD3FA21BFB3F9BDC67C8C60A34632DCE40BFDB7D63EFA0D615F75A6CC6A5E39DF84B5016F2CE59CD0EB0C8", + }, + signerHex{ + privkey: "0000F83F0000000000F03F00000000C0FFFFFF0700000000FEFFFFFFFFFF3F00", + privateNonce: "4CAF0B156FAA4C59B2C2B98C3ACDE8FF2ADDB9B0795A8D8C64F93C2230B4EE00", + pubKeySumLocal: "02756380BE40F3D244ACE2AA1385B0E9ECA2A69D4845FD4C3351C80B6D7404AE01", + partialSignature: "1B7510DAF07385B967325B00C2D38C2E510DE7B7E4DD3FA21BFB3F9BDC67C8C6025C67BA496A41EE038A749FFBAF8B8D849AB5FD489A61EC7750AB2C3F36F6DC", + }, + signerHex{ + privkey: "0100F8FFFFFFFF0F000000000000FEFF0F00000000FC7F0000F8FFFFFFFFFFFF", + privateNonce: "EDC53C5EF7441CF9B415AC2D670EB4850AE9EC9066A4BF24CBFD91505E4C94B6", + pubKeySumLocal: "039B5600241B681D80C2A9E8C2E4ED72E96E90AE04C857F2B0D36AE110486395B8", + partialSignature: "1B7510DAF07385B967325B00C2D38C2E510DE7B7E4DD3FA21BFB3F9BDC67C8C6B8C285954F28998E45065E8E97FB7021CEDD8AD112A9238331F34AB0559AC64A", + }, + 
signerHex{ + privkey: "00000080030000000000000000E0FFFFFFFFFFFFFFFFFEFF0700000000FCFFFF", + privateNonce: "BD7014C0A5CD2ED2324994BE3235B12A18359BE7D4A9817D10EEDEB84E79EA25", + pubKeySumLocal: "0349DB0A59FE3B1D45CE29AE43C3E2D52DFF2A7854284F028C166E861321DC5ACE", + partialSignature: "1B7510DAF07385B967325B00C2D38C2E510DE7B7E4DD3FA21BFB3F9BDC67C8C63DF61C0D15838D8E45ECD5E304C70DA5F34726F61D48CE74EEFD671578945F0B", + }, + }, + combinedSignature: "1B7510DAF07385B967325B00C2D38C2E510DE7B7E4DD3FA21BFB3F9BDC67C8C63F9BDF1B497142B0042D780A346A42725885B57CA1C8728D9221348E579B5EAF", + }, + &ThresholdTestVectorHex{ + msg: "E63D5EE254ECC86A8BD6439E6434148BD02823E678B96816B08581EB33D2192A", + signersHex: []signerHex{ + signerHex{ + privkey: "7A39B587A29317AFD793F701D26542E3081D93AB58EA28968C491D7E066A1798", + privateNonce: "FA1F251871E84908FFCDAB664BB5BCBD81C6E99492A01D087CE251E70749A628", + pubKeySumLocal: "038054B95E8008E20BB4217DE652DB2483428851A01D4DB2E8012A69F1C665465F", + partialSignature: "3AB82171C8668D549A1D41B8AB191B862891F06785FD2AEB01C314A0393F42F4A641D545821A52F7C8C2A3E4C8A142672AE5B912EA47C7146A90A0C796A32F94", + }, + signerHex{ + privkey: "A1FABBBA0C76DD5F255F1A1228D57ADBEBB935C6A80B2D877D3E153DBF946C33", + privateNonce: "C5BEF72D01E30C9D31D6B48A2582FE11E10383F99B933878C308F63718F7FF19", + pubKeySumLocal: "02A4046C2D277BE14A6DF6A9B7664CBB075A3F4B68C2E8D64B9F71A495132D9E65", + partialSignature: "3AB82171C8668D549A1D41B8AB191B862891F06785FD2AEB01C314A0393F42F4B7036345F99BD18852418A3FAF40A3E89B61CA933795282B89D1026CEE88E0E2", + }, + signerHex{ + privkey: "41BB6B56F127607218B6C0D4DDC9595A6639172ECAFBB73134B16EF6852627DD", + privateNonce: "07B4D13FFD4DC040AEDEC4FA41584F8DC30E01DA86F1D945A787FCD3AEEC2231", + pubKeySumLocal: "0221FF2EC05F57519E33A035B9566E2E7B14CB5E28C4AC6E395E5E2F22A2DD0B69", + partialSignature: "3AB82171C8668D549A1D41B8AB191B862891F06785FD2AEB01C314A0393F42F47BEF592BC2C18D90ED2CCEF245ED861443C71F54913794913A614402E7CA0099", + }, + }, + combinedSignature: "3AB82171C8668D549A1D41B8AB191B862891F06785FD2AEB01C314A0393F42F4D93491B73E77B2110830FD16BDCF6C654F5FC61403CBE3956EF088AA9CBFCFCE", + }, + &ThresholdTestVectorHex{ + msg: "07BE073995BF78D440B660AF7B06DC0E9BA120A8D686201989BA99AA384ADF12", + signersHex: []signerHex{ + signerHex{ + privkey: "23527007EB04CA9E0789C3F452A0B154338FCDEF1FF8C88C2FD52066E3684E52", + privateNonce: "EFB2ED7B375587DAEBBE8442768402CAA5F841F6342CA5D27C5F7BD7726D5192", + pubKeySumLocal: "027C8707533D31CD171EA0EF51616B45AE62BCF208DD54708D6A6FFA2AEA6527C5", + partialSignature: "3D13F453F0C41503C00BEA5D558BB267E3E886C588A8A5BABCEED8A33839BEC38A1E913B96EFD3B6155A6B14913819472E2FFE710C51B94A668A6D202CC1D68A", + }, + signerHex{ + privkey: "CB7ADC7097149F4A803D06F772DB7787BCCAFEB2EB9B83BCD87F8E1DB7C7DEB4", + privateNonce: "3C13E81FAB1BF65B86309CADD2696BFB6155C36D193715D78F199AA0FFC856A9", + pubKeySumLocal: "02A3FD12EEFD04192106C3609CB5C9FA6C6DFDB2380236BE0A9440563F4018C473", + partialSignature: "3D13F453F0C41503C00BEA5D558BB267E3E886C588A8A5BABCEED8A33839BEC39E30A3A4B6B95CD0F3650961A75E00912B4B1A5D504027C60674C7B347A74FEB", + }, + signerHex{ + privkey: "13F64737DB01990CFB23DCF911EE9544821367BABF250E8D14278518523AE2EE", + privateNonce: "2EAFD6A829C8174E28673D42299A4E7BAD51B736A57DEC3E654B48BF7530720E", + pubKeySumLocal: "027851635454EDA9BFDF6405122C9E71E1FB0F3D8B460AD77E30576DC33F46DB7F", + partialSignature: "3D13F453F0C41503C00BEA5D558BB267E3E886C588A8A5BABCEED8A33839BEC39BA4523516A1DE4049B20ACC0FF5335BBC2769A0EE2C2A90B85DAEEDC5DC5534", + }, + signerHex{ + privkey: 
"F969F8FFA6320EFE7450AFDED4802DBB65EC0FC634831282898A9E1A148A672D", + privateNonce: "97C3B4FEB72BF8045943A5AD0A994E61A71EFFFBDEECC9F82C47A33FAAD545AA", + pubKeySumLocal: "03879186ED812C4106644FCB1E47951788799566487C7A1996AB6B226ABCD44361", + partialSignature: "3D13F453F0C41503C00BEA5D558BB267E3E886C588A8A5BABCEED8A33839BEC3C03E18DDC9EE7181341D9B720F660F445E7900A5D6A156A237DC45F99D69A239", + }, + signerHex{ + privkey: "7290D1CE9CED9EA92826BB930746B62C0D6476233778A25B46602B2CDD22521C", + privateNonce: "B6E877016CFDE7F49324D7270B2617B7E6BAE84EB674667143799DB5474A8EE0", + pubKeySumLocal: "0201EED57034444E89738D8E91D969DB9D46C08ADE5E8F5E135A3510881ACBEC5B", + partialSignature: "3D13F453F0C41503C00BEA5D558BB267E3E886C588A8A5BABCEED8A33839BEC3C0FB656897A0401B9118D3C243E923664E5C33818386841D0729C4552B2D0285", + }, + }, + combinedSignature: "3D13F453F0C41503C00BEA5D558BB267E3E886C588A8A5BABCEED8A33839BEC3452D055BC5D9C06417A7EE769BDA7FE2926B1FE2970C05AD24EBD26992395CA4", + }, +} + +type signer struct { + privkey []byte + pubkey *secp256k1.PublicKey + privateNonce []byte + publicNonce *secp256k1.PublicKey + pubKeySumLocal *secp256k1.PublicKey + partialSignature []byte +} + +type ThresholdTestVector struct { + msg []byte + signers []signer + combinedSignature []byte +} + +func GetThresholdTestVectors() []*ThresholdTestVector { + curve := secp256k1.S256() + + tvs := make([]*ThresholdTestVector, 0) + for _, v := range thresholdTestVectorsHex { + msg, _ := hex.DecodeString(v.msg) + combSig, _ := hex.DecodeString(v.combinedSignature) + signers := make([]signer, len(v.signersHex), len(v.signersHex)) + for i, signerHex := range v.signersHex { + privkeyB, _ := hex.DecodeString(signerHex.privkey) + _, pubkey := secp256k1.PrivKeyFromBytes(curve, privkeyB) + privateNonceB, _ := hex.DecodeString(signerHex.privateNonce) + _, noncePub := secp256k1.PrivKeyFromBytes(curve, privateNonceB) + pubKeySumLocalB, _ := hex.DecodeString(signerHex.pubKeySumLocal) + pubKeySumLocal, _ := secp256k1.ParsePubKey(pubKeySumLocalB, curve) + partialSignature, _ := hex.DecodeString(signerHex.partialSignature) + + signers[i].privkey = privkeyB + signers[i].pubkey = pubkey + signers[i].privateNonce = privateNonceB + signers[i].publicNonce = noncePub + signers[i].pubKeySumLocal = pubKeySumLocal + signers[i].partialSignature = partialSignature + } + + lv := ThresholdTestVector{ + msg: msg, + signers: signers, + combinedSignature: combSig, + } + + tvs = append(tvs, &lv) + } + + return tvs +} + +func TestSchnorrThresholdRef(t *testing.T) { + curve := secp256k1.S256() + tvs := GetThresholdTestVectors() + for _, tv := range tvs { + partialSignatures := make([]*Signature, len(tv.signers), len(tv.signers)) + + // Ensure all the pubkey and nonce derivation is correct. + for i, signer := range tv.signers { + nonce := nonceRFC6979(signer.privkey, tv.msg, nil, + Sha256VersionStringRFC6979) + assert.Equal(t, nonce, signer.privateNonce) + + _, pubkey := secp256k1.PrivKeyFromBytes(curve, signer.privkey) + assert.Equal(t, pubkey.Serialize(), signer.pubkey.Serialize()) + + _, pubNonce := secp256k1.PrivKeyFromBytes(curve, nonce) + assert.Equal(t, pubNonce.Serialize(), signer.publicNonce.Serialize()) + + // Calculate the public nonce sum. 
+ pubKeys := make([]*secp256k1.PublicKey, len(tv.signers)-1, + len(tv.signers)-1) + + itr := 0 + for _, signer := range tv.signers { + if bytes.Equal(signer.publicNonce.Serialize(), + tv.signers[i].publicNonce.Serialize()) { + continue + } + pubKeys[itr] = signer.publicNonce + itr++ + } + publicNonceSum := CombinePubkeys(curve, pubKeys) + assert.Equal(t, publicNonceSum, signer.pubKeySumLocal) + + sig, err := schnorrPartialSign(curve, tv.msg, signer.privkey, nonce, + publicNonceSum, testSchnorrSha256Hash) + assert.NoError(t, err) + assert.Equal(t, sig.Serialize(), signer.partialSignature) + + partialSignatures[i] = sig + } + + // Combine signatures. + combinedSignature, err := SchnorrCombineSigs(curve, partialSignatures) + assert.NoError(t, err) + assert.Equal(t, combinedSignature.Serialize(), tv.combinedSignature) + + // Combine pubkeys. + allPubkeys := make([]*secp256k1.PublicKey, len(tv.signers), + len(tv.signers)) + for i, signer := range tv.signers { + allPubkeys[i] = signer.pubkey + } + allPksSum := CombinePubkeys(curve, allPubkeys) + + // Verify the combined signature and public keys. + ok, err := schnorrVerify(curve, combinedSignature.Serialize(), + allPksSum, tv.msg, testSchnorrSha256Hash) + assert.NoError(t, err) + assert.Equal(t, true, ok) + } +} + +func TestSchnorrThreshold(t *testing.T) { + tRand := rand.New(rand.NewSource(54321)) + maxSignatories := 10 + numTests := 100 + numSignatories := maxSignatories * numTests + + curve := secp256k1.S256() + msg, _ := hex.DecodeString( + "07BE073995BF78D440B660AF7B06DC0E9BA120A8D686201989BA99AA384ADF12") + privkeys := randPrivKeyList(curve, numSignatories) + + for i := 0; i < numTests; i++ { + numKeysForTest := tRand.Intn(maxSignatories-2) + 2 + keyIndex := i * maxSignatories + keysToUse := make([]*secp256k1.PrivateKey, numKeysForTest, numKeysForTest) + for j := 0; j < numKeysForTest; j++ { + keysToUse[j] = privkeys[j+keyIndex] + } + pubKeysToUse := make([]*secp256k1.PublicKey, numKeysForTest, + numKeysForTest) + for j := 0; j < numKeysForTest; j++ { + _, pubkey := secp256k1.PrivKeyFromBytes(curve, + keysToUse[j].Serialize()) + pubKeysToUse[j] = pubkey + } + privNoncesToUse := make([]*secp256k1.PrivateKey, numKeysForTest, + numKeysForTest) + pubNoncesToUse := make([]*secp256k1.PublicKey, numKeysForTest, + numKeysForTest) + for j := 0; j < numKeysForTest; j++ { + nonce := nonceRFC6979(keysToUse[j].Serialize(), msg, nil, + BlakeVersionStringRFC6979) + privNonce, pubNonce := secp256k1.PrivKeyFromBytes(curve, + nonce) + privNoncesToUse[j] = privNonce + pubNoncesToUse[j] = pubNonce + } + + partialSignatures := make([]*Signature, numKeysForTest, numKeysForTest) + + // Partial signature generation. + for j, _ := range keysToUse { + thisPubNonce := pubNoncesToUse[j] + localPubNonces := make([]*secp256k1.PublicKey, numKeysForTest-1, + numKeysForTest-1) + itr := 0 + for _, pubNonce := range pubNoncesToUse { + if bytes.Equal(thisPubNonce.Serialize(), pubNonce.Serialize()) { + continue + } + localPubNonces[itr] = pubNonce + itr++ + } + publicNonceSum := CombinePubkeys(curve, localPubNonces) + + sig, err := schnorrPartialSign(curve, msg, keysToUse[j].Serialize(), + privNoncesToUse[j].Serialize(), publicNonceSum, + chainhash.HashFuncB) + assert.NoError(t, err) + + partialSignatures[j] = sig + } + + // Combine signatures. + combinedSignature, err := SchnorrCombineSigs(curve, partialSignatures) + assert.NoError(t, err) + + // Combine pubkeys. 
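+		// The group public key is the point sum of all signers' public
+		// keys; the combined signature must verify against it.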
+ allPubkeys := make([]*secp256k1.PublicKey, numKeysForTest, + numKeysForTest) + for j, pubkey := range pubKeysToUse { + allPubkeys[j] = pubkey + } + allPksSum := CombinePubkeys(curve, allPubkeys) + + // Verify the combined signature and public keys. + ok, err := schnorrVerify(curve, combinedSignature.Serialize(), + allPksSum, msg, chainhash.HashFuncB) + assert.NoError(t, err) + assert.Equal(t, true, ok) + + // Corrupt some memory and make sure it breaks something. + corruptWhat := tRand.Intn(3) + randItem := tRand.Intn(numKeysForTest - 1) + + // Corrupt private key. + if corruptWhat == 0 { + privSerCorrupt := keysToUse[randItem].Serialize() + pos := tRand.Intn(31) + bitPos := tRand.Intn(7) + privSerCorrupt[pos] ^= 1 << uint8(bitPos) + keysToUse[randItem].D.SetBytes(privSerCorrupt) + } + // Corrupt public key. + if corruptWhat == 1 { + pubXCorrupt := BigIntToEncodedBytes(pubKeysToUse[randItem].GetX()) + pos := tRand.Intn(31) + bitPos := tRand.Intn(7) + pubXCorrupt[pos] ^= 1 << uint8(bitPos) + pubKeysToUse[randItem].GetX().SetBytes(pubXCorrupt[:]) + } + // Corrupt private nonce. + if corruptWhat == 2 { + privSerCorrupt := privNoncesToUse[randItem].Serialize() + pos := tRand.Intn(31) + bitPos := tRand.Intn(7) + privSerCorrupt[pos] ^= 1 << uint8(bitPos) + privNoncesToUse[randItem].D.SetBytes(privSerCorrupt) + } + // Corrupt public nonce. + if corruptWhat == 3 { + pubXCorrupt := BigIntToEncodedBytes(pubNoncesToUse[randItem].GetX()) + pos := tRand.Intn(31) + bitPos := tRand.Intn(7) + pubXCorrupt[pos] ^= 1 << uint8(bitPos) + pubNoncesToUse[randItem].GetX().SetBytes(pubXCorrupt[:]) + } + + for j, _ := range keysToUse { + thisPubNonce := pubNoncesToUse[j] + localPubNonces := make([]*secp256k1.PublicKey, numKeysForTest-1, + numKeysForTest-1) + itr := 0 + for _, pubNonce := range pubNoncesToUse { + if bytes.Equal(thisPubNonce.Serialize(), pubNonce.Serialize()) { + continue + } + localPubNonces[itr] = pubNonce + itr++ + } + publicNonceSum := CombinePubkeys(curve, localPubNonces) + + sig, _ := schnorrPartialSign(curve, msg, keysToUse[j].Serialize(), + privNoncesToUse[j].Serialize(), publicNonceSum, + chainhash.HashFuncB) + + partialSignatures[j] = sig + } + + // Combine signatures. + combinedSignature, _ = SchnorrCombineSigs(curve, partialSignatures) + + // Combine pubkeys. + allPubkeys = make([]*secp256k1.PublicKey, numKeysForTest, + numKeysForTest) + for j, pubkey := range pubKeysToUse { + allPubkeys[j] = pubkey + } + allPksSum = CombinePubkeys(curve, allPubkeys) + + // Nothing that makes it here should be valid. + if allPksSum != nil && combinedSignature != nil { + ok, _ = schnorrVerify(curve, combinedSignature.Serialize(), + allPksSum, msg, chainhash.HashFuncB) + assert.Equal(t, false, ok) + } + } +} diff --git a/btcec/secp256k1.go b/dcrec/secp256k1/secp256k1.go similarity index 99% rename from btcec/secp256k1.go rename to dcrec/secp256k1/secp256k1.go index 5e7bffe4..06820191 100644 --- a/btcec/secp256k1.go +++ b/dcrec/secp256k1/secp256k1.go @@ -1,8 +1,9 @@ // Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package btcec +package secp256k1 // Auto-generated file (see genprecomps.go) // DO NOT EDIT diff --git a/btcec/signature.go b/dcrec/secp256k1/signature.go similarity index 91% rename from btcec/signature.go rename to dcrec/secp256k1/signature.go index 126f8944..3c03a606 100644 --- a/btcec/signature.go +++ b/dcrec/secp256k1/signature.go @@ -1,8 +1,9 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcec +package secp256k1 import ( "bytes" @@ -42,9 +43,14 @@ var ( oneInitializer = []byte{0x01} ) +// NewSignature instantiates a new signature given some R,S values. +func NewSignature(r, s *big.Int) *Signature { + return &Signature{r, s} +} + // Serialize returns the ECDSA signature in the more strict DER format. Note // that the serialized bytes returned do not include the appended hash type -// used in Bitcoin signature scripts. +// used in Decred signature scripts. // // encoding/asn1 is broken so we hand roll this output: // @@ -79,7 +85,7 @@ func (sig *Signature) Serialize() []byte { // Verify calls ecdsa.Verify to verify the signature of hash using the public // key. It returns true if the signature is valid, false otherwise. func (sig *Signature) Verify(hash []byte, pubKey *PublicKey) bool { - return ecdsa.Verify(pubKey.ToECDSA(), hash, sig.R, sig.S) + return ecdsa.Verify(pubKey.ToECDSA(), hash, sig.GetR(), sig.GetS()) } func parseSig(sigStr []byte, curve elliptic.Curve, der bool) (*Signature, error) { @@ -271,7 +277,7 @@ func hashToInt(hash []byte, c elliptic.Curve) *big.Int { // SEC 1 Ver 2.0, page 47-48 (53 and 54 in the pdf). This performs the details // in the inner loop in Step 1. The counter provided is actually the j parameter // of the loop * 2 - on the first iteration of j we do the R case, else the -R -// case in step 1.6. This counter is used in the bitcoin compressed signature +// case in step 1.6. This counter is used in the decred compressed signature // format and thus we match bitcoind's behaviour here. func recoverKeyFromSignature(curve *KoblitzCurve, sig *Signature, msg []byte, iter int, doChecks bool) (*PublicKey, error) { @@ -286,7 +292,7 @@ func recoverKeyFromSignature(curve *KoblitzCurve, sig *Signature, msg []byte, // convert 02 to point R. (step 1.2 and 1.3). If we are on an odd // iteration then 1.6 will be done with -R, so we calculate the other // term when uncompressing the point. - Ry, err := decompressPoint(curve, Rx, iter%2 == 1) + Ry, err := DecompressPoint(curve, Rx, iter%2 == 1) if err != nil { return nil, err } @@ -409,12 +415,13 @@ func RecoverCompact(curve *KoblitzCurve, signature, return key, ((signature[0] - 27) & 4) == 4, nil } -// signRFC6979 generates a deterministic ECDSA signature according to RFC 6979 and BIP 62. +// signRFC6979 generates a deterministic ECDSA signature according to RFC 6979 +// and BIP 62. func signRFC6979(privateKey *PrivateKey, hash []byte) (*Signature, error) { privkey := privateKey.ToECDSA() N := order - k := nonceRFC6979(privkey.D, hash) + k := NonceRFC6979(privkey.D, hash, nil, nil) inv := new(big.Int).ModInverse(k, N) r, _ := privkey.Curve.ScalarBaseMult(k.Bytes()) if r.Cmp(N) == 1 { @@ -440,10 +447,11 @@ func signRFC6979(privateKey *PrivateKey, hash []byte) (*Signature, error) { return &Signature{R: r, S: s}, nil } -// nonceRFC6979 generates an ECDSA nonce (`k`) deterministically according to RFC 6979. 
-// It takes a 32-byte hash as an input and returns 32-byte nonce to be used in ECDSA algorithm.
-func nonceRFC6979(privkey *big.Int, hash []byte) *big.Int {
-
+// NonceRFC6979 generates an ECDSA nonce (`k`) deterministically according to
+// RFC 6979. It takes a 32-byte hash as an input and returns 32-byte nonce to
+// be used in ECDSA algorithm.
+func NonceRFC6979(privkey *big.Int, hash []byte, extra []byte,
+	version []byte) *big.Int {
 	curve := S256()
 	q := curve.Params().N
 	x := privkey
@@ -453,6 +461,16 @@ func nonceRFC6979(privkey *big.Int, hash []byte) *big.Int {
 	holen := alg().Size()
 	rolen := (qlen + 7) >> 3
 	bx := append(int2octets(x, rolen), bits2octets(hash, curve, rolen)...)
+	if len(extra) == 32 {
+		bx = append(bx, extra...)
+	}
+	if len(version) == 16 && len(extra) == 32 {
+		bx = append(bx, extra...)
+	}
+	if len(version) == 16 && len(extra) != 32 {
+		bx = append(bx, bytes.Repeat([]byte{0x00}, 32)...)
+		bx = append(bx, version...)
+	}
 
 	// Step B
 	v := bytes.Repeat(oneInitializer, holen)
@@ -530,3 +548,18 @@ func bits2octets(in []byte, curve elliptic.Curve, rolen int) []byte {
 	}
 	return int2octets(z2, rolen)
 }
+
+// GetR satisfies the chainec Signature interface.
+func (s Signature) GetR() *big.Int {
+	return s.R
+}
+
+// GetS satisfies the chainec Signature interface.
+func (s Signature) GetS() *big.Int {
+	return s.S
+}
+
+// GetType satisfies the chainec Signature interface.
+func (s Signature) GetType() int {
+	return ecTypeSecp256k1
+}
diff --git a/btcec/signature_test.go b/dcrec/secp256k1/signature_test.go
similarity index 94%
rename from btcec/signature_test.go
rename to dcrec/secp256k1/signature_test.go
index 739a0fba..769bdbf9 100644
--- a/btcec/signature_test.go
+++ b/dcrec/secp256k1/signature_test.go
@@ -1,8 +1,9 @@
 // Copyright (c) 2013-2014 The btcsuite developers
+// Copyright (c) 2015 The Decred developers
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.
-package btcec_test +package secp256k1_test import ( "bytes" @@ -12,7 +13,8 @@ import ( "math/big" "testing" - "github.com/btcsuite/btcd/btcec" + "github.com/decred/dcrd/dcrec/secp256k1" + "github.com/btcsuite/fastsha256" ) @@ -37,7 +39,7 @@ func decodeHex(hexStr string) []byte { } var signatureTests = []signatureTest{ - // signatures from bitcoin blockchain tx + // signatures from decred blockchain tx // 0437cd7f8525ceed2324359c2d0ba26006d92d85 { name: "valid signature.", @@ -332,9 +334,9 @@ func TestSignatures(t *testing.T) { for _, test := range signatureTests { var err error if test.der { - _, err = btcec.ParseDERSignature(test.sig, btcec.S256()) + _, err = secp256k1.ParseDERSignature(test.sig, secp256k1.S256()) } else { - _, err = btcec.ParseSignature(test.sig, btcec.S256()) + _, err = secp256k1.ParseSignature(test.sig, secp256k1.S256()) } if err != nil { if test.isValid { @@ -356,14 +358,14 @@ func TestSignatures(t *testing.T) { func TestSignatureSerialize(t *testing.T) { tests := []struct { name string - ecsig *btcec.Signature + ecsig *secp256k1.Signature expected []byte }{ - // signature from bitcoin blockchain tx + // signature from decred blockchain tx // 0437cd7f8525ceed2324359c2d0ba26006d92d85 { "valid 1 - r and s most significant bits are zero", - &btcec.Signature{ + &secp256k1.Signature{ R: fromHex("4e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd41"), S: fromHex("181522ec8eca07de4860a4acdd12909d831cc56cbbac4622082221a8768d1d09"), }, @@ -379,11 +381,11 @@ func TestSignatureSerialize(t *testing.T) { 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09, }, }, - // signature from bitcoin blockchain tx + // signature from decred blockchain tx // cb00f8a0573b18faa8c4f467b049f5d202bf1101d9ef2633bc611be70376a4b4 { "valid 2 - r most significant bit is one", - &btcec.Signature{ + &secp256k1.Signature{ R: fromHex("0082235e21a2300022738dabb8e1bbd9d19cfb1e7ab8c30a23b0afbb8d178abcf3"), S: fromHex("24bf68e256c534ddfaf966bf908deb944305596f7bdcc38d69acad7f9c868724"), }, @@ -399,13 +401,13 @@ func TestSignatureSerialize(t *testing.T) { 0xac, 0xad, 0x7f, 0x9c, 0x86, 0x87, 0x24, }, }, - // signature from bitcoin blockchain tx + // signature from decred blockchain tx // fda204502a3345e08afd6af27377c052e77f1fefeaeb31bdd45f1e1237ca5470 { "valid 3 - s most significant bit is one", - &btcec.Signature{ + &secp256k1.Signature{ R: fromHex("1cadddc2838598fee7dc35a12b340c6bde8b389f7bfd19a1252a17c4b5ed2d71"), - S: new(big.Int).Add(fromHex("00c1a251bbecb14b058a8bd77f65de87e51c47e95904f4c0e9d52eddc21c1415ac"), btcec.S256().N), + S: new(big.Int).Add(fromHex("00c1a251bbecb14b058a8bd77f65de87e51c47e95904f4c0e9d52eddc21c1415ac"), secp256k1.S256().N), }, []byte{ 0x30, 0x45, 0x02, 0x20, 0x1c, 0xad, 0xdd, 0xc2, @@ -421,7 +423,7 @@ func TestSignatureSerialize(t *testing.T) { }, { "zero signature", - &btcec.Signature{ + &secp256k1.Signature{ R: big.NewInt(0), S: big.NewInt(0), }, @@ -439,19 +441,19 @@ func TestSignatureSerialize(t *testing.T) { } } -func testSignCompact(t *testing.T, tag string, curve *btcec.KoblitzCurve, +func testSignCompact(t *testing.T, tag string, curve *secp256k1.KoblitzCurve, data []byte, isCompressed bool) { - tmp, _ := btcec.NewPrivateKey(curve) - priv := (*btcec.PrivateKey)(tmp) + tmp, _ := secp256k1.GeneratePrivateKey(curve) + priv := (*secp256k1.PrivateKey)(tmp) hashed := []byte("testing") - sig, err := btcec.SignCompact(curve, priv, hashed, isCompressed) + sig, err := secp256k1.SignCompact(curve, priv, hashed, isCompressed) if err != nil { t.Errorf("%s: error signing: %s", tag, err) return 
} - pk, wasCompressed, err := btcec.RecoverCompact(curve, sig, hashed) + pk, wasCompressed, err := secp256k1.RecoverCompact(curve, sig, hashed) if err != nil { t.Errorf("%s: error recovering: %s", tag, err) return @@ -475,7 +477,7 @@ func testSignCompact(t *testing.T, tag string, curve *btcec.KoblitzCurve, sig[0] += 4 } - pk, wasCompressed, err = btcec.RecoverCompact(curve, sig, hashed) + pk, wasCompressed, err = secp256k1.RecoverCompact(curve, sig, hashed) if err != nil { t.Errorf("%s: error recovering (2): %s", tag, err) return @@ -503,7 +505,7 @@ func TestSignCompact(t *testing.T) { continue } compressed := i%2 != 0 - testSignCompact(t, name, btcec.S256(), data, compressed) + testSignCompact(t, name, secp256k1.S256(), data, compressed) } } @@ -558,11 +560,11 @@ func TestRFC6979(t *testing.T) { } for i, test := range tests { - privKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), decodeHex(test.key)) + privKey, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), decodeHex(test.key)) hash := fastsha256.Sum256([]byte(test.msg)) // Ensure deterministically generated nonce is the expected value. - gotNonce := btcec.TstNonceRFC6979(privKey.D, hash[:]).Bytes() + gotNonce := secp256k1.TstNonceRFC6979(privKey.D, hash[:]).Bytes() wantNonce := decodeHex(test.nonce) if !bytes.Equal(gotNonce, wantNonce) { t.Errorf("NonceRFC6979 #%d (%s): Nonce is incorrect: "+ diff --git a/btcjson/CONTRIBUTORS b/dcrjson/CONTRIBUTORS similarity index 100% rename from btcjson/CONTRIBUTORS rename to dcrjson/CONTRIBUTORS diff --git a/btcjson/README.md b/dcrjson/README.md similarity index 73% rename from btcjson/README.md rename to dcrjson/README.md index 2a2f1f16..4b9ed689 100644 --- a/btcjson/README.md +++ b/dcrjson/README.md @@ -1,22 +1,21 @@ -btcjson +dcrjson ======= -[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)] -(https://travis-ci.org/btcsuite/btcd) [![ISC License] +[![ISC License] (http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) Package btcjson implements concrete types for marshalling to and from the bitcoin JSON-RPC API. A comprehensive suite of tests is provided to ensure proper functionality. -Although this package was primarily written for the btcsuite, it has +Although this package was primarily written for the decred, it has intentionally been designed so it can be used as a standalone package for any projects needing to marshal to and from bitcoin JSON-RPC requests and responses. Note that although it's possible to use this package directly to implement an RPC client, it is not recommended since it is only intended as an infrastructure package. Instead, RPC clients should use the -[btcrpcclient](https://github.com/btcsuite/btcrpcclient) package which provides +[btcrpcclient](https://github.com/decred/btcrpcclient) package which provides a full blown RPC client with many features such as automatic connection management, websocket support, automatic notification re-registration on reconnect, and conversion from the raw underlying RPC types (strings, floats, @@ -25,46 +24,46 @@ ints, etc) to higher-level types with many nice and useful properties. ## Documentation [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)] -(http://godoc.org/github.com/btcsuite/btcd/btcjson) +(http://godoc.org/github.com/decred/dcrd/btcjson) Full `go doc` style documentation for the project can be viewed online without installing this package by using the GoDoc site -[here](http://godoc.org/github.com/btcsuite/btcd/btcjson). 
+[here](http://godoc.org/github.com/decred/dcrd/btcjson). You can also view the documentation locally once the package is installed with the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to -http://localhost:6060/pkg/github.com/btcsuite/btcd/btcjson +http://localhost:6060/pkg/github.com/decred/dcrd/btcjson ## Installation ```bash -$ go get github.com/btcsuite/btcd/btcjson +$ go get github.com/decred/dcrd/btcjson ``` ## Examples * [Marshal Command] - (http://godoc.org/github.com/btcsuite/btcd/btcjson#example-MarshalCmd) + (http://godoc.org/github.com/decred/dcrd/btcjson#example-MarshalCmd) Demonstrates how to create and marshal a command into a JSON-RPC request. * [Unmarshal Command] - (http://godoc.org/github.com/btcsuite/btcd/btcjson#example-UnmarshalCmd) + (http://godoc.org/github.com/decred/dcrd/btcjson#example-UnmarshalCmd) Demonstrates how to unmarshal a JSON-RPC request and then unmarshal the concrete request into a concrete command. * [Marshal Response] - (http://godoc.org/github.com/btcsuite/btcd/btcjson#example-MarshalResponse) + (http://godoc.org/github.com/decred/dcrd/btcjson#example-MarshalResponse) Demonstrates how to marshal a JSON-RPC response. * [Unmarshal Response] - (http://godoc.org/github.com/btcsuite/btcd/btcjson#example-package--UnmarshalResponse) + (http://godoc.org/github.com/decred/dcrd/btcjson#example-package--UnmarshalResponse) Demonstrates how to unmarshal a JSON-RPC response and then unmarshal the result field in the response to a concrete type. ## GPG Verification Key All official release tags are signed by Conformal so users can ensure the code -has not been tampered with and is coming from the btcsuite developers. To +has not been tampered with and is coming from the decred developers. To verify the signature perform the following: - Download the public key from the Conformal website at diff --git a/btcjson/btcdextcmds.go b/dcrjson/btcdextcmds.go similarity index 98% rename from btcjson/btcdextcmds.go rename to dcrjson/btcdextcmds.go index 7d416395..c03d0fba 100644 --- a/btcjson/btcdextcmds.go +++ b/dcrjson/btcdextcmds.go @@ -1,11 +1,12 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. // NOTE: This file is intended to house the RPC commands that are supported by // a chain server with btcd extensions. -package btcjson +package dcrjson // NodeSubCmd defines the type used in the addnode JSON-RPC command for the // sub command field. diff --git a/btcjson/btcdextcmds_test.go b/dcrjson/btcdextcmds_test.go similarity index 72% rename from btcjson/btcdextcmds_test.go rename to dcrjson/btcdextcmds_test.go index c4d8d498..8ccc647e 100644 --- a/btcjson/btcdextcmds_test.go +++ b/dcrjson/btcdextcmds_test.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package btcjson_test +package dcrjson_test import ( "bytes" @@ -11,7 +12,7 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // TestBtcdExtCmds tests all of the btcd extended commands marshal and unmarshal @@ -32,108 +33,108 @@ func TestBtcdExtCmds(t *testing.T) { { name: "debuglevel", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("debuglevel", "trace") + return dcrjson.NewCmd("debuglevel", "trace") }, staticCmd: func() interface{} { - return btcjson.NewDebugLevelCmd("trace") + return dcrjson.NewDebugLevelCmd("trace") }, marshalled: `{"jsonrpc":"1.0","method":"debuglevel","params":["trace"],"id":1}`, - unmarshalled: &btcjson.DebugLevelCmd{ + unmarshalled: &dcrjson.DebugLevelCmd{ LevelSpec: "trace", }, }, { name: "node", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("node", btcjson.NRemove, "1.1.1.1") + return dcrjson.NewCmd("node", dcrjson.NRemove, "1.1.1.1") }, staticCmd: func() interface{} { - return btcjson.NewNodeCmd("remove", "1.1.1.1", nil) + return dcrjson.NewNodeCmd("remove", "1.1.1.1", nil) }, marshalled: `{"jsonrpc":"1.0","method":"node","params":["remove","1.1.1.1"],"id":1}`, - unmarshalled: &btcjson.NodeCmd{ - SubCmd: btcjson.NRemove, + unmarshalled: &dcrjson.NodeCmd{ + SubCmd: dcrjson.NRemove, Target: "1.1.1.1", }, }, { name: "node", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("node", btcjson.NDisconnect, "1.1.1.1") + return dcrjson.NewCmd("node", dcrjson.NDisconnect, "1.1.1.1") }, staticCmd: func() interface{} { - return btcjson.NewNodeCmd("disconnect", "1.1.1.1", nil) + return dcrjson.NewNodeCmd("disconnect", "1.1.1.1", nil) }, marshalled: `{"jsonrpc":"1.0","method":"node","params":["disconnect","1.1.1.1"],"id":1}`, - unmarshalled: &btcjson.NodeCmd{ - SubCmd: btcjson.NDisconnect, + unmarshalled: &dcrjson.NodeCmd{ + SubCmd: dcrjson.NDisconnect, Target: "1.1.1.1", }, }, { name: "node", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("node", btcjson.NConnect, "1.1.1.1", "perm") + return dcrjson.NewCmd("node", dcrjson.NConnect, "1.1.1.1", "perm") }, staticCmd: func() interface{} { - return btcjson.NewNodeCmd("connect", "1.1.1.1", btcjson.String("perm")) + return dcrjson.NewNodeCmd("connect", "1.1.1.1", dcrjson.String("perm")) }, marshalled: `{"jsonrpc":"1.0","method":"node","params":["connect","1.1.1.1","perm"],"id":1}`, - unmarshalled: &btcjson.NodeCmd{ - SubCmd: btcjson.NConnect, + unmarshalled: &dcrjson.NodeCmd{ + SubCmd: dcrjson.NConnect, Target: "1.1.1.1", - ConnectSubCmd: btcjson.String("perm"), + ConnectSubCmd: dcrjson.String("perm"), }, }, { name: "node", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("node", btcjson.NConnect, "1.1.1.1", "temp") + return dcrjson.NewCmd("node", dcrjson.NConnect, "1.1.1.1", "temp") }, staticCmd: func() interface{} { - return btcjson.NewNodeCmd("connect", "1.1.1.1", btcjson.String("temp")) + return dcrjson.NewNodeCmd("connect", "1.1.1.1", dcrjson.String("temp")) }, marshalled: `{"jsonrpc":"1.0","method":"node","params":["connect","1.1.1.1","temp"],"id":1}`, - unmarshalled: &btcjson.NodeCmd{ - SubCmd: btcjson.NConnect, + unmarshalled: &dcrjson.NodeCmd{ + SubCmd: dcrjson.NConnect, Target: "1.1.1.1", - ConnectSubCmd: btcjson.String("temp"), + ConnectSubCmd: dcrjson.String("temp"), }, }, { name: "generate", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("generate", 1) + return dcrjson.NewCmd("generate", 1) }, staticCmd: func() interface{} { - return btcjson.NewGenerateCmd(1) + return 
dcrjson.NewGenerateCmd(1) }, marshalled: `{"jsonrpc":"1.0","method":"generate","params":[1],"id":1}`, - unmarshalled: &btcjson.GenerateCmd{ + unmarshalled: &dcrjson.GenerateCmd{ NumBlocks: 1, }, }, { name: "getbestblock", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getbestblock") + return dcrjson.NewCmd("getbestblock") }, staticCmd: func() interface{} { - return btcjson.NewGetBestBlockCmd() + return dcrjson.NewGetBestBlockCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getbestblock","params":[],"id":1}`, - unmarshalled: &btcjson.GetBestBlockCmd{}, + unmarshalled: &dcrjson.GetBestBlockCmd{}, }, { name: "getcurrentnet", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getcurrentnet") + return dcrjson.NewCmd("getcurrentnet") }, staticCmd: func() interface{} { - return btcjson.NewGetCurrentNetCmd() + return dcrjson.NewGetCurrentNetCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getcurrentnet","params":[],"id":1}`, - unmarshalled: &btcjson.GetCurrentNetCmd{}, + unmarshalled: &dcrjson.GetCurrentNetCmd{}, }, } @@ -141,7 +142,7 @@ func TestBtcdExtCmds(t *testing.T) { for i, test := range tests { // Marshal the command as created by the new static command // creation function. - marshalled, err := btcjson.MarshalCmd(testID, test.staticCmd()) + marshalled, err := dcrjson.MarshalCmd(testID, test.staticCmd()) if err != nil { t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) @@ -165,7 +166,7 @@ func TestBtcdExtCmds(t *testing.T) { // Marshal the command as created by the generic new command // creation function. - marshalled, err = btcjson.MarshalCmd(testID, cmd) + marshalled, err = dcrjson.MarshalCmd(testID, cmd) if err != nil { t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) @@ -179,7 +180,7 @@ func TestBtcdExtCmds(t *testing.T) { continue } - var request btcjson.Request + var request dcrjson.Request if err := json.Unmarshal(marshalled, &request); err != nil { t.Errorf("Test #%d (%s) unexpected error while "+ "unmarshalling JSON-RPC request: %v", i, @@ -187,7 +188,7 @@ func TestBtcdExtCmds(t *testing.T) { continue } - cmd, err = btcjson.UnmarshalCmd(&request) + cmd, err = dcrjson.UnmarshalCmd(&request) if err != nil { t.Errorf("UnmarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) diff --git a/btcjson/btcwalletextcmds.go b/dcrjson/btcwalletextcmds.go similarity index 98% rename from btcjson/btcwalletextcmds.go rename to dcrjson/btcwalletextcmds.go index 9cbc273a..ed49f6ec 100644 --- a/btcjson/btcwalletextcmds.go +++ b/dcrjson/btcwalletextcmds.go @@ -1,11 +1,12 @@ // Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. // NOTE: This file is intended to house the RPC commands that are supported by // a wallet server with btcwallet extensions. -package btcjson +package dcrjson // CreateNewAccountCmd defines the createnewaccount JSON-RPC command. type CreateNewAccountCmd struct { diff --git a/btcjson/btcwalletextcmds_test.go b/dcrjson/btcwalletextcmds_test.go similarity index 72% rename from btcjson/btcwalletextcmds_test.go rename to dcrjson/btcwalletextcmds_test.go index e478a088..8a4bc5eb 100644 --- a/btcjson/btcwalletextcmds_test.go +++ b/dcrjson/btcwalletextcmds_test.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package btcjson_test +package dcrjson_test import ( "bytes" @@ -11,7 +12,7 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // TestBtcWalletExtCmds tests all of the btcwallet extended commands marshal and @@ -32,108 +33,108 @@ func TestBtcWalletExtCmds(t *testing.T) { { name: "createnewaccount", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("createnewaccount", "acct") + return dcrjson.NewCmd("createnewaccount", "acct") }, staticCmd: func() interface{} { - return btcjson.NewCreateNewAccountCmd("acct") + return dcrjson.NewCreateNewAccountCmd("acct") }, marshalled: `{"jsonrpc":"1.0","method":"createnewaccount","params":["acct"],"id":1}`, - unmarshalled: &btcjson.CreateNewAccountCmd{ + unmarshalled: &dcrjson.CreateNewAccountCmd{ Account: "acct", }, }, { name: "dumpwallet", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("dumpwallet", "filename") + return dcrjson.NewCmd("dumpwallet", "filename") }, staticCmd: func() interface{} { - return btcjson.NewDumpWalletCmd("filename") + return dcrjson.NewDumpWalletCmd("filename") }, marshalled: `{"jsonrpc":"1.0","method":"dumpwallet","params":["filename"],"id":1}`, - unmarshalled: &btcjson.DumpWalletCmd{ + unmarshalled: &dcrjson.DumpWalletCmd{ Filename: "filename", }, }, { name: "importaddress", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("importaddress", "1Address") + return dcrjson.NewCmd("importaddress", "1Address") }, staticCmd: func() interface{} { - return btcjson.NewImportAddressCmd("1Address", nil) + return dcrjson.NewImportAddressCmd("1Address", nil) }, marshalled: `{"jsonrpc":"1.0","method":"importaddress","params":["1Address"],"id":1}`, - unmarshalled: &btcjson.ImportAddressCmd{ + unmarshalled: &dcrjson.ImportAddressCmd{ Address: "1Address", - Rescan: btcjson.Bool(true), + Rescan: dcrjson.Bool(true), }, }, { name: "importaddress optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("importaddress", "1Address", false) + return dcrjson.NewCmd("importaddress", "1Address", false) }, staticCmd: func() interface{} { - return btcjson.NewImportAddressCmd("1Address", btcjson.Bool(false)) + return dcrjson.NewImportAddressCmd("1Address", dcrjson.Bool(false)) }, marshalled: `{"jsonrpc":"1.0","method":"importaddress","params":["1Address",false],"id":1}`, - unmarshalled: &btcjson.ImportAddressCmd{ + unmarshalled: &dcrjson.ImportAddressCmd{ Address: "1Address", - Rescan: btcjson.Bool(false), + Rescan: dcrjson.Bool(false), }, }, { name: "importpubkey", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("importpubkey", "031234") + return dcrjson.NewCmd("importpubkey", "031234") }, staticCmd: func() interface{} { - return btcjson.NewImportPubKeyCmd("031234", nil) + return dcrjson.NewImportPubKeyCmd("031234", nil) }, marshalled: `{"jsonrpc":"1.0","method":"importpubkey","params":["031234"],"id":1}`, - unmarshalled: &btcjson.ImportPubKeyCmd{ + unmarshalled: &dcrjson.ImportPubKeyCmd{ PubKey: "031234", - Rescan: btcjson.Bool(true), + Rescan: dcrjson.Bool(true), }, }, { name: "importpubkey optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("importpubkey", "031234", false) + return dcrjson.NewCmd("importpubkey", "031234", false) }, staticCmd: func() interface{} { - return btcjson.NewImportPubKeyCmd("031234", btcjson.Bool(false)) + return dcrjson.NewImportPubKeyCmd("031234", dcrjson.Bool(false)) }, marshalled: `{"jsonrpc":"1.0","method":"importpubkey","params":["031234",false],"id":1}`, - unmarshalled: 
&btcjson.ImportPubKeyCmd{ + unmarshalled: &dcrjson.ImportPubKeyCmd{ PubKey: "031234", - Rescan: btcjson.Bool(false), + Rescan: dcrjson.Bool(false), }, }, { name: "importwallet", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("importwallet", "filename") + return dcrjson.NewCmd("importwallet", "filename") }, staticCmd: func() interface{} { - return btcjson.NewImportWalletCmd("filename") + return dcrjson.NewImportWalletCmd("filename") }, marshalled: `{"jsonrpc":"1.0","method":"importwallet","params":["filename"],"id":1}`, - unmarshalled: &btcjson.ImportWalletCmd{ + unmarshalled: &dcrjson.ImportWalletCmd{ Filename: "filename", }, }, { name: "renameaccount", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("renameaccount", "oldacct", "newacct") + return dcrjson.NewCmd("renameaccount", "oldacct", "newacct") }, staticCmd: func() interface{} { - return btcjson.NewRenameAccountCmd("oldacct", "newacct") + return dcrjson.NewRenameAccountCmd("oldacct", "newacct") }, marshalled: `{"jsonrpc":"1.0","method":"renameaccount","params":["oldacct","newacct"],"id":1}`, - unmarshalled: &btcjson.RenameAccountCmd{ + unmarshalled: &dcrjson.RenameAccountCmd{ OldAccount: "oldacct", NewAccount: "newacct", }, @@ -144,7 +145,7 @@ func TestBtcWalletExtCmds(t *testing.T) { for i, test := range tests { // Marshal the command as created by the new static command // creation function. - marshalled, err := btcjson.MarshalCmd(testID, test.staticCmd()) + marshalled, err := dcrjson.MarshalCmd(testID, test.staticCmd()) if err != nil { t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) @@ -168,7 +169,7 @@ func TestBtcWalletExtCmds(t *testing.T) { // Marshal the command as created by the generic new command // creation function. - marshalled, err = btcjson.MarshalCmd(testID, cmd) + marshalled, err = dcrjson.MarshalCmd(testID, cmd) if err != nil { t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) @@ -182,7 +183,7 @@ func TestBtcWalletExtCmds(t *testing.T) { continue } - var request btcjson.Request + var request dcrjson.Request if err := json.Unmarshal(marshalled, &request); err != nil { t.Errorf("Test #%d (%s) unexpected error while "+ "unmarshalling JSON-RPC request: %v", i, @@ -190,7 +191,7 @@ func TestBtcWalletExtCmds(t *testing.T) { continue } - cmd, err = btcjson.UnmarshalCmd(&request) + cmd, err = dcrjson.UnmarshalCmd(&request) if err != nil { t.Errorf("UnmarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) diff --git a/btcjson/chainsvrcmds.go b/dcrjson/chainsvrcmds.go similarity index 97% rename from btcjson/chainsvrcmds.go rename to dcrjson/chainsvrcmds.go index 69a81f12..af188b51 100644 --- a/btcjson/chainsvrcmds.go +++ b/dcrjson/chainsvrcmds.go @@ -1,11 +1,12 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. // NOTE: This file is intended to house the RPC commands that are supported by // a chain server. -package btcjson +package dcrjson import ( "encoding/json" @@ -45,22 +46,23 @@ func NewAddNodeCmd(addr string, subCmd AddNodeSubCmd) *AddNodeCmd { } // TransactionInput represents the inputs to a transaction. Specifically a -// transaction hash and output number pair. +// transaction hash and output number pair. Contains Decred additions. 
type TransactionInput struct { Txid string `json:"txid"` Vout uint32 `json:"vout"` + Tree int8 `json:"tree"` } // CreateRawTransactionCmd defines the createrawtransaction JSON-RPC command. type CreateRawTransactionCmd struct { Inputs []TransactionInput - Amounts map[string]float64 `jsonrpcusage:"{\"address\":amount,...}"` // In BTC + Amounts map[string]float64 `jsonrpcusage:"{\"address\":amount,...}"` // In DCR } // NewCreateRawTransactionCmd returns a new instance which can be used to issue // a createrawtransaction JSON-RPC command. // -// Amounts are in BTC. +// Amounts are in DCR. func NewCreateRawTransactionCmd(inputs []TransactionInput, amounts map[string]float64) *CreateRawTransactionCmd { return &CreateRawTransactionCmd{ Inputs: inputs, @@ -94,6 +96,19 @@ func NewDecodeScriptCmd(hexScript string) *DecodeScriptCmd { } } +// EstimateFeeCmd defines the estimatefee JSON-RPC command. +type EstimateFeeCmd struct { + NumBlocks int64 +} + +// NewEstimateFeeCmd returns a new instance which can be used to issue a +// estimatefee JSON-RPC command. +func NewEstimateFeeCmd(numBlocks int64) *EstimateFeeCmd { + return &EstimateFeeCmd{ + NumBlocks: numBlocks, + } +} + // GetAddedNodeInfoCmd defines the getaddednodeinfo JSON-RPC command. type GetAddedNodeInfoCmd struct { DNS bool @@ -689,6 +704,7 @@ func init() { MustRegisterCmd("createrawtransaction", (*CreateRawTransactionCmd)(nil), flags) MustRegisterCmd("decoderawtransaction", (*DecodeRawTransactionCmd)(nil), flags) MustRegisterCmd("decodescript", (*DecodeScriptCmd)(nil), flags) + MustRegisterCmd("estimatefee", (*EstimateFeeCmd)(nil), flags) MustRegisterCmd("getaddednodeinfo", (*GetAddedNodeInfoCmd)(nil), flags) MustRegisterCmd("getbestblockhash", (*GetBestBlockHashCmd)(nil), flags) MustRegisterCmd("getblock", (*GetBlockCmd)(nil), flags) diff --git a/btcjson/chainsvrcmds_test.go b/dcrjson/chainsvrcmds_test.go similarity index 61% rename from btcjson/chainsvrcmds_test.go rename to dcrjson/chainsvrcmds_test.go index 38021524..f5b68765 100644 --- a/btcjson/chainsvrcmds_test.go +++ b/dcrjson/chainsvrcmds_test.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package btcjson_test +package dcrjson_test import ( "bytes" @@ -11,7 +12,7 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // TestChainSvrCmds tests all of the chain server commands marshal and unmarshal @@ -32,104 +33,104 @@ func TestChainSvrCmds(t *testing.T) { { name: "addnode", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("addnode", "127.0.0.1", btcjson.ANRemove) + return dcrjson.NewCmd("addnode", "127.0.0.1", dcrjson.ANRemove) }, staticCmd: func() interface{} { - return btcjson.NewAddNodeCmd("127.0.0.1", btcjson.ANRemove) + return dcrjson.NewAddNodeCmd("127.0.0.1", dcrjson.ANRemove) }, marshalled: `{"jsonrpc":"1.0","method":"addnode","params":["127.0.0.1","remove"],"id":1}`, - unmarshalled: &btcjson.AddNodeCmd{Addr: "127.0.0.1", SubCmd: btcjson.ANRemove}, + unmarshalled: &dcrjson.AddNodeCmd{Addr: "127.0.0.1", SubCmd: dcrjson.ANRemove}, }, { name: "createrawtransaction", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("createrawtransaction", `[{"txid":"123","vout":1}]`, + return dcrjson.NewCmd("createrawtransaction", `[{"txid":"123","vout":1}]`, `{"456":0.0123}`) }, staticCmd: func() interface{} { - txInputs := []btcjson.TransactionInput{ + txInputs := []dcrjson.TransactionInput{ {Txid: "123", Vout: 1}, } amounts := map[string]float64{"456": .0123} - return btcjson.NewCreateRawTransactionCmd(txInputs, amounts) + return dcrjson.NewCreateRawTransactionCmd(txInputs, amounts) }, - marshalled: `{"jsonrpc":"1.0","method":"createrawtransaction","params":[[{"txid":"123","vout":1}],{"456":0.0123}],"id":1}`, - unmarshalled: &btcjson.CreateRawTransactionCmd{ - Inputs: []btcjson.TransactionInput{{Txid: "123", Vout: 1}}, + marshalled: `{"jsonrpc":"1.0","method":"createrawtransaction","params":[[{"txid":"123","vout":1,"tree":0}],{"456":0.0123}],"id":1}`, + unmarshalled: &dcrjson.CreateRawTransactionCmd{ + Inputs: []dcrjson.TransactionInput{{Txid: "123", Vout: 1}}, Amounts: map[string]float64{"456": .0123}, }, }, { name: "decoderawtransaction", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("decoderawtransaction", "123") + return dcrjson.NewCmd("decoderawtransaction", "123") }, staticCmd: func() interface{} { - return btcjson.NewDecodeRawTransactionCmd("123") + return dcrjson.NewDecodeRawTransactionCmd("123") }, marshalled: `{"jsonrpc":"1.0","method":"decoderawtransaction","params":["123"],"id":1}`, - unmarshalled: &btcjson.DecodeRawTransactionCmd{HexTx: "123"}, + unmarshalled: &dcrjson.DecodeRawTransactionCmd{HexTx: "123"}, }, { name: "decodescript", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("decodescript", "00") + return dcrjson.NewCmd("decodescript", "00") }, staticCmd: func() interface{} { - return btcjson.NewDecodeScriptCmd("00") + return dcrjson.NewDecodeScriptCmd("00") }, marshalled: `{"jsonrpc":"1.0","method":"decodescript","params":["00"],"id":1}`, - unmarshalled: &btcjson.DecodeScriptCmd{HexScript: "00"}, + unmarshalled: &dcrjson.DecodeScriptCmd{HexScript: "00"}, }, { name: "getaddednodeinfo", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getaddednodeinfo", true) + return dcrjson.NewCmd("getaddednodeinfo", true) }, staticCmd: func() interface{} { - return btcjson.NewGetAddedNodeInfoCmd(true, nil) + return dcrjson.NewGetAddedNodeInfoCmd(true, nil) }, marshalled: `{"jsonrpc":"1.0","method":"getaddednodeinfo","params":[true],"id":1}`, - unmarshalled: &btcjson.GetAddedNodeInfoCmd{DNS: true, Node: nil}, + unmarshalled: &dcrjson.GetAddedNodeInfoCmd{DNS: 
true, Node: nil}, }, { name: "getaddednodeinfo optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getaddednodeinfo", true, "127.0.0.1") + return dcrjson.NewCmd("getaddednodeinfo", true, "127.0.0.1") }, staticCmd: func() interface{} { - return btcjson.NewGetAddedNodeInfoCmd(true, btcjson.String("127.0.0.1")) + return dcrjson.NewGetAddedNodeInfoCmd(true, dcrjson.String("127.0.0.1")) }, marshalled: `{"jsonrpc":"1.0","method":"getaddednodeinfo","params":[true,"127.0.0.1"],"id":1}`, - unmarshalled: &btcjson.GetAddedNodeInfoCmd{ + unmarshalled: &dcrjson.GetAddedNodeInfoCmd{ DNS: true, - Node: btcjson.String("127.0.0.1"), + Node: dcrjson.String("127.0.0.1"), }, }, { name: "getbestblockhash", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getbestblockhash") + return dcrjson.NewCmd("getbestblockhash") }, staticCmd: func() interface{} { - return btcjson.NewGetBestBlockHashCmd() + return dcrjson.NewGetBestBlockHashCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getbestblockhash","params":[],"id":1}`, - unmarshalled: &btcjson.GetBestBlockHashCmd{}, + unmarshalled: &dcrjson.GetBestBlockHashCmd{}, }, { name: "getblock", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getblock", "123") + return dcrjson.NewCmd("getblock", "123") }, staticCmd: func() interface{} { - return btcjson.NewGetBlockCmd("123", nil, nil) + return dcrjson.NewGetBlockCmd("123", nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"getblock","params":["123"],"id":1}`, - unmarshalled: &btcjson.GetBlockCmd{ + unmarshalled: &dcrjson.GetBlockCmd{ Hash: "123", - Verbose: btcjson.Bool(true), - VerboseTx: btcjson.Bool(false), + Verbose: dcrjson.Bool(true), + VerboseTx: dcrjson.Bool(false), }, }, { @@ -138,93 +139,93 @@ func TestChainSvrCmds(t *testing.T) { // Intentionally use a source param that is // more pointers than the destination to // exercise that path. 
- verbosePtr := btcjson.Bool(true) - return btcjson.NewCmd("getblock", "123", &verbosePtr) + verbosePtr := dcrjson.Bool(true) + return dcrjson.NewCmd("getblock", "123", &verbosePtr) }, staticCmd: func() interface{} { - return btcjson.NewGetBlockCmd("123", btcjson.Bool(true), nil) + return dcrjson.NewGetBlockCmd("123", dcrjson.Bool(true), nil) }, marshalled: `{"jsonrpc":"1.0","method":"getblock","params":["123",true],"id":1}`, - unmarshalled: &btcjson.GetBlockCmd{ + unmarshalled: &dcrjson.GetBlockCmd{ Hash: "123", - Verbose: btcjson.Bool(true), - VerboseTx: btcjson.Bool(false), + Verbose: dcrjson.Bool(true), + VerboseTx: dcrjson.Bool(false), }, }, { name: "getblock required optional2", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getblock", "123", true, true) + return dcrjson.NewCmd("getblock", "123", true, true) }, staticCmd: func() interface{} { - return btcjson.NewGetBlockCmd("123", btcjson.Bool(true), btcjson.Bool(true)) + return dcrjson.NewGetBlockCmd("123", dcrjson.Bool(true), dcrjson.Bool(true)) }, marshalled: `{"jsonrpc":"1.0","method":"getblock","params":["123",true,true],"id":1}`, - unmarshalled: &btcjson.GetBlockCmd{ + unmarshalled: &dcrjson.GetBlockCmd{ Hash: "123", - Verbose: btcjson.Bool(true), - VerboseTx: btcjson.Bool(true), + Verbose: dcrjson.Bool(true), + VerboseTx: dcrjson.Bool(true), }, }, { name: "getblockchaininfo", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getblockchaininfo") + return dcrjson.NewCmd("getblockchaininfo") }, staticCmd: func() interface{} { - return btcjson.NewGetBlockChainInfoCmd() + return dcrjson.NewGetBlockChainInfoCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getblockchaininfo","params":[],"id":1}`, - unmarshalled: &btcjson.GetBlockChainInfoCmd{}, + unmarshalled: &dcrjson.GetBlockChainInfoCmd{}, }, { name: "getblockcount", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getblockcount") + return dcrjson.NewCmd("getblockcount") }, staticCmd: func() interface{} { - return btcjson.NewGetBlockCountCmd() + return dcrjson.NewGetBlockCountCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getblockcount","params":[],"id":1}`, - unmarshalled: &btcjson.GetBlockCountCmd{}, + unmarshalled: &dcrjson.GetBlockCountCmd{}, }, { name: "getblockhash", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getblockhash", 123) + return dcrjson.NewCmd("getblockhash", 123) }, staticCmd: func() interface{} { - return btcjson.NewGetBlockHashCmd(123) + return dcrjson.NewGetBlockHashCmd(123) }, marshalled: `{"jsonrpc":"1.0","method":"getblockhash","params":[123],"id":1}`, - unmarshalled: &btcjson.GetBlockHashCmd{Index: 123}, + unmarshalled: &dcrjson.GetBlockHashCmd{Index: 123}, }, { name: "getblocktemplate", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getblocktemplate") + return dcrjson.NewCmd("getblocktemplate") }, staticCmd: func() interface{} { - return btcjson.NewGetBlockTemplateCmd(nil) + return dcrjson.NewGetBlockTemplateCmd(nil) }, marshalled: `{"jsonrpc":"1.0","method":"getblocktemplate","params":[],"id":1}`, - unmarshalled: &btcjson.GetBlockTemplateCmd{Request: nil}, + unmarshalled: &dcrjson.GetBlockTemplateCmd{Request: nil}, }, { name: "getblocktemplate optional - template request", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getblocktemplate", `{"mode":"template","capabilities":["longpoll","coinbasetxn"]}`) + return dcrjson.NewCmd("getblocktemplate", `{"mode":"template","capabilities":["longpoll","coinbasetxn"]}`) }, staticCmd: func() interface{} { - template := 
btcjson.TemplateRequest{ + template := dcrjson.TemplateRequest{ Mode: "template", Capabilities: []string{"longpoll", "coinbasetxn"}, } - return btcjson.NewGetBlockTemplateCmd(&template) + return dcrjson.NewGetBlockTemplateCmd(&template) }, marshalled: `{"jsonrpc":"1.0","method":"getblocktemplate","params":[{"mode":"template","capabilities":["longpoll","coinbasetxn"]}],"id":1}`, - unmarshalled: &btcjson.GetBlockTemplateCmd{ - Request: &btcjson.TemplateRequest{ + unmarshalled: &dcrjson.GetBlockTemplateCmd{ + Request: &dcrjson.TemplateRequest{ Mode: "template", Capabilities: []string{"longpoll", "coinbasetxn"}, }, @@ -233,21 +234,21 @@ func TestChainSvrCmds(t *testing.T) { { name: "getblocktemplate optional - template request with tweaks", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getblocktemplate", `{"mode":"template","capabilities":["longpoll","coinbasetxn"],"sigoplimit":500,"sizelimit":100000000,"maxversion":2}`) + return dcrjson.NewCmd("getblocktemplate", `{"mode":"template","capabilities":["longpoll","coinbasetxn"],"sigoplimit":500,"sizelimit":100000000,"maxversion":2}`) }, staticCmd: func() interface{} { - template := btcjson.TemplateRequest{ + template := dcrjson.TemplateRequest{ Mode: "template", Capabilities: []string{"longpoll", "coinbasetxn"}, SigOpLimit: 500, SizeLimit: 100000000, MaxVersion: 2, } - return btcjson.NewGetBlockTemplateCmd(&template) + return dcrjson.NewGetBlockTemplateCmd(&template) }, marshalled: `{"jsonrpc":"1.0","method":"getblocktemplate","params":[{"mode":"template","capabilities":["longpoll","coinbasetxn"],"sigoplimit":500,"sizelimit":100000000,"maxversion":2}],"id":1}`, - unmarshalled: &btcjson.GetBlockTemplateCmd{ - Request: &btcjson.TemplateRequest{ + unmarshalled: &dcrjson.GetBlockTemplateCmd{ + Request: &dcrjson.TemplateRequest{ Mode: "template", Capabilities: []string{"longpoll", "coinbasetxn"}, SigOpLimit: int64(500), @@ -259,21 +260,21 @@ func TestChainSvrCmds(t *testing.T) { { name: "getblocktemplate optional - template request with tweaks 2", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getblocktemplate", `{"mode":"template","capabilities":["longpoll","coinbasetxn"],"sigoplimit":true,"sizelimit":100000000,"maxversion":2}`) + return dcrjson.NewCmd("getblocktemplate", `{"mode":"template","capabilities":["longpoll","coinbasetxn"],"sigoplimit":true,"sizelimit":100000000,"maxversion":2}`) }, staticCmd: func() interface{} { - template := btcjson.TemplateRequest{ + template := dcrjson.TemplateRequest{ Mode: "template", Capabilities: []string{"longpoll", "coinbasetxn"}, SigOpLimit: true, SizeLimit: 100000000, MaxVersion: 2, } - return btcjson.NewGetBlockTemplateCmd(&template) + return dcrjson.NewGetBlockTemplateCmd(&template) }, marshalled: `{"jsonrpc":"1.0","method":"getblocktemplate","params":[{"mode":"template","capabilities":["longpoll","coinbasetxn"],"sigoplimit":true,"sizelimit":100000000,"maxversion":2}],"id":1}`, - unmarshalled: &btcjson.GetBlockTemplateCmd{ - Request: &btcjson.TemplateRequest{ + unmarshalled: &dcrjson.GetBlockTemplateCmd{ + Request: &dcrjson.TemplateRequest{ Mode: "template", Capabilities: []string{"longpoll", "coinbasetxn"}, SigOpLimit: true, @@ -285,524 +286,524 @@ func TestChainSvrCmds(t *testing.T) { { name: "getchaintips", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getchaintips") + return dcrjson.NewCmd("getchaintips") }, staticCmd: func() interface{} { - return btcjson.NewGetChainTipsCmd() + return dcrjson.NewGetChainTipsCmd() }, marshalled: 
`{"jsonrpc":"1.0","method":"getchaintips","params":[],"id":1}`, - unmarshalled: &btcjson.GetChainTipsCmd{}, + unmarshalled: &dcrjson.GetChainTipsCmd{}, }, { name: "getconnectioncount", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getconnectioncount") + return dcrjson.NewCmd("getconnectioncount") }, staticCmd: func() interface{} { - return btcjson.NewGetConnectionCountCmd() + return dcrjson.NewGetConnectionCountCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getconnectioncount","params":[],"id":1}`, - unmarshalled: &btcjson.GetConnectionCountCmd{}, + unmarshalled: &dcrjson.GetConnectionCountCmd{}, }, { name: "getdifficulty", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getdifficulty") + return dcrjson.NewCmd("getdifficulty") }, staticCmd: func() interface{} { - return btcjson.NewGetDifficultyCmd() + return dcrjson.NewGetDifficultyCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getdifficulty","params":[],"id":1}`, - unmarshalled: &btcjson.GetDifficultyCmd{}, + unmarshalled: &dcrjson.GetDifficultyCmd{}, }, { name: "getgenerate", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getgenerate") + return dcrjson.NewCmd("getgenerate") }, staticCmd: func() interface{} { - return btcjson.NewGetGenerateCmd() + return dcrjson.NewGetGenerateCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getgenerate","params":[],"id":1}`, - unmarshalled: &btcjson.GetGenerateCmd{}, + unmarshalled: &dcrjson.GetGenerateCmd{}, }, { name: "gethashespersec", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("gethashespersec") + return dcrjson.NewCmd("gethashespersec") }, staticCmd: func() interface{} { - return btcjson.NewGetHashesPerSecCmd() + return dcrjson.NewGetHashesPerSecCmd() }, marshalled: `{"jsonrpc":"1.0","method":"gethashespersec","params":[],"id":1}`, - unmarshalled: &btcjson.GetHashesPerSecCmd{}, + unmarshalled: &dcrjson.GetHashesPerSecCmd{}, }, { name: "getinfo", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getinfo") + return dcrjson.NewCmd("getinfo") }, staticCmd: func() interface{} { - return btcjson.NewGetInfoCmd() + return dcrjson.NewGetInfoCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getinfo","params":[],"id":1}`, - unmarshalled: &btcjson.GetInfoCmd{}, + unmarshalled: &dcrjson.GetInfoCmd{}, }, { name: "getmempoolinfo", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getmempoolinfo") + return dcrjson.NewCmd("getmempoolinfo") }, staticCmd: func() interface{} { - return btcjson.NewGetMempoolInfoCmd() + return dcrjson.NewGetMempoolInfoCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getmempoolinfo","params":[],"id":1}`, - unmarshalled: &btcjson.GetMempoolInfoCmd{}, + unmarshalled: &dcrjson.GetMempoolInfoCmd{}, }, { name: "getmininginfo", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getmininginfo") + return dcrjson.NewCmd("getmininginfo") }, staticCmd: func() interface{} { - return btcjson.NewGetMiningInfoCmd() + return dcrjson.NewGetMiningInfoCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getmininginfo","params":[],"id":1}`, - unmarshalled: &btcjson.GetMiningInfoCmd{}, + unmarshalled: &dcrjson.GetMiningInfoCmd{}, }, { name: "getnetworkinfo", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getnetworkinfo") + return dcrjson.NewCmd("getnetworkinfo") }, staticCmd: func() interface{} { - return btcjson.NewGetNetworkInfoCmd() + return dcrjson.NewGetNetworkInfoCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getnetworkinfo","params":[],"id":1}`, - unmarshalled: 
&btcjson.GetNetworkInfoCmd{}, + unmarshalled: &dcrjson.GetNetworkInfoCmd{}, }, { name: "getnettotals", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getnettotals") + return dcrjson.NewCmd("getnettotals") }, staticCmd: func() interface{} { - return btcjson.NewGetNetTotalsCmd() + return dcrjson.NewGetNetTotalsCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getnettotals","params":[],"id":1}`, - unmarshalled: &btcjson.GetNetTotalsCmd{}, + unmarshalled: &dcrjson.GetNetTotalsCmd{}, }, { name: "getnetworkhashps", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getnetworkhashps") + return dcrjson.NewCmd("getnetworkhashps") }, staticCmd: func() interface{} { - return btcjson.NewGetNetworkHashPSCmd(nil, nil) + return dcrjson.NewGetNetworkHashPSCmd(nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"getnetworkhashps","params":[],"id":1}`, - unmarshalled: &btcjson.GetNetworkHashPSCmd{ - Blocks: btcjson.Int(120), - Height: btcjson.Int(-1), + unmarshalled: &dcrjson.GetNetworkHashPSCmd{ + Blocks: dcrjson.Int(120), + Height: dcrjson.Int(-1), }, }, { name: "getnetworkhashps optional1", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getnetworkhashps", 200) + return dcrjson.NewCmd("getnetworkhashps", 200) }, staticCmd: func() interface{} { - return btcjson.NewGetNetworkHashPSCmd(btcjson.Int(200), nil) + return dcrjson.NewGetNetworkHashPSCmd(dcrjson.Int(200), nil) }, marshalled: `{"jsonrpc":"1.0","method":"getnetworkhashps","params":[200],"id":1}`, - unmarshalled: &btcjson.GetNetworkHashPSCmd{ - Blocks: btcjson.Int(200), - Height: btcjson.Int(-1), + unmarshalled: &dcrjson.GetNetworkHashPSCmd{ + Blocks: dcrjson.Int(200), + Height: dcrjson.Int(-1), }, }, { name: "getnetworkhashps optional2", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getnetworkhashps", 200, 123) + return dcrjson.NewCmd("getnetworkhashps", 200, 123) }, staticCmd: func() interface{} { - return btcjson.NewGetNetworkHashPSCmd(btcjson.Int(200), btcjson.Int(123)) + return dcrjson.NewGetNetworkHashPSCmd(dcrjson.Int(200), dcrjson.Int(123)) }, marshalled: `{"jsonrpc":"1.0","method":"getnetworkhashps","params":[200,123],"id":1}`, - unmarshalled: &btcjson.GetNetworkHashPSCmd{ - Blocks: btcjson.Int(200), - Height: btcjson.Int(123), + unmarshalled: &dcrjson.GetNetworkHashPSCmd{ + Blocks: dcrjson.Int(200), + Height: dcrjson.Int(123), }, }, { name: "getpeerinfo", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getpeerinfo") + return dcrjson.NewCmd("getpeerinfo") }, staticCmd: func() interface{} { - return btcjson.NewGetPeerInfoCmd() + return dcrjson.NewGetPeerInfoCmd() }, marshalled: `{"jsonrpc":"1.0","method":"getpeerinfo","params":[],"id":1}`, - unmarshalled: &btcjson.GetPeerInfoCmd{}, + unmarshalled: &dcrjson.GetPeerInfoCmd{}, }, { name: "getrawmempool", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getrawmempool") + return dcrjson.NewCmd("getrawmempool") }, staticCmd: func() interface{} { - return btcjson.NewGetRawMempoolCmd(nil) + return dcrjson.NewGetRawMempoolCmd(nil) }, marshalled: `{"jsonrpc":"1.0","method":"getrawmempool","params":[],"id":1}`, - unmarshalled: &btcjson.GetRawMempoolCmd{ - Verbose: btcjson.Bool(false), + unmarshalled: &dcrjson.GetRawMempoolCmd{ + Verbose: dcrjson.Bool(false), }, }, { name: "getrawmempool optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getrawmempool", false) + return dcrjson.NewCmd("getrawmempool", false) }, staticCmd: func() interface{} { - return 
btcjson.NewGetRawMempoolCmd(btcjson.Bool(false)) + return dcrjson.NewGetRawMempoolCmd(dcrjson.Bool(false)) }, marshalled: `{"jsonrpc":"1.0","method":"getrawmempool","params":[false],"id":1}`, - unmarshalled: &btcjson.GetRawMempoolCmd{ - Verbose: btcjson.Bool(false), + unmarshalled: &dcrjson.GetRawMempoolCmd{ + Verbose: dcrjson.Bool(false), }, }, { name: "getrawtransaction", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getrawtransaction", "123") + return dcrjson.NewCmd("getrawtransaction", "123") }, staticCmd: func() interface{} { - return btcjson.NewGetRawTransactionCmd("123", nil) + return dcrjson.NewGetRawTransactionCmd("123", nil) }, marshalled: `{"jsonrpc":"1.0","method":"getrawtransaction","params":["123"],"id":1}`, - unmarshalled: &btcjson.GetRawTransactionCmd{ + unmarshalled: &dcrjson.GetRawTransactionCmd{ Txid: "123", - Verbose: btcjson.Int(0), + Verbose: dcrjson.Int(0), }, }, { name: "getrawtransaction optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getrawtransaction", "123", 1) + return dcrjson.NewCmd("getrawtransaction", "123", 1) }, staticCmd: func() interface{} { - return btcjson.NewGetRawTransactionCmd("123", btcjson.Int(1)) + return dcrjson.NewGetRawTransactionCmd("123", dcrjson.Int(1)) }, marshalled: `{"jsonrpc":"1.0","method":"getrawtransaction","params":["123",1],"id":1}`, - unmarshalled: &btcjson.GetRawTransactionCmd{ + unmarshalled: &dcrjson.GetRawTransactionCmd{ Txid: "123", - Verbose: btcjson.Int(1), + Verbose: dcrjson.Int(1), }, }, { name: "gettxout", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("gettxout", "123", 1) + return dcrjson.NewCmd("gettxout", "123", 1) }, staticCmd: func() interface{} { - return btcjson.NewGetTxOutCmd("123", 1, nil) + return dcrjson.NewGetTxOutCmd("123", 1, nil) }, marshalled: `{"jsonrpc":"1.0","method":"gettxout","params":["123",1],"id":1}`, - unmarshalled: &btcjson.GetTxOutCmd{ + unmarshalled: &dcrjson.GetTxOutCmd{ Txid: "123", Vout: 1, - IncludeMempool: btcjson.Bool(true), + IncludeMempool: dcrjson.Bool(true), }, }, { name: "gettxout optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("gettxout", "123", 1, true) + return dcrjson.NewCmd("gettxout", "123", 1, true) }, staticCmd: func() interface{} { - return btcjson.NewGetTxOutCmd("123", 1, btcjson.Bool(true)) + return dcrjson.NewGetTxOutCmd("123", 1, dcrjson.Bool(true)) }, marshalled: `{"jsonrpc":"1.0","method":"gettxout","params":["123",1,true],"id":1}`, - unmarshalled: &btcjson.GetTxOutCmd{ + unmarshalled: &dcrjson.GetTxOutCmd{ Txid: "123", Vout: 1, - IncludeMempool: btcjson.Bool(true), + IncludeMempool: dcrjson.Bool(true), }, }, { name: "gettxoutproof", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("gettxoutproof", []string{"123", "456"}) + return dcrjson.NewCmd("gettxoutproof", []string{"123", "456"}) }, staticCmd: func() interface{} { - return btcjson.NewGetTxOutProofCmd([]string{"123", "456"}, nil) + return dcrjson.NewGetTxOutProofCmd([]string{"123", "456"}, nil) }, marshalled: `{"jsonrpc":"1.0","method":"gettxoutproof","params":[["123","456"]],"id":1}`, - unmarshalled: &btcjson.GetTxOutProofCmd{ + unmarshalled: &dcrjson.GetTxOutProofCmd{ TxIDs: []string{"123", "456"}, }, }, { name: "gettxoutproof optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("gettxoutproof", []string{"123", "456"}, - btcjson.String("000000000000034a7dedef4a161fa058a2d67a173a90155f3a2fe6fc132e0ebf")) + return dcrjson.NewCmd("gettxoutproof", []string{"123", "456"}, + 
dcrjson.String("000000000000034a7dedef4a161fa058a2d67a173a90155f3a2fe6fc132e0ebf")) }, staticCmd: func() interface{} { - return btcjson.NewGetTxOutProofCmd([]string{"123", "456"}, - btcjson.String("000000000000034a7dedef4a161fa058a2d67a173a90155f3a2fe6fc132e0ebf")) + return dcrjson.NewGetTxOutProofCmd([]string{"123", "456"}, + dcrjson.String("000000000000034a7dedef4a161fa058a2d67a173a90155f3a2fe6fc132e0ebf")) }, marshalled: `{"jsonrpc":"1.0","method":"gettxoutproof","params":[["123","456"],` + `"000000000000034a7dedef4a161fa058a2d67a173a90155f3a2fe6fc132e0ebf"],"id":1}`, - unmarshalled: &btcjson.GetTxOutProofCmd{ + unmarshalled: &dcrjson.GetTxOutProofCmd{ TxIDs: []string{"123", "456"}, - BlockHash: btcjson.String("000000000000034a7dedef4a161fa058a2d67a173a90155f3a2fe6fc132e0ebf"), + BlockHash: dcrjson.String("000000000000034a7dedef4a161fa058a2d67a173a90155f3a2fe6fc132e0ebf"), }, }, { name: "gettxoutsetinfo", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("gettxoutsetinfo") + return dcrjson.NewCmd("gettxoutsetinfo") }, staticCmd: func() interface{} { - return btcjson.NewGetTxOutSetInfoCmd() + return dcrjson.NewGetTxOutSetInfoCmd() }, marshalled: `{"jsonrpc":"1.0","method":"gettxoutsetinfo","params":[],"id":1}`, - unmarshalled: &btcjson.GetTxOutSetInfoCmd{}, + unmarshalled: &dcrjson.GetTxOutSetInfoCmd{}, }, { name: "getwork", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getwork") + return dcrjson.NewCmd("getwork") }, staticCmd: func() interface{} { - return btcjson.NewGetWorkCmd(nil) + return dcrjson.NewGetWorkCmd(nil) }, marshalled: `{"jsonrpc":"1.0","method":"getwork","params":[],"id":1}`, - unmarshalled: &btcjson.GetWorkCmd{ + unmarshalled: &dcrjson.GetWorkCmd{ Data: nil, }, }, { name: "getwork optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getwork", "00112233") + return dcrjson.NewCmd("getwork", "00112233") }, staticCmd: func() interface{} { - return btcjson.NewGetWorkCmd(btcjson.String("00112233")) + return dcrjson.NewGetWorkCmd(dcrjson.String("00112233")) }, marshalled: `{"jsonrpc":"1.0","method":"getwork","params":["00112233"],"id":1}`, - unmarshalled: &btcjson.GetWorkCmd{ - Data: btcjson.String("00112233"), + unmarshalled: &dcrjson.GetWorkCmd{ + Data: dcrjson.String("00112233"), }, }, { name: "help", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("help") + return dcrjson.NewCmd("help") }, staticCmd: func() interface{} { - return btcjson.NewHelpCmd(nil) + return dcrjson.NewHelpCmd(nil) }, marshalled: `{"jsonrpc":"1.0","method":"help","params":[],"id":1}`, - unmarshalled: &btcjson.HelpCmd{ + unmarshalled: &dcrjson.HelpCmd{ Command: nil, }, }, { name: "help optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("help", "getblock") + return dcrjson.NewCmd("help", "getblock") }, staticCmd: func() interface{} { - return btcjson.NewHelpCmd(btcjson.String("getblock")) + return dcrjson.NewHelpCmd(dcrjson.String("getblock")) }, marshalled: `{"jsonrpc":"1.0","method":"help","params":["getblock"],"id":1}`, - unmarshalled: &btcjson.HelpCmd{ - Command: btcjson.String("getblock"), + unmarshalled: &dcrjson.HelpCmd{ + Command: dcrjson.String("getblock"), }, }, { name: "invalidateblock", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("invalidateblock", "123") + return dcrjson.NewCmd("invalidateblock", "123") }, staticCmd: func() interface{} { - return btcjson.NewInvalidateBlockCmd("123") + return dcrjson.NewInvalidateBlockCmd("123") }, marshalled: 
`{"jsonrpc":"1.0","method":"invalidateblock","params":["123"],"id":1}`, - unmarshalled: &btcjson.InvalidateBlockCmd{ + unmarshalled: &dcrjson.InvalidateBlockCmd{ BlockHash: "123", }, }, { name: "ping", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("ping") + return dcrjson.NewCmd("ping") }, staticCmd: func() interface{} { - return btcjson.NewPingCmd() + return dcrjson.NewPingCmd() }, marshalled: `{"jsonrpc":"1.0","method":"ping","params":[],"id":1}`, - unmarshalled: &btcjson.PingCmd{}, + unmarshalled: &dcrjson.PingCmd{}, }, { name: "reconsiderblock", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("reconsiderblock", "123") + return dcrjson.NewCmd("reconsiderblock", "123") }, staticCmd: func() interface{} { - return btcjson.NewReconsiderBlockCmd("123") + return dcrjson.NewReconsiderBlockCmd("123") }, marshalled: `{"jsonrpc":"1.0","method":"reconsiderblock","params":["123"],"id":1}`, - unmarshalled: &btcjson.ReconsiderBlockCmd{ + unmarshalled: &dcrjson.ReconsiderBlockCmd{ BlockHash: "123", }, }, { name: "searchrawtransactions", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("searchrawtransactions", "1Address") + return dcrjson.NewCmd("searchrawtransactions", "1Address") }, staticCmd: func() interface{} { - return btcjson.NewSearchRawTransactionsCmd("1Address", nil, nil, nil) + return dcrjson.NewSearchRawTransactionsCmd("1Address", nil, nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"searchrawtransactions","params":["1Address"],"id":1}`, - unmarshalled: &btcjson.SearchRawTransactionsCmd{ + unmarshalled: &dcrjson.SearchRawTransactionsCmd{ Address: "1Address", - Verbose: btcjson.Int(1), - Skip: btcjson.Int(0), - Count: btcjson.Int(100), + Verbose: dcrjson.Int(1), + Skip: dcrjson.Int(0), + Count: dcrjson.Int(100), }, }, { name: "searchrawtransactions", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("searchrawtransactions", "1Address", 0) + return dcrjson.NewCmd("searchrawtransactions", "1Address", 0) }, staticCmd: func() interface{} { - return btcjson.NewSearchRawTransactionsCmd("1Address", - btcjson.Int(0), nil, nil) + return dcrjson.NewSearchRawTransactionsCmd("1Address", + dcrjson.Int(0), nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"searchrawtransactions","params":["1Address",0],"id":1}`, - unmarshalled: &btcjson.SearchRawTransactionsCmd{ + unmarshalled: &dcrjson.SearchRawTransactionsCmd{ Address: "1Address", - Verbose: btcjson.Int(0), - Skip: btcjson.Int(0), - Count: btcjson.Int(100), + Verbose: dcrjson.Int(0), + Skip: dcrjson.Int(0), + Count: dcrjson.Int(100), }, }, { name: "searchrawtransactions", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("searchrawtransactions", "1Address", 0, 5) + return dcrjson.NewCmd("searchrawtransactions", "1Address", 0, 5) }, staticCmd: func() interface{} { - return btcjson.NewSearchRawTransactionsCmd("1Address", - btcjson.Int(0), btcjson.Int(5), nil) + return dcrjson.NewSearchRawTransactionsCmd("1Address", + dcrjson.Int(0), dcrjson.Int(5), nil) }, marshalled: `{"jsonrpc":"1.0","method":"searchrawtransactions","params":["1Address",0,5],"id":1}`, - unmarshalled: &btcjson.SearchRawTransactionsCmd{ + unmarshalled: &dcrjson.SearchRawTransactionsCmd{ Address: "1Address", - Verbose: btcjson.Int(0), - Skip: btcjson.Int(5), - Count: btcjson.Int(100), + Verbose: dcrjson.Int(0), + Skip: dcrjson.Int(5), + Count: dcrjson.Int(100), }, }, { name: "searchrawtransactions", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("searchrawtransactions", "1Address", 0, 5, 10) + return 
dcrjson.NewCmd("searchrawtransactions", "1Address", 0, 5, 10) }, staticCmd: func() interface{} { - return btcjson.NewSearchRawTransactionsCmd("1Address", - btcjson.Int(0), btcjson.Int(5), btcjson.Int(10)) + return dcrjson.NewSearchRawTransactionsCmd("1Address", + dcrjson.Int(0), dcrjson.Int(5), dcrjson.Int(10)) }, marshalled: `{"jsonrpc":"1.0","method":"searchrawtransactions","params":["1Address",0,5,10],"id":1}`, - unmarshalled: &btcjson.SearchRawTransactionsCmd{ + unmarshalled: &dcrjson.SearchRawTransactionsCmd{ Address: "1Address", - Verbose: btcjson.Int(0), - Skip: btcjson.Int(5), - Count: btcjson.Int(10), + Verbose: dcrjson.Int(0), + Skip: dcrjson.Int(5), + Count: dcrjson.Int(10), }, }, { name: "sendrawtransaction", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("sendrawtransaction", "1122") + return dcrjson.NewCmd("sendrawtransaction", "1122") }, staticCmd: func() interface{} { - return btcjson.NewSendRawTransactionCmd("1122", nil) + return dcrjson.NewSendRawTransactionCmd("1122", nil) }, marshalled: `{"jsonrpc":"1.0","method":"sendrawtransaction","params":["1122"],"id":1}`, - unmarshalled: &btcjson.SendRawTransactionCmd{ + unmarshalled: &dcrjson.SendRawTransactionCmd{ HexTx: "1122", - AllowHighFees: btcjson.Bool(false), + AllowHighFees: dcrjson.Bool(false), }, }, { name: "sendrawtransaction optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("sendrawtransaction", "1122", false) + return dcrjson.NewCmd("sendrawtransaction", "1122", false) }, staticCmd: func() interface{} { - return btcjson.NewSendRawTransactionCmd("1122", btcjson.Bool(false)) + return dcrjson.NewSendRawTransactionCmd("1122", dcrjson.Bool(false)) }, marshalled: `{"jsonrpc":"1.0","method":"sendrawtransaction","params":["1122",false],"id":1}`, - unmarshalled: &btcjson.SendRawTransactionCmd{ + unmarshalled: &dcrjson.SendRawTransactionCmd{ HexTx: "1122", - AllowHighFees: btcjson.Bool(false), + AllowHighFees: dcrjson.Bool(false), }, }, { name: "setgenerate", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("setgenerate", true) + return dcrjson.NewCmd("setgenerate", true) }, staticCmd: func() interface{} { - return btcjson.NewSetGenerateCmd(true, nil) + return dcrjson.NewSetGenerateCmd(true, nil) }, marshalled: `{"jsonrpc":"1.0","method":"setgenerate","params":[true],"id":1}`, - unmarshalled: &btcjson.SetGenerateCmd{ + unmarshalled: &dcrjson.SetGenerateCmd{ Generate: true, - GenProcLimit: btcjson.Int(-1), + GenProcLimit: dcrjson.Int(-1), }, }, { name: "setgenerate optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("setgenerate", true, 6) + return dcrjson.NewCmd("setgenerate", true, 6) }, staticCmd: func() interface{} { - return btcjson.NewSetGenerateCmd(true, btcjson.Int(6)) + return dcrjson.NewSetGenerateCmd(true, dcrjson.Int(6)) }, marshalled: `{"jsonrpc":"1.0","method":"setgenerate","params":[true,6],"id":1}`, - unmarshalled: &btcjson.SetGenerateCmd{ + unmarshalled: &dcrjson.SetGenerateCmd{ Generate: true, - GenProcLimit: btcjson.Int(6), + GenProcLimit: dcrjson.Int(6), }, }, { name: "stop", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("stop") + return dcrjson.NewCmd("stop") }, staticCmd: func() interface{} { - return btcjson.NewStopCmd() + return dcrjson.NewStopCmd() }, marshalled: `{"jsonrpc":"1.0","method":"stop","params":[],"id":1}`, - unmarshalled: &btcjson.StopCmd{}, + unmarshalled: &dcrjson.StopCmd{}, }, { name: "submitblock", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("submitblock", "112233") + return 
dcrjson.NewCmd("submitblock", "112233") }, staticCmd: func() interface{} { - return btcjson.NewSubmitBlockCmd("112233", nil) + return dcrjson.NewSubmitBlockCmd("112233", nil) }, marshalled: `{"jsonrpc":"1.0","method":"submitblock","params":["112233"],"id":1}`, - unmarshalled: &btcjson.SubmitBlockCmd{ + unmarshalled: &dcrjson.SubmitBlockCmd{ HexBlock: "112233", Options: nil, }, @@ -810,18 +811,18 @@ func TestChainSvrCmds(t *testing.T) { { name: "submitblock optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("submitblock", "112233", `{"workid":"12345"}`) + return dcrjson.NewCmd("submitblock", "112233", `{"workid":"12345"}`) }, staticCmd: func() interface{} { - options := btcjson.SubmitBlockOptions{ + options := dcrjson.SubmitBlockOptions{ WorkID: "12345", } - return btcjson.NewSubmitBlockCmd("112233", &options) + return dcrjson.NewSubmitBlockCmd("112233", &options) }, marshalled: `{"jsonrpc":"1.0","method":"submitblock","params":["112233",{"workid":"12345"}],"id":1}`, - unmarshalled: &btcjson.SubmitBlockCmd{ + unmarshalled: &dcrjson.SubmitBlockCmd{ HexBlock: "112233", - Options: &btcjson.SubmitBlockOptions{ + Options: &dcrjson.SubmitBlockOptions{ WorkID: "12345", }, }, @@ -829,68 +830,68 @@ func TestChainSvrCmds(t *testing.T) { { name: "validateaddress", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("validateaddress", "1Address") + return dcrjson.NewCmd("validateaddress", "1Address") }, staticCmd: func() interface{} { - return btcjson.NewValidateAddressCmd("1Address") + return dcrjson.NewValidateAddressCmd("1Address") }, marshalled: `{"jsonrpc":"1.0","method":"validateaddress","params":["1Address"],"id":1}`, - unmarshalled: &btcjson.ValidateAddressCmd{ + unmarshalled: &dcrjson.ValidateAddressCmd{ Address: "1Address", }, }, { name: "verifychain", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("verifychain") + return dcrjson.NewCmd("verifychain") }, staticCmd: func() interface{} { - return btcjson.NewVerifyChainCmd(nil, nil) + return dcrjson.NewVerifyChainCmd(nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"verifychain","params":[],"id":1}`, - unmarshalled: &btcjson.VerifyChainCmd{ - CheckLevel: btcjson.Int32(3), - CheckDepth: btcjson.Int32(288), + unmarshalled: &dcrjson.VerifyChainCmd{ + CheckLevel: dcrjson.Int32(3), + CheckDepth: dcrjson.Int32(288), }, }, { name: "verifychain optional1", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("verifychain", 2) + return dcrjson.NewCmd("verifychain", 2) }, staticCmd: func() interface{} { - return btcjson.NewVerifyChainCmd(btcjson.Int32(2), nil) + return dcrjson.NewVerifyChainCmd(dcrjson.Int32(2), nil) }, marshalled: `{"jsonrpc":"1.0","method":"verifychain","params":[2],"id":1}`, - unmarshalled: &btcjson.VerifyChainCmd{ - CheckLevel: btcjson.Int32(2), - CheckDepth: btcjson.Int32(288), + unmarshalled: &dcrjson.VerifyChainCmd{ + CheckLevel: dcrjson.Int32(2), + CheckDepth: dcrjson.Int32(288), }, }, { name: "verifychain optional2", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("verifychain", 2, 500) + return dcrjson.NewCmd("verifychain", 2, 500) }, staticCmd: func() interface{} { - return btcjson.NewVerifyChainCmd(btcjson.Int32(2), btcjson.Int32(500)) + return dcrjson.NewVerifyChainCmd(dcrjson.Int32(2), dcrjson.Int32(500)) }, marshalled: `{"jsonrpc":"1.0","method":"verifychain","params":[2,500],"id":1}`, - unmarshalled: &btcjson.VerifyChainCmd{ - CheckLevel: btcjson.Int32(2), - CheckDepth: btcjson.Int32(500), + unmarshalled: &dcrjson.VerifyChainCmd{ + CheckLevel: 
dcrjson.Int32(2), + CheckDepth: dcrjson.Int32(500), }, }, { name: "verifymessage", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("verifymessage", "1Address", "301234", "test") + return dcrjson.NewCmd("verifymessage", "1Address", "301234", "test") }, staticCmd: func() interface{} { - return btcjson.NewVerifyMessageCmd("1Address", "301234", "test") + return dcrjson.NewVerifyMessageCmd("1Address", "301234", "test") }, marshalled: `{"jsonrpc":"1.0","method":"verifymessage","params":["1Address","301234","test"],"id":1}`, - unmarshalled: &btcjson.VerifyMessageCmd{ + unmarshalled: &dcrjson.VerifyMessageCmd{ Address: "1Address", Signature: "301234", Message: "test", @@ -899,13 +900,13 @@ func TestChainSvrCmds(t *testing.T) { { name: "verifytxoutproof", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("verifytxoutproof", "test") + return dcrjson.NewCmd("verifytxoutproof", "test") }, staticCmd: func() interface{} { - return btcjson.NewVerifyTxOutProofCmd("test") + return dcrjson.NewVerifyTxOutProofCmd("test") }, marshalled: `{"jsonrpc":"1.0","method":"verifytxoutproof","params":["test"],"id":1}`, - unmarshalled: &btcjson.VerifyTxOutProofCmd{ + unmarshalled: &dcrjson.VerifyTxOutProofCmd{ Proof: "test", }, }, @@ -915,7 +916,7 @@ func TestChainSvrCmds(t *testing.T) { for i, test := range tests { // Marshal the command as created by the new static command // creation function. - marshalled, err := btcjson.MarshalCmd(testID, test.staticCmd()) + marshalled, err := dcrjson.MarshalCmd(testID, test.staticCmd()) if err != nil { t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) @@ -939,7 +940,7 @@ func TestChainSvrCmds(t *testing.T) { // Marshal the command as created by the generic new command // creation function. - marshalled, err = btcjson.MarshalCmd(testID, cmd) + marshalled, err = dcrjson.MarshalCmd(testID, cmd) if err != nil { t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) @@ -953,7 +954,7 @@ func TestChainSvrCmds(t *testing.T) { continue } - var request btcjson.Request + var request dcrjson.Request if err := json.Unmarshal(marshalled, &request); err != nil { t.Errorf("Test #%d (%s) unexpected error while "+ "unmarshalling JSON-RPC request: %v", i, @@ -961,7 +962,7 @@ func TestChainSvrCmds(t *testing.T) { continue } - cmd, err = btcjson.UnmarshalCmd(&request) + cmd, err = dcrjson.UnmarshalCmd(&request) if err != nil { t.Errorf("UnmarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) @@ -991,21 +992,21 @@ func TestChainSvrCmdErrors(t *testing.T) { }{ { name: "template request with invalid type", - result: &btcjson.TemplateRequest{}, + result: &dcrjson.TemplateRequest{}, marshalled: `{"mode":1}`, err: &json.UnmarshalTypeError{}, }, { name: "invalid template request sigoplimit field", - result: &btcjson.TemplateRequest{}, + result: &dcrjson.TemplateRequest{}, marshalled: `{"sigoplimit":"invalid"}`, - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "invalid template request sizelimit field", - result: &btcjson.TemplateRequest{}, + result: &dcrjson.TemplateRequest{}, marshalled: `{"sizelimit":"invalid"}`, - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, } @@ -1018,12 +1019,12 @@ func TestChainSvrCmdErrors(t *testing.T) { continue } - if terr, ok := test.err.(btcjson.Error); ok { - gotErrorCode := err.(btcjson.Error).ErrorCode - if gotErrorCode != terr.ErrorCode { + if terr, ok := 
test.err.(dcrjson.Error); ok { + gotErrorCode := err.(dcrjson.Error).Code + if gotErrorCode != terr.Code { t.Errorf("Test #%d (%s) mismatched error code "+ "- got %v (%v), want %v", i, test.name, - gotErrorCode, terr, terr.ErrorCode) + gotErrorCode, terr, terr.Code) continue } } diff --git a/btcjson/chainsvrresults.go b/dcrjson/chainsvrresults.go similarity index 81% rename from btcjson/chainsvrresults.go rename to dcrjson/chainsvrresults.go index 168d0034..fcf373b8 100644 --- a/btcjson/chainsvrresults.go +++ b/dcrjson/chainsvrresults.go @@ -1,14 +1,15 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson +package dcrjson import "encoding/json" // GetBlockVerboseResult models the data from the getblock command when the // verbose flag is set. When the verbose flag is not set, getblock returns a -// hex-encoded string. +// hex-encoded string. Contains Decred additions. type GetBlockVerboseResult struct { Hash string `json:"hash"` Confirmations uint64 `json:"confirmations"` @@ -16,12 +17,23 @@ type GetBlockVerboseResult struct { Height int64 `json:"height"` Version int32 `json:"version"` MerkleRoot string `json:"merkleroot"` + StakeRoot string `json:"stakeroot"` Tx []string `json:"tx,omitempty"` RawTx []TxRawResult `json:"rawtx,omitempty"` + STx []string `json:"stx,omitempty"` + RawSTx []TxRawResult `json:"rawstx,omitempty"` Time int64 `json:"time"` Nonce uint32 `json:"nonce"` + VoteBits uint16 `json:"votebits"` + FinalState string `json:"finalstate"` + Voters uint16 `json:"voters"` + FreshStake uint8 `json:"freshstake"` + Revocations uint8 `json:"revocations"` + PoolSize uint32 `json:"poolsize"` Bits string `json:"bits"` + SBits float64 `json:"sbits"` Difficulty float64 `json:"difficulty"` + ExtraData string `json:"extradata"` PreviousHash string `json:"previousblockhash"` NextHash string `json:"nextblockhash,omitempty"` } @@ -76,6 +88,7 @@ type GetBlockTemplateResultTx struct { Depends []int64 `json:"depends"` Fee int64 `json:"fee"` SigOps int64 `json:"sigops"` + TxType string `json:"txtype"` } // GetBlockTemplateResultAux models the coinbaseaux field of the @@ -89,14 +102,14 @@ type GetBlockTemplateResultAux struct { type GetBlockTemplateResult struct { // Base fields from BIP 0022. CoinbaseAux is optional. One of // CoinbaseTxn or CoinbaseValue must be specified, but not both. - Bits string `json:"bits"` - CurTime int64 `json:"curtime"` - Height int64 `json:"height"` - PreviousHash string `json:"previousblockhash"` + // GBT has been modified from the Bitcoin semantics to include + // the header rather than various components which are all part + // of the header anyway. + Header string `json:"header"` SigOpLimit int64 `json:"sigoplimit,omitempty"` SizeLimit int64 `json:"sizelimit,omitempty"` Transactions []GetBlockTemplateResultTx `json:"transactions"` - Version int32 `json:"version"` + STransactions []GetBlockTemplateResultTx `json:"stransactions"` CoinbaseAux *GetBlockTemplateResultAux `json:"coinbaseaux,omitempty"` CoinbaseTxn *GetBlockTemplateResultTx `json:"coinbasetxn,omitempty"` CoinbaseValue *int64 `json:"coinbasevalue,omitempty"` @@ -209,11 +222,15 @@ type ScriptSig struct { // getrawtransaction, decoderawtransaction, and searchrawtransaction use the // same structure. 
type Vin struct { - Coinbase string `json:"coinbase"` - Txid string `json:"txid"` - Vout uint32 `json:"vout"` - ScriptSig *ScriptSig `json:"scriptSig"` - Sequence uint32 `json:"sequence"` + Coinbase string `json:"coinbase"` + Txid string `json:"txid"` + Vout uint32 `json:"vout"` + Tree int8 `json:"tree"` + Sequence uint32 `json:"sequence"` + AmountIn int64 `json:"amountin"` + BlockHeight uint32 `json:"blockheight"` + BlockIndex uint32 `json:"blockindex"` + ScriptSig *ScriptSig `json:"scriptSig"` } // IsCoinBase returns a bool to show if a Vin is a Coinbase one or not. @@ -225,25 +242,39 @@ func (v *Vin) IsCoinBase() bool { func (v *Vin) MarshalJSON() ([]byte, error) { if v.IsCoinBase() { coinbaseStruct := struct { - Coinbase string `json:"coinbase"` - Sequence uint32 `json:"sequence"` + AmountIn int64 `json:"amountin"` + BlockHeight uint32 `json:"blockheight"` + BlockIndex uint32 `json:"blockindex"` + Coinbase string `json:"coinbase"` + Sequence uint32 `json:"sequence"` }{ - Coinbase: v.Coinbase, - Sequence: v.Sequence, + AmountIn: v.AmountIn, + BlockHeight: v.BlockHeight, + BlockIndex: v.BlockIndex, + Coinbase: v.Coinbase, + Sequence: v.Sequence, } return json.Marshal(coinbaseStruct) } txStruct := struct { - Txid string `json:"txid"` - Vout uint32 `json:"vout"` - ScriptSig *ScriptSig `json:"scriptSig"` - Sequence uint32 `json:"sequence"` + Txid string `json:"txid"` + Vout uint32 `json:"vout"` + Tree int8 `json:"tree"` + Sequence uint32 `json:"sequence"` + AmountIn int64 `json:"amountin"` + BlockHeight uint32 `json:"blockheight"` + BlockIndex uint32 `json:"blockindex"` + ScriptSig *ScriptSig `json:"scriptSig"` }{ - Txid: v.Txid, - Vout: v.Vout, - ScriptSig: v.ScriptSig, - Sequence: v.Sequence, + Txid: v.Txid, + Vout: v.Vout, + Tree: v.Tree, + Sequence: v.Sequence, + AmountIn: v.AmountIn, + BlockHeight: v.BlockHeight, + BlockIndex: v.BlockIndex, + ScriptSig: v.ScriptSig, } return json.Marshal(txStruct) } @@ -253,15 +284,18 @@ func (v *Vin) MarshalJSON() ([]byte, error) { type Vout struct { Value float64 `json:"value"` N uint32 `json:"n"` + Version uint16 `json:"version"` ScriptPubKey ScriptPubKeyResult `json:"scriptPubKey"` } // GetMiningInfoResult models the data from the getmininginfo command. +// Contains Decred additions. type GetMiningInfoResult struct { Blocks int64 `json:"blocks"` CurrentBlockSize uint64 `json:"currentblocksize"` CurrentBlockTx uint64 `json:"currentblocktx"` Difficulty float64 `json:"difficulty"` + StakeDifficulty int64 `json:"stakedifficulty"` Errors string `json:"errors"` Generate bool `json:"generate"` GenProcLimit int32 `json:"genproclimit"` @@ -273,10 +307,8 @@ type GetMiningInfoResult struct { // GetWorkResult models the data from the getwork command. type GetWorkResult struct { - Data string `json:"data"` - Hash1 string `json:"hash1"` - Midstate string `json:"midstate"` - Target string `json:"target"` + Data string `json:"data"` + Target string `json:"target"` } // InfoChainResult models the data returned by the chain server getinfo command. 
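A minimal sketch of the reworked Vin marshalling above (illustrative only, not part of the patch); the expected output matches the coinbase test case in chainsvrresults_test.go further down, with the Decred-specific amountin, blockheight, and blockindex fields emitted even when they are zero:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/decred/dcrd/dcrjson"
)

func main() {
	// A coinbase input only sets Coinbase and Sequence; the added Decred
	// fields default to zero but are still included by MarshalJSON.
	vin := dcrjson.Vin{
		Coinbase: "021234",
		Sequence: 4294967295,
	}

	b, err := json.Marshal(&vin) // MarshalJSON is defined on *Vin
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}

	// Expected per the test data in this patch:
	// {"amountin":0,"blockheight":0,"blockindex":0,"coinbase":"021234","sequence":4294967295}
	fmt.Println(string(b))
}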
@@ -315,9 +347,12 @@ type TxRawResult struct { Txid string `json:"txid"` Version int32 `json:"version"` LockTime uint32 `json:"locktime"` + Expiry uint32 `json:"expiry"` Vin []Vin `json:"vin"` Vout []Vout `json:"vout"` BlockHash string `json:"blockhash,omitempty"` + BlockHeight int64 `json:"blockheight"` + BlockIndex uint32 `json:"blockindex"` Confirmations uint64 `json:"confirmations,omitempty"` Time int64 `json:"time,omitempty"` Blocktime int64 `json:"blocktime,omitempty"` @@ -328,6 +363,7 @@ type TxRawDecodeResult struct { Txid string `json:"txid"` Version int32 `json:"version"` Locktime uint32 `json:"locktime"` + Expiry uint32 `json:"expiry"` Vin []Vin `json:"vin"` Vout []Vout `json:"vout"` } diff --git a/btcjson/chainsvrresults_test.go b/dcrjson/chainsvrresults_test.go similarity index 72% rename from btcjson/chainsvrresults_test.go rename to dcrjson/chainsvrresults_test.go index e6e72cfd..51b9ff3e 100644 --- a/btcjson/chainsvrresults_test.go +++ b/dcrjson/chainsvrresults_test.go @@ -1,14 +1,15 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson_test +package dcrjson_test import ( "encoding/json" "testing" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // TestChainSvrCustomResults ensures any results that have custom marshalling @@ -24,24 +25,25 @@ func TestChainSvrCustomResults(t *testing.T) { }{ { name: "custom vin marshal with coinbase", - result: &btcjson.Vin{ + result: &dcrjson.Vin{ Coinbase: "021234", Sequence: 4294967295, }, - expected: `{"coinbase":"021234","sequence":4294967295}`, + expected: `{"amountin":0,"blockheight":0,"blockindex":0,"coinbase":"021234","sequence":4294967295}`, }, { name: "custom vin marshal without coinbase", - result: &btcjson.Vin{ + result: &dcrjson.Vin{ Txid: "123", Vout: 1, - ScriptSig: &btcjson.ScriptSig{ + Tree: 0, + ScriptSig: &dcrjson.ScriptSig{ Asm: "0", Hex: "00", }, Sequence: 4294967295, }, - expected: `{"txid":"123","vout":1,"scriptSig":{"asm":"0","hex":"00"},"sequence":4294967295}`, + expected: `{"txid":"123","vout":1,"tree":0,"sequence":4294967295,"amountin":0,"blockheight":0,"blockindex":0,"scriptSig":{"asm":"0","hex":"00"}}`, }, } diff --git a/btcjson/chainsvrwscmds.go b/dcrjson/chainsvrwscmds.go similarity index 91% rename from btcjson/chainsvrwscmds.go rename to dcrjson/chainsvrwscmds.go index ba8a5391..05aaf1a2 100644 --- a/btcjson/chainsvrwscmds.go +++ b/dcrjson/chainsvrwscmds.go @@ -1,11 +1,12 @@ -// Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2014-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. // NOTE: This file is intended to house the RPC commands that are supported by // a chain server, but are only available via websockets. -package btcjson +package dcrjson // AuthenticateCmd defines the authenticate JSON-RPC command. type AuthenticateCmd struct { @@ -56,6 +57,15 @@ func NewNotifyNewTransactionsCmd(verbose *bool) *NotifyNewTransactionsCmd { } } +// SessionCmd defines the session JSON-RPC command. +type SessionCmd struct{} + +// NewSessionCmd returns a new instance which can be used to issue a session +// JSON-RPC command. +func NewSessionCmd() *SessionCmd { + return &SessionCmd{} +} + // StopNotifyNewTransactionsCmd defines the stopnotifynewtransactions JSON-RPC command. 
type StopNotifyNewTransactionsCmd struct{} @@ -82,9 +92,10 @@ func NewNotifyReceivedCmd(addresses []string) *NotifyReceivedCmd { } // OutPoint describes a transaction outpoint that will be marshalled to and -// from JSON. +// from JSON. Contains Decred addition. type OutPoint struct { Hash string `json:"hash"` + Tree int8 `json:"tree"` Index uint32 `json:"index"` } @@ -158,6 +169,7 @@ func init() { MustRegisterCmd("notifynewtransactions", (*NotifyNewTransactionsCmd)(nil), flags) MustRegisterCmd("notifyreceived", (*NotifyReceivedCmd)(nil), flags) MustRegisterCmd("notifyspent", (*NotifySpentCmd)(nil), flags) + MustRegisterCmd("session", (*SessionCmd)(nil), flags) MustRegisterCmd("stopnotifyblocks", (*StopNotifyBlocksCmd)(nil), flags) MustRegisterCmd("stopnotifynewtransactions", (*StopNotifyNewTransactionsCmd)(nil), flags) MustRegisterCmd("stopnotifyspent", (*StopNotifySpentCmd)(nil), flags) diff --git a/btcjson/chainsvrwscmds_test.go b/dcrjson/chainsvrwscmds_test.go similarity index 64% rename from btcjson/chainsvrwscmds_test.go rename to dcrjson/chainsvrwscmds_test.go index 27d73c2b..075fff07 100644 --- a/btcjson/chainsvrwscmds_test.go +++ b/dcrjson/chainsvrwscmds_test.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson_test +package dcrjson_test import ( "bytes" @@ -11,7 +12,7 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // TestChainSvrWsCmds tests all of the chain server websocket-specific commands @@ -32,164 +33,164 @@ func TestChainSvrWsCmds(t *testing.T) { { name: "authenticate", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("authenticate", "user", "pass") + return dcrjson.NewCmd("authenticate", "user", "pass") }, staticCmd: func() interface{} { - return btcjson.NewAuthenticateCmd("user", "pass") + return dcrjson.NewAuthenticateCmd("user", "pass") }, marshalled: `{"jsonrpc":"1.0","method":"authenticate","params":["user","pass"],"id":1}`, - unmarshalled: &btcjson.AuthenticateCmd{Username: "user", Passphrase: "pass"}, + unmarshalled: &dcrjson.AuthenticateCmd{Username: "user", Passphrase: "pass"}, }, { name: "notifyblocks", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("notifyblocks") + return dcrjson.NewCmd("notifyblocks") }, staticCmd: func() interface{} { - return btcjson.NewNotifyBlocksCmd() + return dcrjson.NewNotifyBlocksCmd() }, marshalled: `{"jsonrpc":"1.0","method":"notifyblocks","params":[],"id":1}`, - unmarshalled: &btcjson.NotifyBlocksCmd{}, + unmarshalled: &dcrjson.NotifyBlocksCmd{}, }, { name: "stopnotifyblocks", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("stopnotifyblocks") + return dcrjson.NewCmd("stopnotifyblocks") }, staticCmd: func() interface{} { - return btcjson.NewStopNotifyBlocksCmd() + return dcrjson.NewStopNotifyBlocksCmd() }, marshalled: `{"jsonrpc":"1.0","method":"stopnotifyblocks","params":[],"id":1}`, - unmarshalled: &btcjson.StopNotifyBlocksCmd{}, + unmarshalled: &dcrjson.StopNotifyBlocksCmd{}, }, { name: "notifynewtransactions", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("notifynewtransactions") + return dcrjson.NewCmd("notifynewtransactions") }, staticCmd: func() interface{} { - return btcjson.NewNotifyNewTransactionsCmd(nil) + return dcrjson.NewNotifyNewTransactionsCmd(nil) }, marshalled: 
`{"jsonrpc":"1.0","method":"notifynewtransactions","params":[],"id":1}`, - unmarshalled: &btcjson.NotifyNewTransactionsCmd{ - Verbose: btcjson.Bool(false), + unmarshalled: &dcrjson.NotifyNewTransactionsCmd{ + Verbose: dcrjson.Bool(false), }, }, { name: "notifynewtransactions optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("notifynewtransactions", true) + return dcrjson.NewCmd("notifynewtransactions", true) }, staticCmd: func() interface{} { - return btcjson.NewNotifyNewTransactionsCmd(btcjson.Bool(true)) + return dcrjson.NewNotifyNewTransactionsCmd(dcrjson.Bool(true)) }, marshalled: `{"jsonrpc":"1.0","method":"notifynewtransactions","params":[true],"id":1}`, - unmarshalled: &btcjson.NotifyNewTransactionsCmd{ - Verbose: btcjson.Bool(true), + unmarshalled: &dcrjson.NotifyNewTransactionsCmd{ + Verbose: dcrjson.Bool(true), }, }, { name: "stopnotifynewtransactions", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("stopnotifynewtransactions") + return dcrjson.NewCmd("stopnotifynewtransactions") }, staticCmd: func() interface{} { - return btcjson.NewStopNotifyNewTransactionsCmd() + return dcrjson.NewStopNotifyNewTransactionsCmd() }, marshalled: `{"jsonrpc":"1.0","method":"stopnotifynewtransactions","params":[],"id":1}`, - unmarshalled: &btcjson.StopNotifyNewTransactionsCmd{}, + unmarshalled: &dcrjson.StopNotifyNewTransactionsCmd{}, }, { name: "notifyreceived", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("notifyreceived", []string{"1Address"}) + return dcrjson.NewCmd("notifyreceived", []string{"1Address"}) }, staticCmd: func() interface{} { - return btcjson.NewNotifyReceivedCmd([]string{"1Address"}) + return dcrjson.NewNotifyReceivedCmd([]string{"1Address"}) }, marshalled: `{"jsonrpc":"1.0","method":"notifyreceived","params":[["1Address"]],"id":1}`, - unmarshalled: &btcjson.NotifyReceivedCmd{ + unmarshalled: &dcrjson.NotifyReceivedCmd{ Addresses: []string{"1Address"}, }, }, { name: "stopnotifyreceived", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("stopnotifyreceived", []string{"1Address"}) + return dcrjson.NewCmd("stopnotifyreceived", []string{"1Address"}) }, staticCmd: func() interface{} { - return btcjson.NewStopNotifyReceivedCmd([]string{"1Address"}) + return dcrjson.NewStopNotifyReceivedCmd([]string{"1Address"}) }, marshalled: `{"jsonrpc":"1.0","method":"stopnotifyreceived","params":[["1Address"]],"id":1}`, - unmarshalled: &btcjson.StopNotifyReceivedCmd{ + unmarshalled: &dcrjson.StopNotifyReceivedCmd{ Addresses: []string{"1Address"}, }, }, { name: "notifyspent", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("notifyspent", `[{"hash":"123","index":0}]`) + return dcrjson.NewCmd("notifyspent", `[{"hash":"123","index":0}]`) }, staticCmd: func() interface{} { - ops := []btcjson.OutPoint{{Hash: "123", Index: 0}} - return btcjson.NewNotifySpentCmd(ops) + ops := []dcrjson.OutPoint{{Hash: "123", Index: 0}} + return dcrjson.NewNotifySpentCmd(ops) }, - marshalled: `{"jsonrpc":"1.0","method":"notifyspent","params":[[{"hash":"123","index":0}]],"id":1}`, - unmarshalled: &btcjson.NotifySpentCmd{ - OutPoints: []btcjson.OutPoint{{Hash: "123", Index: 0}}, + marshalled: `{"jsonrpc":"1.0","method":"notifyspent","params":[[{"hash":"123","tree":0,"index":0}]],"id":1}`, + unmarshalled: &dcrjson.NotifySpentCmd{ + OutPoints: []dcrjson.OutPoint{{Hash: "123", Index: 0}}, }, }, { name: "stopnotifyspent", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("stopnotifyspent", `[{"hash":"123","index":0}]`) + return 
dcrjson.NewCmd("stopnotifyspent", `[{"hash":"123","index":0}]`) }, staticCmd: func() interface{} { - ops := []btcjson.OutPoint{{Hash: "123", Index: 0}} - return btcjson.NewStopNotifySpentCmd(ops) + ops := []dcrjson.OutPoint{{Hash: "123", Index: 0}} + return dcrjson.NewStopNotifySpentCmd(ops) }, - marshalled: `{"jsonrpc":"1.0","method":"stopnotifyspent","params":[[{"hash":"123","index":0}]],"id":1}`, - unmarshalled: &btcjson.StopNotifySpentCmd{ - OutPoints: []btcjson.OutPoint{{Hash: "123", Index: 0}}, + marshalled: `{"jsonrpc":"1.0","method":"stopnotifyspent","params":[[{"hash":"123","tree":0,"index":0}]],"id":1}`, + unmarshalled: &dcrjson.StopNotifySpentCmd{ + OutPoints: []dcrjson.OutPoint{{Hash: "123", Index: 0}}, }, }, { name: "rescan", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("rescan", "123", `["1Address"]`, `[{"hash":"0000000000000000000000000000000000000000000000000000000000000123","index":0}]`) + return dcrjson.NewCmd("rescan", "123", `["1Address"]`, `[{"hash":"0000000000000000000000000000000000000000000000000000000000000123","tree":0,"index":0}]`) }, staticCmd: func() interface{} { addrs := []string{"1Address"} - ops := []btcjson.OutPoint{{ + ops := []dcrjson.OutPoint{{ Hash: "0000000000000000000000000000000000000000000000000000000000000123", Index: 0, }} - return btcjson.NewRescanCmd("123", addrs, ops, nil) + return dcrjson.NewRescanCmd("123", addrs, ops, nil) }, - marshalled: `{"jsonrpc":"1.0","method":"rescan","params":["123",["1Address"],[{"hash":"0000000000000000000000000000000000000000000000000000000000000123","index":0}]],"id":1}`, - unmarshalled: &btcjson.RescanCmd{ + marshalled: `{"jsonrpc":"1.0","method":"rescan","params":["123",["1Address"],[{"hash":"0000000000000000000000000000000000000000000000000000000000000123","tree":0,"index":0}]],"id":1}`, + unmarshalled: &dcrjson.RescanCmd{ BeginBlock: "123", Addresses: []string{"1Address"}, - OutPoints: []btcjson.OutPoint{{Hash: "0000000000000000000000000000000000000000000000000000000000000123", Index: 0}}, + OutPoints: []dcrjson.OutPoint{{Hash: "0000000000000000000000000000000000000000000000000000000000000123", Index: 0}}, EndBlock: nil, }, }, { name: "rescan optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("rescan", "123", `["1Address"]`, `[{"hash":"123","index":0}]`, "456") + return dcrjson.NewCmd("rescan", "123", `["1Address"]`, `[{"hash":"123","tree":0,"index":0}]`, "456") }, staticCmd: func() interface{} { addrs := []string{"1Address"} - ops := []btcjson.OutPoint{{Hash: "123", Index: 0}} - return btcjson.NewRescanCmd("123", addrs, ops, btcjson.String("456")) + ops := []dcrjson.OutPoint{{Hash: "123", Index: 0}} + return dcrjson.NewRescanCmd("123", addrs, ops, dcrjson.String("456")) }, - marshalled: `{"jsonrpc":"1.0","method":"rescan","params":["123",["1Address"],[{"hash":"123","index":0}],"456"],"id":1}`, - unmarshalled: &btcjson.RescanCmd{ + marshalled: `{"jsonrpc":"1.0","method":"rescan","params":["123",["1Address"],[{"hash":"123","tree":0,"index":0}],"456"],"id":1}`, + unmarshalled: &dcrjson.RescanCmd{ BeginBlock: "123", Addresses: []string{"1Address"}, - OutPoints: []btcjson.OutPoint{{Hash: "123", Index: 0}}, - EndBlock: btcjson.String("456"), + OutPoints: []dcrjson.OutPoint{{Hash: "123", Index: 0}}, + EndBlock: dcrjson.String("456"), }, }, } @@ -198,7 +199,7 @@ func TestChainSvrWsCmds(t *testing.T) { for i, test := range tests { // Marshal the command as created by the new static command // creation function. 
- marshalled, err := btcjson.MarshalCmd(testID, test.staticCmd()) + marshalled, err := dcrjson.MarshalCmd(testID, test.staticCmd()) if err != nil { t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) @@ -222,7 +223,7 @@ func TestChainSvrWsCmds(t *testing.T) { // Marshal the command as created by the generic new command // creation function. - marshalled, err = btcjson.MarshalCmd(testID, cmd) + marshalled, err = dcrjson.MarshalCmd(testID, cmd) if err != nil { t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) @@ -236,7 +237,7 @@ func TestChainSvrWsCmds(t *testing.T) { continue } - var request btcjson.Request + var request dcrjson.Request if err := json.Unmarshal(marshalled, &request); err != nil { t.Errorf("Test #%d (%s) unexpected error while "+ "unmarshalling JSON-RPC request: %v", i, @@ -244,7 +245,7 @@ func TestChainSvrWsCmds(t *testing.T) { continue } - cmd, err = btcjson.UnmarshalCmd(&request) + cmd, err = dcrjson.UnmarshalCmd(&request) if err != nil { t.Errorf("UnmarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) diff --git a/btcjson/chainsvrwsntfns.go b/dcrjson/chainsvrwsntfns.go similarity index 80% rename from btcjson/chainsvrwsntfns.go rename to dcrjson/chainsvrwsntfns.go index c7e9b5aa..f8900192 100644 --- a/btcjson/chainsvrwsntfns.go +++ b/dcrjson/chainsvrwsntfns.go @@ -1,11 +1,12 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. // NOTE: This file is intended to house the RPC websocket notifications that are // supported by a chain server. -package btcjson +package dcrjson const ( // BlockConnectedNtfnMethod is the method used for notifications from @@ -16,6 +17,10 @@ const ( // the chain server that a block has been disconnected. BlockDisconnectedNtfnMethod = "blockdisconnected" + // ReorganizationNtfnMethod is the method used for notifications that the + // block chain is in the process of a reorganization. + ReorganizationNtfnMethod = "reorganization" + // RecvTxNtfnMethod is the method used for notifications from the chain // server that a transaction which pays to a registered address has been // processed. @@ -48,44 +53,70 @@ const ( // BlockConnectedNtfn defines the blockconnected JSON-RPC notification. type BlockConnectedNtfn struct { - Hash string - Height int32 - Time int64 + Hash string + Height int32 + Time int64 + VoteBits uint16 } // NewBlockConnectedNtfn returns a new instance which can be used to issue a // blockconnected JSON-RPC notification. -func NewBlockConnectedNtfn(hash string, height int32, time int64) *BlockConnectedNtfn { +func NewBlockConnectedNtfn(hash string, height int32, time int64, voteBits uint16) *BlockConnectedNtfn { return &BlockConnectedNtfn{ - Hash: hash, - Height: height, - Time: time, + Hash: hash, + Height: height, + Time: time, + VoteBits: voteBits, } } // BlockDisconnectedNtfn defines the blockdisconnected JSON-RPC notification. type BlockDisconnectedNtfn struct { - Hash string - Height int32 - Time int64 + Hash string + Height int32 + Time int64 + VoteBits uint16 } // NewBlockDisconnectedNtfn returns a new instance which can be used to issue a // blockdisconnected JSON-RPC notification. 
-func NewBlockDisconnectedNtfn(hash string, height int32, time int64) *BlockDisconnectedNtfn { +func NewBlockDisconnectedNtfn(hash string, height int32, time int64, voteBits uint16) *BlockDisconnectedNtfn { return &BlockDisconnectedNtfn{ - Hash: hash, - Height: height, - Time: time, + Hash: hash, + Height: height, + Time: time, + VoteBits: voteBits, + } +} + +// ReorganizationNtfn defines the reorganization JSON-RPC notification. +type ReorganizationNtfn struct { + OldHash string + OldHeight int32 + NewHash string + NewHeight int32 +} + +// NewReorganizationNtfn returns a new instance which can be used to issue a +// blockdisconnected JSON-RPC notification. +func NewReorganizationNtfn(oldHash string, oldHeight int32, newHash string, + newHeight int32) *ReorganizationNtfn { + return &ReorganizationNtfn{ + OldHash: oldHash, + OldHeight: oldHeight, + NewHash: newHash, + NewHeight: newHeight, } } // BlockDetails describes details of a tx in a block. type BlockDetails struct { - Height int32 `json:"height"` - Hash string `json:"hash"` - Index int `json:"index"` - Time int64 `json:"time"` + Height int32 `json:"height"` + Tree int8 `json:"tree"` + Hash string `json:"hash"` + Index int `json:"index"` + Time int64 `json:"time"` + VoteBits uint16 `json:"votebits"` } // RecvTxNtfn defines the recvtx JSON-RPC notification. @@ -187,6 +218,7 @@ func init() { MustRegisterCmd(BlockConnectedNtfnMethod, (*BlockConnectedNtfn)(nil), flags) MustRegisterCmd(BlockDisconnectedNtfnMethod, (*BlockDisconnectedNtfn)(nil), flags) + MustRegisterCmd(ReorganizationNtfnMethod, (*ReorganizationNtfn)(nil), flags) MustRegisterCmd(RecvTxNtfnMethod, (*RecvTxNtfn)(nil), flags) MustRegisterCmd(RedeemingTxNtfnMethod, (*RedeemingTxNtfn)(nil), flags) MustRegisterCmd(RescanFinishedNtfnMethod, (*RescanFinishedNtfn)(nil), flags) diff --git a/btcjson/chainsvrwsntfns_test.go b/dcrjson/chainsvrwsntfns_test.go similarity index 62% rename from btcjson/chainsvrwsntfns_test.go rename to dcrjson/chainsvrwsntfns_test.go index 70c0241a..6353e5ce 100644 --- a/btcjson/chainsvrwsntfns_test.go +++ b/dcrjson/chainsvrwsntfns_test.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package btcjson_test +package dcrjson_test import ( "bytes" @@ -11,7 +12,7 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // TestChainSvrWsNtfns tests all of the chain server websocket-specific @@ -31,93 +32,101 @@ func TestChainSvrWsNtfns(t *testing.T) { { name: "blockconnected", newNtfn: func() (interface{}, error) { - return btcjson.NewCmd("blockconnected", "123", 100000, 123456789) + return dcrjson.NewCmd("blockconnected", "123", 100000, 123456789, 0) }, staticNtfn: func() interface{} { - return btcjson.NewBlockConnectedNtfn("123", 100000, 123456789) + return dcrjson.NewBlockConnectedNtfn("123", 100000, 123456789, 0) }, - marshalled: `{"jsonrpc":"1.0","method":"blockconnected","params":["123",100000,123456789],"id":null}`, - unmarshalled: &btcjson.BlockConnectedNtfn{ - Hash: "123", - Height: 100000, - Time: 123456789, + marshalled: `{"jsonrpc":"1.0","method":"blockconnected","params":["123",100000,123456789,0],"id":null}`, + unmarshalled: &dcrjson.BlockConnectedNtfn{ + Hash: "123", + Height: 100000, + Time: 123456789, + VoteBits: 0, }, }, { name: "blockdisconnected", newNtfn: func() (interface{}, error) { - return btcjson.NewCmd("blockdisconnected", "123", 100000, 123456789) + return dcrjson.NewCmd("blockdisconnected", "123", 100000, 123456789, 0) }, staticNtfn: func() interface{} { - return btcjson.NewBlockDisconnectedNtfn("123", 100000, 123456789) + return dcrjson.NewBlockDisconnectedNtfn("123", 100000, 123456789, 0) }, - marshalled: `{"jsonrpc":"1.0","method":"blockdisconnected","params":["123",100000,123456789],"id":null}`, - unmarshalled: &btcjson.BlockDisconnectedNtfn{ - Hash: "123", - Height: 100000, - Time: 123456789, + marshalled: `{"jsonrpc":"1.0","method":"blockdisconnected","params":["123",100000,123456789,0],"id":null}`, + unmarshalled: &dcrjson.BlockDisconnectedNtfn{ + Hash: "123", + Height: 100000, + Time: 123456789, + VoteBits: 0, }, }, { name: "recvtx", newNtfn: func() (interface{}, error) { - return btcjson.NewCmd("recvtx", "001122", `{"height":100000,"hash":"123","index":0,"time":12345678}`) + return dcrjson.NewCmd("recvtx", "001122", `{"height":100000,"tree":0,"hash":"123","index":0,"time":12345678,"votebits":0}`) }, staticNtfn: func() interface{} { - blockDetails := btcjson.BlockDetails{ - Height: 100000, - Hash: "123", - Index: 0, - Time: 12345678, + blockDetails := dcrjson.BlockDetails{ + Height: 100000, + Tree: 0, + Hash: "123", + Index: 0, + Time: 12345678, + VoteBits: 0, } - return btcjson.NewRecvTxNtfn("001122", &blockDetails) + return dcrjson.NewRecvTxNtfn("001122", &blockDetails) }, - marshalled: `{"jsonrpc":"1.0","method":"recvtx","params":["001122",{"height":100000,"hash":"123","index":0,"time":12345678}],"id":null}`, - unmarshalled: &btcjson.RecvTxNtfn{ + marshalled: `{"jsonrpc":"1.0","method":"recvtx","params":["001122",{"height":100000,"tree":0,"hash":"123","index":0,"time":12345678,"votebits":0}],"id":null}`, + unmarshalled: &dcrjson.RecvTxNtfn{ HexTx: "001122", - Block: &btcjson.BlockDetails{ - Height: 100000, - Hash: "123", - Index: 0, - Time: 12345678, + Block: &dcrjson.BlockDetails{ + Height: 100000, + Tree: 0, + Hash: "123", + Index: 0, + Time: 12345678, + VoteBits: 0, }, }, }, { name: "redeemingtx", newNtfn: func() (interface{}, error) { - return btcjson.NewCmd("redeemingtx", "001122", `{"height":100000,"hash":"123","index":0,"time":12345678}`) + return dcrjson.NewCmd("redeemingtx", "001122", `{"height":100000,"tree":0,"hash":"123","index":0,"time":12345678,"votebits":0}`) }, 
staticNtfn: func() interface{} { - blockDetails := btcjson.BlockDetails{ - Height: 100000, - Hash: "123", - Index: 0, - Time: 12345678, + blockDetails := dcrjson.BlockDetails{ + Height: 100000, + Hash: "123", + Index: 0, + Time: 12345678, + VoteBits: 0, } - return btcjson.NewRedeemingTxNtfn("001122", &blockDetails) + return dcrjson.NewRedeemingTxNtfn("001122", &blockDetails) }, - marshalled: `{"jsonrpc":"1.0","method":"redeemingtx","params":["001122",{"height":100000,"hash":"123","index":0,"time":12345678}],"id":null}`, - unmarshalled: &btcjson.RedeemingTxNtfn{ + marshalled: `{"jsonrpc":"1.0","method":"redeemingtx","params":["001122",{"height":100000,"tree":0,"hash":"123","index":0,"time":12345678,"votebits":0}],"id":null}`, + unmarshalled: &dcrjson.RedeemingTxNtfn{ HexTx: "001122", - Block: &btcjson.BlockDetails{ - Height: 100000, - Hash: "123", - Index: 0, - Time: 12345678, + Block: &dcrjson.BlockDetails{ + Height: 100000, + Hash: "123", + Index: 0, + Time: 12345678, + VoteBits: 0, }, }, }, { name: "rescanfinished", newNtfn: func() (interface{}, error) { - return btcjson.NewCmd("rescanfinished", "123", 100000, 12345678) + return dcrjson.NewCmd("rescanfinished", "123", 100000, 12345678) }, staticNtfn: func() interface{} { - return btcjson.NewRescanFinishedNtfn("123", 100000, 12345678) + return dcrjson.NewRescanFinishedNtfn("123", 100000, 12345678) }, marshalled: `{"jsonrpc":"1.0","method":"rescanfinished","params":["123",100000,12345678],"id":null}`, - unmarshalled: &btcjson.RescanFinishedNtfn{ + unmarshalled: &dcrjson.RescanFinishedNtfn{ Hash: "123", Height: 100000, Time: 12345678, @@ -126,13 +135,13 @@ func TestChainSvrWsNtfns(t *testing.T) { { name: "rescanprogress", newNtfn: func() (interface{}, error) { - return btcjson.NewCmd("rescanprogress", "123", 100000, 12345678) + return dcrjson.NewCmd("rescanprogress", "123", 100000, 12345678) }, staticNtfn: func() interface{} { - return btcjson.NewRescanProgressNtfn("123", 100000, 12345678) + return dcrjson.NewRescanProgressNtfn("123", 100000, 12345678) }, marshalled: `{"jsonrpc":"1.0","method":"rescanprogress","params":["123",100000,12345678],"id":null}`, - unmarshalled: &btcjson.RescanProgressNtfn{ + unmarshalled: &dcrjson.RescanProgressNtfn{ Hash: "123", Height: 100000, Time: 12345678, @@ -141,13 +150,13 @@ func TestChainSvrWsNtfns(t *testing.T) { { name: "txaccepted", newNtfn: func() (interface{}, error) { - return btcjson.NewCmd("txaccepted", "123", 1.5) + return dcrjson.NewCmd("txaccepted", "123", 1.5) }, staticNtfn: func() interface{} { - return btcjson.NewTxAcceptedNtfn("123", 1.5) + return dcrjson.NewTxAcceptedNtfn("123", 1.5) }, marshalled: `{"jsonrpc":"1.0","method":"txaccepted","params":["123",1.5],"id":null}`, - unmarshalled: &btcjson.TxAcceptedNtfn{ + unmarshalled: &dcrjson.TxAcceptedNtfn{ TxID: "123", Amount: 1.5, }, @@ -155,10 +164,10 @@ func TestChainSvrWsNtfns(t *testing.T) { { name: "txacceptedverbose", newNtfn: func() (interface{}, error) { - return btcjson.NewCmd("txacceptedverbose", `{"hex":"001122","txid":"123","version":1,"locktime":4294967295,"vin":null,"vout":null,"confirmations":0}`) + return dcrjson.NewCmd("txacceptedverbose", `{"hex":"001122","txid":"123","version":1,"locktime":4294967295,"vin":null,"vout":null,"confirmations":0}`) }, staticNtfn: func() interface{} { - txResult := btcjson.TxRawResult{ + txResult := dcrjson.TxRawResult{ Hex: "001122", Txid: "123", Version: 1, @@ -167,11 +176,11 @@ func TestChainSvrWsNtfns(t *testing.T) { Vout: nil, Confirmations: 0, } - return 
btcjson.NewTxAcceptedVerboseNtfn(txResult) + return dcrjson.NewTxAcceptedVerboseNtfn(txResult) }, - marshalled: `{"jsonrpc":"1.0","method":"txacceptedverbose","params":[{"hex":"001122","txid":"123","version":1,"locktime":4294967295,"vin":null,"vout":null}],"id":null}`, - unmarshalled: &btcjson.TxAcceptedVerboseNtfn{ - RawTx: btcjson.TxRawResult{ + marshalled: `{"jsonrpc":"1.0","method":"txacceptedverbose","params":[{"hex":"001122","txid":"123","version":1,"locktime":4294967295,"expiry":0,"vin":null,"vout":null,"blockheight":0,"blockindex":0}],"id":null}`, + unmarshalled: &dcrjson.TxAcceptedVerboseNtfn{ + RawTx: dcrjson.TxRawResult{ Hex: "001122", Txid: "123", Version: 1, @@ -188,7 +197,7 @@ func TestChainSvrWsNtfns(t *testing.T) { for i, test := range tests { // Marshal the notification as created by the new static // creation function. The ID is nil for notifications. - marshalled, err := btcjson.MarshalCmd(nil, test.staticNtfn()) + marshalled, err := dcrjson.MarshalCmd(nil, test.staticNtfn()) if err != nil { t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) @@ -213,7 +222,7 @@ func TestChainSvrWsNtfns(t *testing.T) { // Marshal the notification as created by the generic new // notification creation function. The ID is nil for // notifications. - marshalled, err = btcjson.MarshalCmd(nil, cmd) + marshalled, err = dcrjson.MarshalCmd(nil, cmd) if err != nil { t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) @@ -227,7 +236,7 @@ func TestChainSvrWsNtfns(t *testing.T) { continue } - var request btcjson.Request + var request dcrjson.Request if err := json.Unmarshal(marshalled, &request); err != nil { t.Errorf("Test #%d (%s) unexpected error while "+ "unmarshalling JSON-RPC request: %v", i, @@ -235,7 +244,7 @@ func TestChainSvrWsNtfns(t *testing.T) { continue } - cmd, err = btcjson.UnmarshalCmd(&request) + cmd, err = dcrjson.UnmarshalCmd(&request) if err != nil { t.Errorf("UnmarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) diff --git a/dcrjson/chainsvrwsresults.go b/dcrjson/chainsvrwsresults.go new file mode 100644 index 00000000..0916a90b --- /dev/null +++ b/dcrjson/chainsvrwsresults.go @@ -0,0 +1,11 @@ +// Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package dcrjson + +// SessionResult models the data from the session command. +type SessionResult struct { + SessionID uint64 `json:"sessionid"` +} diff --git a/btcjson/cmdinfo.go b/dcrjson/cmdinfo.go similarity index 99% rename from btcjson/cmdinfo.go rename to dcrjson/cmdinfo.go index 6fe4f9f7..04fcf1c1 100644 --- a/btcjson/cmdinfo.go +++ b/dcrjson/cmdinfo.go @@ -1,8 +1,9 @@ // Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson +package dcrjson import ( "fmt" diff --git a/btcjson/cmdinfo_test.go b/dcrjson/cmdinfo_test.go similarity index 88% rename from btcjson/cmdinfo_test.go rename to dcrjson/cmdinfo_test.go index d2b14774..6959ad01 100644 --- a/btcjson/cmdinfo_test.go +++ b/dcrjson/cmdinfo_test.go @@ -1,14 +1,15 @@ // Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package btcjson_test +package dcrjson_test import ( "reflect" "testing" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // TestCmdMethod tests the CmdMethod function to ensure it retuns the expected @@ -25,35 +26,35 @@ func TestCmdMethod(t *testing.T) { { name: "unregistered type", cmd: (*int)(nil), - err: btcjson.Error{ErrorCode: btcjson.ErrUnregisteredMethod}, + err: dcrjson.Error{Code: dcrjson.ErrUnregisteredMethod}, }, { name: "nil pointer of registered type", - cmd: (*btcjson.GetBlockCmd)(nil), + cmd: (*dcrjson.GetBlockCmd)(nil), method: "getblock", }, { name: "nil instance of registered type", - cmd: &btcjson.GetBlockCountCmd{}, + cmd: &dcrjson.GetBlockCountCmd{}, method: "getblockcount", }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { - method, err := btcjson.CmdMethod(test.cmd) + method, err := dcrjson.CmdMethod(test.cmd) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Test #%d (%s) wrong error - got %T (%[3]v), "+ "want %T", i, test.name, err, test.err) continue } if err != nil { - gotErrorCode := err.(btcjson.Error).ErrorCode - if gotErrorCode != test.err.(btcjson.Error).ErrorCode { + gotErrorCode := err.(dcrjson.Error).Code + if gotErrorCode != test.err.(dcrjson.Error).Code { t.Errorf("Test #%d (%s) mismatched error code "+ "- got %v (%v), want %v", i, test.name, gotErrorCode, err, - test.err.(btcjson.Error).ErrorCode) + test.err.(dcrjson.Error).Code) continue } @@ -78,12 +79,12 @@ func TestMethodUsageFlags(t *testing.T) { name string method string err error - flags btcjson.UsageFlag + flags dcrjson.UsageFlag }{ { name: "unregistered type", method: "bogusmethod", - err: btcjson.Error{ErrorCode: btcjson.ErrUnregisteredMethod}, + err: dcrjson.Error{Code: dcrjson.ErrUnregisteredMethod}, }, { name: "getblock", @@ -93,25 +94,25 @@ func TestMethodUsageFlags(t *testing.T) { { name: "walletpassphrase", method: "walletpassphrase", - flags: btcjson.UFWalletOnly, + flags: dcrjson.UFWalletOnly, }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { - flags, err := btcjson.MethodUsageFlags(test.method) + flags, err := dcrjson.MethodUsageFlags(test.method) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Test #%d (%s) wrong error - got %T (%[3]v), "+ "want %T", i, test.name, err, test.err) continue } if err != nil { - gotErrorCode := err.(btcjson.Error).ErrorCode - if gotErrorCode != test.err.(btcjson.Error).ErrorCode { + gotErrorCode := err.(dcrjson.Error).Code + if gotErrorCode != test.err.(dcrjson.Error).Code { t.Errorf("Test #%d (%s) mismatched error code "+ "- got %v (%v), want %v", i, test.name, gotErrorCode, err, - test.err.(btcjson.Error).ErrorCode) + test.err.(dcrjson.Error).Code) continue } @@ -141,7 +142,7 @@ func TestMethodUsageText(t *testing.T) { { name: "unregistered type", method: "bogusmethod", - err: btcjson.Error{ErrorCode: btcjson.ErrUnregisteredMethod}, + err: dcrjson.Error{Code: dcrjson.ErrUnregisteredMethod}, }, { name: "getblockcount", @@ -157,19 +158,19 @@ func TestMethodUsageText(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - usage, err := btcjson.MethodUsageText(test.method) + usage, err := dcrjson.MethodUsageText(test.method) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Test #%d (%s) wrong error - got %T (%[3]v), "+ "want %T", i, test.name, err, test.err) continue } if err != nil { - gotErrorCode := err.(btcjson.Error).ErrorCode - if gotErrorCode != test.err.(btcjson.Error).ErrorCode { + gotErrorCode := 
err.(dcrjson.Error).Code + if gotErrorCode != test.err.(dcrjson.Error).Code { t.Errorf("Test #%d (%s) mismatched error code "+ "- got %v (%v), want %v", i, test.name, gotErrorCode, err, - test.err.(btcjson.Error).ErrorCode) + test.err.(dcrjson.Error).Code) continue } @@ -184,7 +185,7 @@ func TestMethodUsageText(t *testing.T) { } // Get the usage again to excerise caching. - usage, err = btcjson.MethodUsageText(test.method) + usage, err = dcrjson.MethodUsageText(test.method) if err != nil { t.Errorf("Test #%d (%s) unexpected error: %v", i, test.name, err) @@ -420,7 +421,7 @@ func TestFieldUsage(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { // Ensure usage matches the expected value. - usage := btcjson.TstFieldUsage(test.field, test.defValue) + usage := dcrjson.TstFieldUsage(test.field, test.defValue) if usage != test.expected { t.Errorf("Test #%d (%s) mismatched usage - got %v, "+ "want %v", i, test.name, usage, test.expected) diff --git a/btcjson/cmdparse.go b/dcrjson/cmdparse.go similarity index 99% rename from btcjson/cmdparse.go rename to dcrjson/cmdparse.go index 6e21b946..fbdec5a3 100644 --- a/btcjson/cmdparse.go +++ b/dcrjson/cmdparse.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson +package dcrjson import ( "encoding/json" diff --git a/btcjson/cmdparse_test.go b/dcrjson/cmdparse_test.go similarity index 76% rename from btcjson/cmdparse_test.go rename to dcrjson/cmdparse_test.go index 2f8fa1fb..07736f42 100644 --- a/btcjson/cmdparse_test.go +++ b/dcrjson/cmdparse_test.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package btcjson_test +package dcrjson_test import ( "encoding/json" @@ -10,7 +11,7 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // TestAssignField tests the assignField function handles supported combinations @@ -168,7 +169,7 @@ func TestAssignField(t *testing.T) { for i, test := range tests { dst := reflect.New(reflect.TypeOf(test.dest)).Elem() src := reflect.ValueOf(test.src) - err := btcjson.TstAssignField(1, "testField", dst, src) + err := dcrjson.TstAssignField(1, "testField", dst, src) if err != nil { t.Errorf("Test #%d (%s) unexpected error: %v", i, test.name, err) @@ -197,133 +198,133 @@ func TestAssignFieldErrors(t *testing.T) { name string dest interface{} src interface{} - err btcjson.Error + err dcrjson.Error }{ { name: "general incompatible int -> string", dest: string(0), src: int(0), - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "overflow source int -> dest int", dest: int8(0), src: int(128), - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "overflow source int -> dest uint", dest: uint8(0), src: int(256), - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "int -> float", dest: float32(0), src: int(256), - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "overflow source uint64 -> dest int64", dest: int64(0), src: uint64(1 << 63), - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "overflow source uint -> dest int", dest: int8(0), src: uint(128), - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "overflow source uint -> dest uint", dest: uint8(0), src: uint(256), - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "uint -> float", dest: float32(0), src: uint(256), - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "float -> int", dest: int(0), src: float32(1.0), - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "overflow float64 -> float32", dest: float32(0), src: float64(math.MaxFloat64), - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "invalid string -> bool", dest: true, src: "foo", - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "invalid string -> int", dest: int8(0), src: "foo", - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "overflow string -> int", dest: int8(0), src: "128", - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "invalid string -> uint", dest: uint8(0), src: "foo", - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "overflow string -> uint", dest: uint8(0), src: "256", - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "invalid string -> float", dest: 
float32(0), src: "foo", - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "overflow string -> float", dest: float32(0), src: "1.7976931348623157e+308", - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "invalid string -> array", dest: [3]int{}, src: "foo", - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "invalid string -> slice", dest: []int{}, src: "foo", - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "invalid string -> struct", dest: struct{ A int }{}, src: "foo", - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "invalid string -> map", dest: map[string]int{}, src: "foo", - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, } @@ -331,17 +332,17 @@ func TestAssignFieldErrors(t *testing.T) { for i, test := range tests { dst := reflect.New(reflect.TypeOf(test.dest)).Elem() src := reflect.ValueOf(test.src) - err := btcjson.TstAssignField(1, "testField", dst, src) + err := dcrjson.TstAssignField(1, "testField", dst, src) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Test #%d (%s) wrong error - got %T (%[3]v), "+ "want %T", i, test.name, err, test.err) continue } - gotErrorCode := err.(btcjson.Error).ErrorCode - if gotErrorCode != test.err.ErrorCode { + gotErrorCode := err.(dcrjson.Error).Code + if gotErrorCode != test.err.Code { t.Errorf("Test #%d (%s) mismatched error code - got "+ "%v (%v), want %v", i, test.name, gotErrorCode, - err, test.err.ErrorCode) + err, test.err.Code) continue } } @@ -355,47 +356,47 @@ func TestNewCmdErrors(t *testing.T) { name string method string args []interface{} - err btcjson.Error + err dcrjson.Error }{ { name: "unregistered command", method: "boguscommand", args: []interface{}{}, - err: btcjson.Error{ErrorCode: btcjson.ErrUnregisteredMethod}, + err: dcrjson.Error{Code: dcrjson.ErrUnregisteredMethod}, }, { name: "too few parameters to command with required + optional", method: "getblock", args: []interface{}{}, - err: btcjson.Error{ErrorCode: btcjson.ErrNumParams}, + err: dcrjson.Error{Code: dcrjson.ErrNumParams}, }, { name: "too many parameters to command with no optional", method: "getblockcount", args: []interface{}{"123"}, - err: btcjson.Error{ErrorCode: btcjson.ErrNumParams}, + err: dcrjson.Error{Code: dcrjson.ErrNumParams}, }, { name: "incorrect parameter type", method: "getblock", args: []interface{}{1}, - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { - _, err := btcjson.NewCmd(test.method, test.args...) + _, err := dcrjson.NewCmd(test.method, test.args...) 
if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Test #%d (%s) wrong error - got %T (%[2]v), "+ "want %T", i, test.name, err, test.err) continue } - gotErrorCode := err.(btcjson.Error).ErrorCode - if gotErrorCode != test.err.ErrorCode { + gotErrorCode := err.(dcrjson.Error).Code + if gotErrorCode != test.err.Code { t.Errorf("Test #%d (%s) mismatched error code - got "+ "%v (%v), want %v", i, test.name, gotErrorCode, - err, test.err.ErrorCode) + err, test.err.Code) continue } } @@ -409,41 +410,41 @@ func TestMarshalCmdErrors(t *testing.T) { name string id interface{} cmd interface{} - err btcjson.Error + err dcrjson.Error }{ { name: "unregistered type", id: 1, cmd: (*int)(nil), - err: btcjson.Error{ErrorCode: btcjson.ErrUnregisteredMethod}, + err: dcrjson.Error{Code: dcrjson.ErrUnregisteredMethod}, }, { name: "nil instance of registered type", id: 1, - cmd: (*btcjson.GetBlockCmd)(nil), - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + cmd: (*dcrjson.GetBlockCmd)(nil), + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "nil instance of registered type", id: []int{0, 1}, - cmd: &btcjson.GetBlockCountCmd{}, - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + cmd: &dcrjson.GetBlockCountCmd{}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { - _, err := btcjson.MarshalCmd(test.id, test.cmd) + _, err := dcrjson.MarshalCmd(test.id, test.cmd) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Test #%d (%s) wrong error - got %T (%[2]v), "+ "want %T", i, test.name, err, test.err) continue } - gotErrorCode := err.(btcjson.Error).ErrorCode - if gotErrorCode != test.err.ErrorCode { + gotErrorCode := err.(dcrjson.Error).Code + if gotErrorCode != test.err.Code { t.Errorf("Test #%d (%s) mismatched error code - got "+ "%v (%v), want %v", i, test.name, gotErrorCode, - err, test.err.ErrorCode) + err, test.err.Code) continue } } @@ -455,64 +456,64 @@ func TestUnmarshalCmdErrors(t *testing.T) { tests := []struct { name string - request btcjson.Request - err btcjson.Error + request dcrjson.Request + err dcrjson.Error }{ { name: "unregistered type", - request: btcjson.Request{ + request: dcrjson.Request{ Jsonrpc: "1.0", Method: "bogusmethod", Params: nil, ID: nil, }, - err: btcjson.Error{ErrorCode: btcjson.ErrUnregisteredMethod}, + err: dcrjson.Error{Code: dcrjson.ErrUnregisteredMethod}, }, { name: "incorrect number of params", - request: btcjson.Request{ + request: dcrjson.Request{ Jsonrpc: "1.0", Method: "getblockcount", Params: []json.RawMessage{[]byte(`"bogusparam"`)}, ID: nil, }, - err: btcjson.Error{ErrorCode: btcjson.ErrNumParams}, + err: dcrjson.Error{Code: dcrjson.ErrNumParams}, }, { name: "invalid type for a parameter", - request: btcjson.Request{ + request: dcrjson.Request{ Jsonrpc: "1.0", Method: "getblock", Params: []json.RawMessage{[]byte("1")}, ID: nil, }, - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "invalid JSON for a parameter", - request: btcjson.Request{ + request: dcrjson.Request{ Jsonrpc: "1.0", Method: "getblock", Params: []json.RawMessage{[]byte(`"1`)}, ID: nil, }, - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { - _, err := btcjson.UnmarshalCmd(&test.request) + _, err := dcrjson.UnmarshalCmd(&test.request) if reflect.TypeOf(err) != reflect.TypeOf(test.err) 
{ t.Errorf("Test #%d (%s) wrong error - got %T (%[2]v), "+ "want %T", i, test.name, err, test.err) continue } - gotErrorCode := err.(btcjson.Error).ErrorCode - if gotErrorCode != test.err.ErrorCode { + gotErrorCode := err.(dcrjson.Error).Code + if gotErrorCode != test.err.Code { t.Errorf("Test #%d (%s) mismatched error code - got "+ "%v (%v), want %v", i, test.name, gotErrorCode, - err, test.err.ErrorCode) + err, test.err.Code) continue } } diff --git a/dcrjson/dcrdextcmds.go b/dcrjson/dcrdextcmds.go new file mode 100644 index 00000000..0a296e0f --- /dev/null +++ b/dcrjson/dcrdextcmds.go @@ -0,0 +1,87 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +// NOTE: This file is intended to house the RPC commands that are supported by +// a chain server with btcd extensions. + +package dcrjson + +// ExistsAddressCmd defines the existsaddress JSON-RPC command. +type ExistsAddressCmd struct { + Address string +} + +// NewExistsAddressCmd returns a new instance which can be used to issue a +// existsaddress JSON-RPC command. +// +// The parameters which are pointers indicate they are optional. Passing nil +// for optional parameters will use the default value. +func NewExistsAddressCmd(address string) *ExistsAddressCmd { + return &ExistsAddressCmd{ + Address: address, + } +} + +// GetStakeDifficultyCmdis a type handling custom marshaling and +// unmarshaling of getstakedifficulty JSON RPC commands. +type GetStakeDifficultyCmd struct{} + +// NewGetStakeDifficultyCmd returns a new instance which can be used to +// issue a JSON-RPC getstakedifficulty command. +func NewGetStakeDifficultyCmd() *GetStakeDifficultyCmd { + return &GetStakeDifficultyCmd{} +} + +// MissedTicketsCmd is a type handling custom marshaling and +// unmarshaling of missedtickets JSON RPC commands. +type MissedTicketsCmd struct{} + +// NewMissedTicketsCmd returns a new instance which can be used to issue a JSON-RPC +// missedtickets command. +func NewMissedTicketsCmd() *MissedTicketsCmd { + return &MissedTicketsCmd{} +} + +// RebroadcastMissedCmd is a type handling custom marshaling and +// unmarshaling of rebroadcastwinners JSON RPC commands. +type RebroadcastMissedCmd struct{} + +// NewRebroadcastMissedCmd returns a new instance which can be used to +// issue a JSON-RPC rebroadcastmissed command. +func NewRebroadcastMissedCmd() *RebroadcastMissedCmd { + return &RebroadcastMissedCmd{} +} + +// RebroadcastWinnersCmd is a type handling custom marshaling and +// unmarshaling of rebroadcastwinners JSON RPC commands. +type RebroadcastWinnersCmd struct{} + +// NewRebroadcastWinnersCmd returns a new instance which can be used to +// issue a JSON-RPC rebroadcastwinners command. +func NewRebroadcastWinnersCmd() *RebroadcastWinnersCmd { + return &RebroadcastWinnersCmd{} +} + +// TicketsForAddressCmd defines the ticketsforbucket JSON-RPC command. +type TicketsForAddressCmd struct { + Address string +} + +// NewTicketsForAddressCmd returns a new instance which can be used to issue a +// JSON-RPC tickets for bucket command. +func NewTicketsForAddressCmd(addr string) *TicketsForAddressCmd { + return &TicketsForAddressCmd{addr} +} + +func init() { + // No special flags for commands in this file. 
+ flags := UsageFlag(0) + + MustRegisterCmd("existsaddress", (*ExistsAddressCmd)(nil), flags) + MustRegisterCmd("getstakedifficulty", (*GetStakeDifficultyCmd)(nil), flags) + MustRegisterCmd("missedtickets", (*MissedTicketsCmd)(nil), flags) + MustRegisterCmd("rebroadcastmissed", (*RebroadcastMissedCmd)(nil), flags) + MustRegisterCmd("rebroadcastwinners", (*RebroadcastWinnersCmd)(nil), flags) + MustRegisterCmd("ticketsforaddress", (*TicketsForAddressCmd)(nil), flags) +} diff --git a/dcrjson/dcrdextcmds_test.go b/dcrjson/dcrdextcmds_test.go new file mode 100644 index 00000000..70f40c17 --- /dev/null +++ b/dcrjson/dcrdextcmds_test.go @@ -0,0 +1,112 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package dcrjson_test + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "testing" + + "github.com/decred/dcrd/dcrjson" +) + +// TestBtcdExtCmds tests all of the btcd extended commands marshal and unmarshal +// into valid results include handling of optional fields being omitted in the +// marshalled command, while optional fields with defaults have the default +// assigned on unmarshalled commands. +func TestDcrdExtCmds(t *testing.T) { + t.Parallel() + + testID := int(1) + tests := []struct { + name string + newCmd func() (interface{}, error) + staticCmd func() interface{} + marshalled string + unmarshalled interface{} + }{ + { + name: "debuglevel", + newCmd: func() (interface{}, error) { + return dcrjson.NewCmd("debuglevel", "trace") + }, + staticCmd: func() interface{} { + return dcrjson.NewDebugLevelCmd("trace") + }, + marshalled: `{"jsonrpc":"1.0","method":"debuglevel","params":["trace"],"id":1}`, + unmarshalled: &dcrjson.DebugLevelCmd{ + LevelSpec: "trace", + }, + }, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + // Marshal the command as created by the new static command + // creation function. + marshalled, err := dcrjson.MarshalCmd(testID, test.staticCmd()) + if err != nil { + t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, + test.name, err) + continue + } + + if !bytes.Equal(marshalled, []byte(test.marshalled)) { + t.Errorf("Test #%d (%s) unexpected marshalled data - "+ + "got %s, want %s", i, test.name, marshalled, + test.marshalled) + continue + } + + // Ensure the command is created without error via the generic + // new command creation function. + cmd, err := test.newCmd() + if err != nil { + t.Errorf("Test #%d (%s) unexpected NewCmd error: %v ", + i, test.name, err) + } + + // Marshal the command as created by the generic new command + // creation function. 
+ marshalled, err = dcrjson.MarshalCmd(testID, cmd) + if err != nil { + t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, + test.name, err) + continue + } + + if !bytes.Equal(marshalled, []byte(test.marshalled)) { + t.Errorf("Test #%d (%s) unexpected marshalled data - "+ + "got %s, want %s", i, test.name, marshalled, + test.marshalled) + continue + } + + var request dcrjson.Request + if err := json.Unmarshal(marshalled, &request); err != nil { + t.Errorf("Test #%d (%s) unexpected error while "+ + "unmarshalling JSON-RPC request: %v", i, + test.name, err) + continue + } + + cmd, err = dcrjson.UnmarshalCmd(&request) + if err != nil { + t.Errorf("UnmarshalCmd #%d (%s) unexpected error: %v", i, + test.name, err) + continue + } + + if !reflect.DeepEqual(cmd, test.unmarshalled) { + t.Errorf("Test #%d (%s) unexpected unmarshalled command "+ + "- got %s, want %s", i, test.name, + fmt.Sprintf("(%T) %+[1]v", cmd), + fmt.Sprintf("(%T) %+[1]v\n", test.unmarshalled)) + continue + } + } +} diff --git a/dcrjson/dcrdextresults.go b/dcrjson/dcrdextresults.go new file mode 100644 index 00000000..51ced9f0 --- /dev/null +++ b/dcrjson/dcrdextresults.go @@ -0,0 +1,36 @@ +// Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package dcrjson + +// ExistsAddressResult models the data returned from the existsaddress +// command. +type ExistsAddressResult struct { + Exists bool `json:"exists"` +} + +// GetStakeDifficultyResult models the data returned from the getstakedifficulty +// command. +type GetStakeDifficultyResult struct { + Difficulty float64 `json:"difficulty"` +} + +// ExistsAddressResult models the data returned from the missedtickets +// command. +type MissedTicketsResult struct { + Tickets []string `json:"tickets"` +} + +// Ticket is the structure representing a ticket. +type Ticket struct { + Hash string `json:"hash"` + Owner string `json:"owner"` +} + +// TicketsForAddressResult models the data returned from the ticketforaddress +// command. +type TicketsForAddressResult struct { + Tickets []string `json:"tickets"` +} diff --git a/dcrjson/dcrwalletextcmds.go b/dcrjson/dcrwalletextcmds.go new file mode 100644 index 00000000..6d92cad9 --- /dev/null +++ b/dcrjson/dcrwalletextcmds.go @@ -0,0 +1,398 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +// NOTE: This file is intended to house the RPC commands that are +// supported by a wallet server with btcwallet extensions. + +package dcrjson + +// SStxInput represents the inputs to an SStx transaction. Specifically a +// transactionsha and output number pair, along with the output amounts. +type SStxInput struct { + Txid string `json:"txid"` + Vout uint32 `json:"vout"` + Tree int8 `json:"tree"` + Amt int64 `json:"amt"` +} + +// SStxOutput represents the output to an SStx transaction. Specifically a +// a commitment address and amount, and a change address and amount. +type SStxCommitOut struct { + Addr string `json:"addr"` + CommitAmt int64 `json:"commitamt"` + ChangeAddr string `json:"changeaddr"` + ChangeAmt int64 `json:"changeamt"` +} + +// CreateRawSStxCmd is a type handling custom marshaling and +// unmarshaling of createrawsstx JSON RPC commands. 
+type CreateRawSStxCmd struct { + Inputs []SStxInput + Amount map[string]int64 + COuts []SStxCommitOut +} + +// NewCreateRawSStxCmd creates a new CreateRawSStxCmd. +func NewCreateRawSStxCmd(inputs []SStxInput, amount map[string]int64, + couts []SStxCommitOut) *CreateRawSStxCmd { + return &CreateRawSStxCmd{ + Inputs: inputs, + Amount: amount, + COuts: couts, + } +} + +// CreateRawSSGenTxCmd is a type handling custom marshaling and +// unmarshaling of createrawssgentxcmd JSON RPC commands. +type CreateRawSSGenTxCmd struct { + Inputs []TransactionInput + VoteBits uint16 +} + +// NewCreateRawSSGenTxCmd creates a new CreateRawSSGenTxCmd. +func NewCreateRawSSGenTxCmd(inputs []TransactionInput, + vb uint16) *CreateRawSSGenTxCmd { + return &CreateRawSSGenTxCmd{ + Inputs: inputs, + VoteBits: vb, + } +} + +// CreateRawSSRtxCmd is a type handling custom marshaling and +// unmarshaling of createrawssrtx JSON RPC commands. +type CreateRawSSRtxCmd struct { + Inputs []TransactionInput +} + +// NewCreateRawSSRtxCmd creates a new CreateRawSSRtxCmd. +func NewCreateRawSSRtxCmd(inputs []TransactionInput) *CreateRawSSRtxCmd { + return &CreateRawSSRtxCmd{ + Inputs: inputs, + } +} + +// GetMultisigOutInfoCmd is a type handling custom marshaling and +// unmarshaling of getmultisigoutinfo JSON websocket extension +// commands. +type GetMultisigOutInfoCmd struct { + Hash string + Index uint32 +} + +// NewGetMultisigOutInfoCmd creates a new GetMultisigOutInfoCmd. +func NewGetMultisigOutInfoCmd(hash string, index uint32) *GetMultisigOutInfoCmd { + return &GetMultisigOutInfoCmd{hash, index} +} + +// GetMasterPubkeyCmd is a type handling custom marshaling and unmarshaling of +// getmasterpubkey JSON wallet extension commands. +type GetMasterPubkeyCmd struct { +} + +// NewGetSeedCmd creates a new GetSeedCmd. +func NewGetMasterPubkeyCmd() *GetMasterPubkeyCmd { + return &GetMasterPubkeyCmd{} +} + +// GetSeedCmd is a type handling custom marshaling and +// unmarshaling of getseed JSON wallet extension +// commands. +type GetSeedCmd struct { +} + +// NewGetSeedCmd creates a new GetSeedCmd. +func NewGetSeedCmd() *GetSeedCmd { + return &GetSeedCmd{} +} + +// GetTicketMaxPriceCmd is a type handling custom marshaling and +// unmarshaling of getticketmaxprice JSON wallet extension +// commands. +type GetTicketMaxPriceCmd struct { +} + +// NewGetTicketMaxPriceCmd creates a new GetTicketMaxPriceCmd. +func NewGetTicketMaxPriceCmd() *GetTicketMaxPriceCmd { + return &GetTicketMaxPriceCmd{} +} + +// GetTicketsCmd is a type handling custom marshaling and +// unmarshaling of gettickets JSON wallet extension +// commands. +type GetTicketsCmd struct { + IncludeImmature bool +} + +// NewGetTicketsCmd creates a new GetTicketsCmd. +func NewGetTicketsCmd(includeImmature bool) *GetTicketsCmd { + return &GetTicketsCmd{includeImmature} +} + +// GetMultisigOutInfoCmd is a type handling custom marshaling and +// unmarshaling of getmultisigoutinfo JSON websocket extension +// commands. +type ImportScriptCmd struct { + Hex string +} + +// NewGetMultisigOutInfoCmd creates a new GetMultisigOutInfoCmd. +func NewImportScriptCmd(hex string) *ImportScriptCmd { + return &ImportScriptCmd{hex} +} + +// NotifyWinningTicketsCmd is a type handling custom marshaling and +// unmarshaling of notifywinningtickets JSON websocket extension +// commands. +type NotifyWinningTicketsCmd struct { +} + +// NewNotifyWinningTicketsCmd creates a new NotifyWinningTicketsCmd. 
+func NewNotifyWinningTicketsCmd() *NotifyWinningTicketsCmd { + return &NotifyWinningTicketsCmd{} +} + +// NotifySpentAndMissedTicketsCmd is a type handling custom marshaling and +// unmarshaling of notifyspentandmissedtickets JSON websocket extension +// commands. +type NotifySpentAndMissedTicketsCmd struct { +} + +// NewNotifySpentAndMissedTicketsCmd creates a new NotifySpentAndMissedTicketsCmd. +func NewNotifySpentAndMissedTicketsCmd() *NotifySpentAndMissedTicketsCmd { + return &NotifySpentAndMissedTicketsCmd{} +} + +// NotifyNewTicketsCmd is a type handling custom marshaling and +// unmarshaling of notifynewtickets JSON websocket extension +// commands. +type NotifyNewTicketsCmd struct { +} + +// NewNotifyNewTicketsCmd creates a new NotifyNewTicketsCmd. +func NewNotifyNewTicketsCmd() *NotifyNewTicketsCmd { + return &NotifyNewTicketsCmd{} +} + +// NotifyStakeDifficultyCmd is a type handling custom marshaling and +// unmarshaling of notifystakedifficulty JSON websocket extension +// commands. +type NotifyStakeDifficultyCmd struct { +} + +// NewNotifyStakeDifficultyCmd creates a new NotifyStakeDifficultyCmd. +func NewNotifyStakeDifficultyCmd() *NotifyStakeDifficultyCmd { + return &NotifyStakeDifficultyCmd{} +} + +// PurchaseTicketCmd is a type handling custom marshaling and +// unmarshaling of purchaseticket JSON RPC commands. +type PurchaseTicketCmd struct { + FromAccount string + SpendLimit float64 // In Coins + MinConf *int `jsonrpcdefault:"1"` + TicketAddress *string + Comment *string +} + +// NewPurchaseTicketCmd creates a new PurchaseTicketCmd. +func NewPurchaseTicketCmd(fromAccount string, spendLimit float64, minConf *int, + ticketAddress *string, comment *string) *PurchaseTicketCmd { + return &PurchaseTicketCmd{ + FromAccount: fromAccount, + SpendLimit: spendLimit, + MinConf: minConf, + TicketAddress: ticketAddress, + Comment: comment, + } +} + +// RedeemMultiSigOutCmd is a type handling custom marshaling and +// unmarshaling of redeemmultisigout JSON RPC commands. +type RedeemMultiSigOutCmd struct { + Hash string + Index uint32 + Tree int8 + Address *string +} + +// NewRedeemMultiSigOutCmd creates a new RedeemMultiSigOutCmd. +func NewRedeemMultiSigOutCmd(hash string, index uint32, tree int8, + address *string) *RedeemMultiSigOutCmd { + return &RedeemMultiSigOutCmd{ + Hash: hash, + Index: index, + Tree: tree, + Address: address, + } +} + +// RedeemMultiSigOutsCmd is a type handling custom marshaling and +// unmarshaling of redeemmultisigout JSON RPC commands. +type RedeemMultiSigOutsCmd struct { + FromScrAddress string + ToAddress *string + Number *int +} + +// NewRedeemMultiSigOutCmd creates a new RedeemMultiSigOutCmd. +func NewRedeemMultiSigOutsCmd(from string, to *string, + number *int) *RedeemMultiSigOutsCmd { + return &RedeemMultiSigOutsCmd{ + FromScrAddress: from, + ToAddress: to, + Number: number, + } +} + +// SendToMultisigCmd is a type handling custom marshaling and +// unmarshaling of sendtomultisig JSON RPC commands. +type SendToMultiSigCmd struct { + FromAccount string + Amount float64 + Pubkeys []string + NRequired *int `jsonrpcdefault:"1"` + MinConf *int `jsonrpcdefault:"1"` + Comment *string +} + +// NewSendToMultiSigCmd creates a new SendToMultiSigCmd. 
+func NewSendToMultiSigCmd(fromaccount string, amount float64, pubkeys []string, + nrequired *int, minConf *int, comment *string) *SendToMultiSigCmd { + return &SendToMultiSigCmd{ + FromAccount: fromaccount, + Amount: amount, + Pubkeys: pubkeys, + NRequired: nrequired, + MinConf: minConf, + Comment: comment, + } +} + +// SendToSStxCmd is a type handling custom marshaling and +// unmarshaling of sendtosstx JSON RPC commands. +type SendToSStxCmd struct { + FromAccount string + Amounts map[string]int64 + Inputs []SStxInput + COuts []SStxCommitOut + MinConf *int `jsonrpcdefault:"1"` + Comment *string +} + +// NewSendToSStxCmd creates a new SendToSStxCmd. Optionally a +// pointer to a TemplateRequest may be provided. +func NewSendToSStxCmd(fromaccount string, amounts map[string]int64, + inputs []SStxInput, couts []SStxCommitOut, minConf *int, + comment *string) *SendToSStxCmd { + return &SendToSStxCmd{ + FromAccount: fromaccount, + Amounts: amounts, + Inputs: inputs, + COuts: couts, + MinConf: minConf, + Comment: comment, + } +} + +type SendToSSGenCmd struct { + FromAccount string + TicketHash string + BlockHash string + Height int64 + VoteBits uint16 + Comment *string +} + +// NewSendToSSGenCmd creates a new SendToSSGenCmd. Optionally a +// pointer to a TemplateRequest may be provided. +func NewSendToSSGenCmd(fromaccount string, tickethash string, blockhash string, + height int64, votebits uint16, comment *string) *SendToSSGenCmd { + return &SendToSSGenCmd{ + FromAccount: fromaccount, + TicketHash: tickethash, + BlockHash: blockhash, + Height: height, + VoteBits: votebits, + Comment: comment, + } +} + +// SendToSSRtxCmd is a type handling custom marshaling and +// unmarshaling of sendtossrtx JSON RPC commands. +type SendToSSRtxCmd struct { + FromAccount string + TicketHash string + Comment *string +} + +// NewSendToSSRtxCmd creates a new SendToSSRtxCmd. Optionally a +// pointer to a TemplateRequest may be provided. +func NewSendToSSRtxCmd(fromaccount string, tickethash string, + comment *string) *SendToSSRtxCmd { + return &SendToSSRtxCmd{ + FromAccount: fromaccount, + TicketHash: tickethash, + Comment: comment, + } +} + +// SetTicketMaxPriceCmd is a type handling custom marshaling and +// unmarshaling of setticketmaxprice JSON RPC commands. +type SetTicketMaxPriceCmd struct { + Max float64 +} + +func NewSetTicketMaxPriceCmd(max float64) *SetTicketMaxPriceCmd { + return &SetTicketMaxPriceCmd{ + Max: max, + } +} + +// SignRawTransactionsCmd defines the signrawtransactions JSON-RPC command. +type SignRawTransactionsCmd struct { + RawTxs []string + Send *bool `jsonrpcdefault:"true"` +} + +// NewSignRawTransactionCmd returns a new instance which can be used to issue a +// signrawtransactions JSON-RPC command. +func NewSignRawTransactionsCmd(hexEncodedTxs []string, + send *bool) *SignRawTransactionsCmd { + return &SignRawTransactionsCmd{ + RawTxs: hexEncodedTxs, + Send: send, + } +} + +func init() { + // The commands in this file are only usable with a wallet + // server. 
+ flags := UFWalletOnly + + MustRegisterCmd("createrawsstx", (*CreateRawSStxCmd)(nil), flags) + MustRegisterCmd("createrawssgentx", (*CreateRawSSGenTxCmd)(nil), flags) + MustRegisterCmd("createrawssrtx", (*CreateRawSSRtxCmd)(nil), flags) + MustRegisterCmd("getmultisigoutinfo", (*GetMultisigOutInfoCmd)(nil), flags) + MustRegisterCmd("getmasterpubkey", (*GetMasterPubkeyCmd)(nil), flags) + MustRegisterCmd("getseed", (*GetSeedCmd)(nil), flags) + MustRegisterCmd("getticketmaxprice", (*GetTicketMaxPriceCmd)(nil), flags) + MustRegisterCmd("gettickets", (*GetTicketsCmd)(nil), flags) + MustRegisterCmd("importscript", (*ImportScriptCmd)(nil), flags) + MustRegisterCmd("notifynewtickets", (*NotifyNewTicketsCmd)(nil), flags) + MustRegisterCmd("notifyspentandmissedtickets", + (*NotifySpentAndMissedTicketsCmd)(nil), flags) + MustRegisterCmd("notifystakedifficulty", + (*NotifyStakeDifficultyCmd)(nil), flags) + MustRegisterCmd("notifywinningtickets", + (*NotifyWinningTicketsCmd)(nil), flags) + MustRegisterCmd("purchaseticket", (*PurchaseTicketCmd)(nil), flags) + MustRegisterCmd("redeemmultisigout", (*RedeemMultiSigOutCmd)(nil), flags) + MustRegisterCmd("redeemmultisigouts", (*RedeemMultiSigOutsCmd)(nil), flags) + MustRegisterCmd("sendtomultisig", (*SendToMultiSigCmd)(nil), flags) + MustRegisterCmd("sendtosstx", (*SendToSStxCmd)(nil), flags) + MustRegisterCmd("sendtossgen", (*SendToSSGenCmd)(nil), flags) + MustRegisterCmd("sendtossrtx", (*SendToSSRtxCmd)(nil), flags) + MustRegisterCmd("setticketmaxprice", (*SetTicketMaxPriceCmd)(nil), flags) + MustRegisterCmd("signrawtransactions", (*SignRawTransactionsCmd)(nil), flags) +} diff --git a/dcrjson/dcrwalletextcmds_test.go b/dcrjson/dcrwalletextcmds_test.go new file mode 100644 index 00000000..61400233 --- /dev/null +++ b/dcrjson/dcrwalletextcmds_test.go @@ -0,0 +1,143 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package dcrjson_test + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "testing" + + "github.com/decred/dcrd/dcrjson" +) + +// TestDcrWalletExtCmds tests all of the btcwallet extended commands marshal and +// unmarshal into valid results include handling of optional fields being +// omitted in the marshalled command, while optional fields with defaults have +// the default assigned on unmarshalled commands. 
+func TestDcrWalletExtCmds(t *testing.T) { + t.Parallel() + + testID := int(1) + tests := []struct { + name string + newCmd func() (interface{}, error) + staticCmd func() interface{} + marshalled string + unmarshalled interface{} + }{ + { + name: "notifywinningtickets", + newCmd: func() (interface{}, error) { + return dcrjson.NewCmd("notifywinningtickets") + }, + staticCmd: func() interface{} { + return dcrjson.NewNotifyWinningTicketsCmd() + }, + marshalled: `{"jsonrpc":"1.0","method":"notifywinningtickets","params":[],"id":1}`, + unmarshalled: &dcrjson.NotifyWinningTicketsCmd{}, + }, + { + name: "notifyspentandmissedtickets", + newCmd: func() (interface{}, error) { + return dcrjson.NewCmd("notifyspentandmissedtickets") + }, + staticCmd: func() interface{} { + return dcrjson.NewNotifySpentAndMissedTicketsCmd() + }, + marshalled: `{"jsonrpc":"1.0","method":"notifyspentandmissedtickets","params":[],"id":1}`, + unmarshalled: &dcrjson.NotifySpentAndMissedTicketsCmd{}, + }, + { + name: "notifynewtickets", + newCmd: func() (interface{}, error) { + return dcrjson.NewCmd("notifynewtickets") + }, + staticCmd: func() interface{} { + return dcrjson.NewNotifyNewTicketsCmd() + }, + marshalled: `{"jsonrpc":"1.0","method":"notifynewtickets","params":[],"id":1}`, + unmarshalled: &dcrjson.NotifyNewTicketsCmd{}, + }, + { + name: "notifystakedifficulty", + newCmd: func() (interface{}, error) { + return dcrjson.NewCmd("notifystakedifficulty") + }, + staticCmd: func() interface{} { + return dcrjson.NewNotifyStakeDifficultyCmd() + }, + marshalled: `{"jsonrpc":"1.0","method":"notifystakedifficulty","params":[],"id":1}`, + unmarshalled: &dcrjson.NotifyStakeDifficultyCmd{}, + }, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + // Marshal the command as created by the new static command + // creation function. + marshalled, err := dcrjson.MarshalCmd(testID, test.staticCmd()) + if err != nil { + t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, + test.name, err) + continue + } + + if !bytes.Equal(marshalled, []byte(test.marshalled)) { + t.Errorf("Test #%d (%s) unexpected marshalled data - "+ + "got %s, want %s", i, test.name, marshalled, + test.marshalled) + continue + } + + // Ensure the command is created without error via the generic + // new command creation function. + cmd, err := test.newCmd() + if err != nil { + t.Errorf("Test #%d (%s) unexpected NewCmd error: %v ", + i, test.name, err) + } + + // Marshal the command as created by the generic new command + // creation function. 
+		marshalled, err = dcrjson.MarshalCmd(testID, cmd)
+		if err != nil {
+			t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i,
+				test.name, err)
+			continue
+		}
+
+		if !bytes.Equal(marshalled, []byte(test.marshalled)) {
+			t.Errorf("Test #%d (%s) unexpected marshalled data - "+
+				"got %s, want %s", i, test.name, marshalled,
+				test.marshalled)
+			continue
+		}
+
+		var request dcrjson.Request
+		if err := json.Unmarshal(marshalled, &request); err != nil {
+			t.Errorf("Test #%d (%s) unexpected error while "+
+				"unmarshalling JSON-RPC request: %v", i,
+				test.name, err)
+			continue
+		}
+
+		cmd, err = dcrjson.UnmarshalCmd(&request)
+		if err != nil {
+			t.Errorf("UnmarshalCmd #%d (%s) unexpected error: %v", i,
+				test.name, err)
+			continue
+		}
+
+		if !reflect.DeepEqual(cmd, test.unmarshalled) {
+			t.Errorf("Test #%d (%s) unexpected unmarshalled command "+
+				"- got %s, want %s", i, test.name,
+				fmt.Sprintf("(%T) %+[1]v", cmd),
+				fmt.Sprintf("(%T) %+[1]v\n", test.unmarshalled))
+			continue
+		}
+	}
+}
diff --git a/dcrjson/dcrwalletextresults.go b/dcrjson/dcrwalletextresults.go
new file mode 100644
index 00000000..e175eb75
--- /dev/null
+++ b/dcrjson/dcrwalletextresults.go
@@ -0,0 +1,83 @@
+// Copyright (c) 2015 The btcsuite developers
+// Copyright (c) 2015 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package dcrjson
+
+// GetMultisigOutInfoResult models the data returned from the getmultisigoutinfo
+// command.
+type GetMultisigOutInfoResult struct {
+	Address      string   `json:"address"`
+	RedeemScript string   `json:"redeemscript"`
+	M            uint8    `json:"m"`
+	N            uint8    `json:"n"`
+	Pubkeys      []string `json:"pubkeys"`
+	TxHash       string   `json:"txhash"`
+	BlockHeight  uint32   `json:"blockheight"`
+	BlockHash    string   `json:"blockhash"`
+	Spent        bool     `json:"spent"`
+	SpentBy      string   `json:"spentby"`
+	SpentByIndex uint32   `json:"spentbyindex"`
+	Amount       float64  `json:"amount"`
+}
+
+// GetSeedResult models the data returned from the getseed
+// command.
+type GetSeedResult struct {
+	Seed string `json:"seed"`
+}
+
+// GetMasterPubkeyResult models the data returned from the getmasterpubkey
+// command.
+type GetMasterPubkeyResult struct {
+	MasterPubkey string `json:"key"`
+}
+
+// GetTicketMaxPriceResult models the data returned from the getticketmaxprice
+// command.
+type GetTicketMaxPriceResult struct {
+	Price float64 `json:"price"`
+}
+
+// GetTicketsResult models the data returned from the gettickets
+// command.
+type GetTicketsResult struct {
+	Hashes []string `json:"hashes"`
+}
+
+// RedeemMultiSigOutResult models the data returned from the redeemmultisigout
+// command.
+type RedeemMultiSigOutResult struct {
+	Hex      string                    `json:"hex"`
+	Complete bool                      `json:"complete"`
+	Errors   []SignRawTransactionError `json:"errors,omitempty"`
+}
+
+// RedeemMultiSigOutsResult models the data returned from the redeemmultisigouts
+// command.
+type RedeemMultiSigOutsResult struct {
+	Results []RedeemMultiSigOutResult `json:"results"`
+}
+
+// SendToMultiSigResult models the data returned from the sendtomultisig
+// command.
+type SendToMultiSigResult struct {
+	TxHash       string `json:"txhash"`
+	Address      string `json:"address"`
+	RedeemScript string `json:"redeemscript"`
+}
+
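For illustration only: the result structs above are plain containers for encoding/json, and nothing in this change decodes one. The minimal sketch below unmarshals a hypothetical getmultisigoutinfo reply into the new type; every field value is invented and the surrounding client plumbing is omitted.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/decred/dcrd/dcrjson"
)

func main() {
	// The "result" payload of a getmultisigoutinfo reply as a client might
	// receive it from dcrwallet; all values here are made up.
	raw := []byte(`{"address":"DcFakeAddr","redeemscript":"51210351ae",
		"m":1,"n":2,"pubkeys":["031234","035678"],"txhash":"abcd",
		"blockheight":1000,"blockhash":"00000f","spent":false,
		"spentby":"","spentbyindex":0,"amount":1.5}`)

	var info dcrjson.GetMultisigOutInfoResult
	if err := json.Unmarshal(raw, &info); err != nil {
		fmt.Println("unmarshal:", err)
		return
	}
	fmt.Printf("%d-of-%d output at %s, spent=%v\n", info.M, info.N,
		info.Address, info.Spent)
}
```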
+// SignedTransaction is a signed transaction resulting from a signrawtransactions
+// command.
+type SignedTransaction struct {
+	SigningResult SignRawTransactionResult `json:"signingresult"`
+	Sent          bool                     `json:"sent"`
+	TxHash        *string                  `json:"txhash,omitempty"`
+}
+
+// SignRawTransactionsResult models the data returned from the signrawtransactions
+// command.
+type SignRawTransactionsResult struct {
+	Results []SignedTransaction `json:"results"`
+}
diff --git a/dcrjson/dcrwalletextwsntfns.go b/dcrjson/dcrwalletextwsntfns.go
new file mode 100644
index 00000000..bc22d3d5
--- /dev/null
+++ b/dcrjson/dcrwalletextwsntfns.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2015-2016 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+// NOTE: This file is intended to house the RPC websocket notifications that are
+// supported by a chain server.
+
+package dcrjson
+
+const (
+	// TicketPurchasedNtfnMethod is the method of the dcrwallet
+	// ticketpurchased notification.
+	TicketPurchasedNtfnMethod = "ticketpurchased"
+
+	// VoteCreatedNtfnMethod is the method of the dcrwallet
+	// votecreated notification.
+	VoteCreatedNtfnMethod = "votecreated"
+
+	// RevocationCreatedNtfnMethod is the method of the dcrwallet
+	// revocationcreated notification.
+	RevocationCreatedNtfnMethod = "revocationcreated"
+
+	// WinningTicketsNtfnMethod is the method of the daemon
+	// winningtickets notification.
+	WinningTicketsNtfnMethod = "winningtickets"
+
+	// SpentAndMissedTicketsNtfnMethod is the method of the daemon
+	// spentandmissedtickets notification.
+	SpentAndMissedTicketsNtfnMethod = "spentandmissedtickets"
+
+	// NewTicketsNtfnMethod is the method of the daemon
+	// newtickets notification.
+	NewTicketsNtfnMethod = "newtickets"
+
+	// StakeDifficultyNtfnMethod is the method of the daemon
+	// stakedifficulty notification.
+	StakeDifficultyNtfnMethod = "stakedifficulty"
+)
+
+// TicketPurchasedNtfn is a type handling custom marshaling and
+// unmarshaling of ticketpurchased JSON websocket notifications.
+type TicketPurchasedNtfn struct {
+	TxHash string
+	Amount int64 // SStx only
+}
+
+// NewTicketPurchasedNtfn creates a new TicketPurchasedNtfn.
+func NewTicketPurchasedNtfn(txHash string, amount int64) *TicketPurchasedNtfn {
+	return &TicketPurchasedNtfn{
+		TxHash: txHash,
+		Amount: amount,
+	}
+}
+
+// VoteCreatedNtfn is a type handling custom marshaling and
+// unmarshaling of votecreated JSON websocket notifications.
+type VoteCreatedNtfn struct {
+	TxHash    string
+	BlockHash string
+	Height    int32
+	SStxIn    string
+	VoteBits  uint16
+}
+
+// NewVoteCreatedNtfn creates a new VoteCreatedNtfn.
+func NewVoteCreatedNtfn(txHash string, blockHash string, height int32, sstxIn string, voteBits uint16) *VoteCreatedNtfn {
+	return &VoteCreatedNtfn{
+		TxHash:    txHash,
+		BlockHash: blockHash,
+		Height:    height,
+		SStxIn:    sstxIn,
+		VoteBits:  voteBits,
+	}
+}
+
+// RevocationCreatedNtfn is a type handling custom marshaling and
+// unmarshaling of revocationcreated JSON websocket notifications.
+type RevocationCreatedNtfn struct {
+	TxHash string
+	SStxIn string
+}
+
+// NewRevocationCreatedNtfn creates a new RevocationCreatedNtfn.
+func NewRevocationCreatedNtfn(txHash string, sstxIn string) *RevocationCreatedNtfn {
+	return &RevocationCreatedNtfn{
+		TxHash: txHash,
+		SStxIn: sstxIn,
+	}
+}
+
+// WinningTicketsNtfn is a type handling custom marshaling and
+// unmarshaling of winningtickets JSON websocket notifications.
+type WinningTicketsNtfn struct { + BlockHash string + BlockHeight int32 + Tickets map[string]string +} + +// NewWinningTicketsNtfn creates a new WinningTicketsNtfn. +func NewWinningTicketsNtfn(hash string, height int32, tickets map[string]string) *WinningTicketsNtfn { + return &WinningTicketsNtfn{ + BlockHash: hash, + BlockHeight: height, + Tickets: tickets, + } +} + +// SpentAndMissedTicketsNtfn is a type handling custom marshaling and +// unmarshaling of spentandmissedtickets JSON websocket notifications. +type SpentAndMissedTicketsNtfn struct { + Hash string + Height int32 + StakeDiff int64 + Tickets map[string]string +} + +// NewSpentAndMissedTicketsNtfn creates a new SpentAndMissedTicketsNtfn. +func NewSpentAndMissedTicketsNtfn(hash string, height int32, stakeDiff int64, tickets map[string]string) *SpentAndMissedTicketsNtfn { + return &SpentAndMissedTicketsNtfn{ + Hash: hash, + Height: height, + StakeDiff: stakeDiff, + Tickets: tickets, + } +} + +// NewTicketsNtfn is a type handling custom marshaling and +// unmarshaling of newtickets JSON websocket notifications. +type NewTicketsNtfn struct { + Hash string + Height int32 + StakeDiff int64 + Tickets []string +} + +// NewNewTicketsNtfn creates a new NewTicketsNtfn. +func NewNewTicketsNtfn(hash string, height int32, stakeDiff int64, tickets []string) *NewTicketsNtfn { + return &NewTicketsNtfn{ + Hash: hash, + Height: height, + StakeDiff: stakeDiff, + Tickets: tickets, + } +} + +// StakeDifficultyNtfn is a type handling custom marshaling and +// unmarshaling of stakedifficulty JSON websocket notifications. +type StakeDifficultyNtfn struct { + BlockHash string + BlockHeight int32 + StakeDiff int64 +} + +// NewStakeDifficultyNtfn creates a new StakeDifficultyNtfn. +func NewStakeDifficultyNtfn(hash string, height int32, stakeDiff int64) *StakeDifficultyNtfn { + return &StakeDifficultyNtfn{ + BlockHash: hash, + BlockHeight: height, + StakeDiff: stakeDiff, + } +} + +func init() { + // The commands in this file are only usable by websockets and are + // notifications. + flags := UFWalletOnly | UFWebsocketOnly | UFNotification + + MustRegisterCmd(TicketPurchasedNtfnMethod, (*TicketPurchasedNtfn)(nil), flags) + MustRegisterCmd(VoteCreatedNtfnMethod, (*VoteCreatedNtfn)(nil), flags) + MustRegisterCmd(RevocationCreatedNtfnMethod, (*RevocationCreatedNtfn)(nil), flags) + MustRegisterCmd(WinningTicketsNtfnMethod, (*WinningTicketsNtfn)(nil), flags) + MustRegisterCmd(SpentAndMissedTicketsNtfnMethod, (*SpentAndMissedTicketsNtfn)(nil), flags) + MustRegisterCmd(NewTicketsNtfnMethod, (*NewTicketsNtfn)(nil), flags) + MustRegisterCmd(StakeDifficultyNtfnMethod, (*StakeDifficultyNtfn)(nil), flags) +} diff --git a/dcrjson/dcrwalletextwsntfns_test.go b/dcrjson/dcrwalletextwsntfns_test.go new file mode 100644 index 00000000..0c345d95 --- /dev/null +++ b/dcrjson/dcrwalletextwsntfns_test.go @@ -0,0 +1,191 @@ +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package dcrjson_test + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "testing" + + "github.com/decred/dcrd/dcrjson" +) + +// TestChainSvrWsNtfns tests all of the chain server websocket-specific +// notifications marshal and unmarshal into valid results include handling of +// optional fields being omitted in the marshalled command, while optional +// fields with defaults have the default assigned on unmarshalled commands. 
+func TestDcrwalletChainSvrWsNtfns(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + newNtfn func() (interface{}, error) + staticNtfn func() interface{} + marshalled string + unmarshalled interface{} + }{ + { + name: "ticketpurchase", + newNtfn: func() (interface{}, error) { + return dcrjson.NewCmd("ticketpurchased", "123", 5) + }, + staticNtfn: func() interface{} { + return dcrjson.NewTicketPurchasedNtfn("123", 5) + }, + marshalled: `{"jsonrpc":"1.0","method":"ticketpurchased","params":["123",5],"id":null}`, + unmarshalled: &dcrjson.TicketPurchasedNtfn{ + TxHash: "123", + Amount: 5, + }, + }, + { + name: "votecreated", + newNtfn: func() (interface{}, error) { + return dcrjson.NewCmd("votecreated", "123", "1234", 100, "12345", 1) + }, + staticNtfn: func() interface{} { + return dcrjson.NewVoteCreatedNtfn("123", "1234", 100, "12345", 1) + }, + marshalled: `{"jsonrpc":"1.0","method":"votecreated","params":["123","1234",100,"12345",1],"id":null}`, + unmarshalled: &dcrjson.VoteCreatedNtfn{ + TxHash: "123", + BlockHash: "1234", + Height: 100, + SStxIn: "12345", + VoteBits: 1, + }, + }, + { + name: "revocationcreated", + newNtfn: func() (interface{}, error) { + return dcrjson.NewCmd("revocationcreated", "123", "1234") + }, + staticNtfn: func() interface{} { + return dcrjson.NewRevocationCreatedNtfn("123", "1234") + }, + marshalled: `{"jsonrpc":"1.0","method":"revocationcreated","params":["123","1234"],"id":null}`, + unmarshalled: &dcrjson.RevocationCreatedNtfn{ + TxHash: "123", + SStxIn: "1234", + }, + }, + { + name: "winningtickets", + newNtfn: func() (interface{}, error) { + return dcrjson.NewCmd("winningtickets", "123", 100, map[string]string{"a": "b"}) + }, + staticNtfn: func() interface{} { + return dcrjson.NewWinningTicketsNtfn("123", 100, map[string]string{"a": "b"}) + }, + marshalled: `{"jsonrpc":"1.0","method":"winningtickets","params":["123",100,{"a":"b"}],"id":null}`, + unmarshalled: &dcrjson.WinningTicketsNtfn{ + BlockHash: "123", + BlockHeight: 100, + Tickets: map[string]string{"a": "b"}, + }, + }, + { + name: "spentandmissedtickets", + newNtfn: func() (interface{}, error) { + return dcrjson.NewCmd("spentandmissedtickets", "123", 100, 3, map[string]string{"a": "b"}) + }, + staticNtfn: func() interface{} { + return dcrjson.NewSpentAndMissedTicketsNtfn("123", 100, 3, map[string]string{"a": "b"}) + }, + marshalled: `{"jsonrpc":"1.0","method":"spentandmissedtickets","params":["123",100,3,{"a":"b"}],"id":null}`, + unmarshalled: &dcrjson.SpentAndMissedTicketsNtfn{ + Hash: "123", + Height: 100, + StakeDiff: 3, + Tickets: map[string]string{"a": "b"}, + }, + }, + { + name: "newtickets", + newNtfn: func() (interface{}, error) { + return dcrjson.NewCmd("newtickets", "123", 100, 3, []string{"a", "b"}) + }, + staticNtfn: func() interface{} { + return dcrjson.NewNewTicketsNtfn("123", 100, 3, []string{"a", "b"}) + }, + marshalled: `{"jsonrpc":"1.0","method":"newtickets","params":["123",100,3,["a","b"]],"id":null}`, + unmarshalled: &dcrjson.NewTicketsNtfn{ + Hash: "123", + Height: 100, + StakeDiff: 3, + Tickets: []string{"a", "b"}, + }, + }, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + // Marshal the notification as created by the new static + // creation function. The ID is nil for notifications. 
+ marshalled, err := dcrjson.MarshalCmd(nil, test.staticNtfn()) + if err != nil { + t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, + test.name, err) + continue + } + + if !bytes.Equal(marshalled, []byte(test.marshalled)) { + t.Errorf("Test #%d (%s) unexpected marshalled data - "+ + "got %s, want %s", i, test.name, marshalled, + test.marshalled) + continue + } + + // Ensure the notification is created without error via the + // generic new notification creation function. + cmd, err := test.newNtfn() + if err != nil { + t.Errorf("Test #%d (%s) unexpected NewCmd error: %v ", + i, test.name, err) + } + + // Marshal the notification as created by the generic new + // notification creation function. The ID is nil for + // notifications. + marshalled, err = dcrjson.MarshalCmd(nil, cmd) + if err != nil { + t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, + test.name, err) + continue + } + + if !bytes.Equal(marshalled, []byte(test.marshalled)) { + t.Errorf("Test #%d (%s) unexpected marshalled data - "+ + "got %s, want %s", i, test.name, marshalled, + test.marshalled) + continue + } + + var request dcrjson.Request + if err := json.Unmarshal(marshalled, &request); err != nil { + t.Errorf("Test #%d (%s) unexpected error while "+ + "unmarshalling JSON-RPC request: %v", i, + test.name, err) + continue + } + + cmd, err = dcrjson.UnmarshalCmd(&request) + if err != nil { + t.Errorf("UnmarshalCmd #%d (%s) unexpected error: %v", i, + test.name, err) + continue + } + + if !reflect.DeepEqual(cmd, test.unmarshalled) { + t.Errorf("Test #%d (%s) unexpected unmarshalled command "+ + "- got %s, want %s", i, test.name, + fmt.Sprintf("(%T) %+[1]v", cmd), + fmt.Sprintf("(%T) %+[1]v\n", test.unmarshalled)) + continue + } + } +} diff --git a/btcjson/doc.go b/dcrjson/doc.go similarity index 97% rename from btcjson/doc.go rename to dcrjson/doc.go index 165b9ef9..49be3f29 100644 --- a/btcjson/doc.go +++ b/dcrjson/doc.go @@ -1,9 +1,10 @@ // Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. /* -Package btcjson provides primitives for working with the bitcoin JSON-RPC API. +Package dcrjson provides primitives for working with the decred JSON-RPC API. Overview @@ -137,10 +138,10 @@ returned from the various functions available in this package. They identify issues such as unsupported field types, attempts to register malformed commands, and attempting to create a new command with an improper number of parameters. The specific reason for the error can be detected by type asserting it to a -*btcjson.Error and accessing the ErrorCode field. +*dcrjson.Error and accessing the ErrorCode field. The second category of errors (type RPCError), on the other hand, are useful for returning errors to RPC clients. Consequently, they are used in the previously described Response type. */ -package btcjson +package dcrjson diff --git a/btcjson/error.go b/dcrjson/error.go similarity index 93% rename from btcjson/error.go rename to dcrjson/error.go index 3d72329f..8d88c0c2 100644 --- a/btcjson/error.go +++ b/dcrjson/error.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson +package dcrjson import ( "fmt" @@ -96,16 +97,16 @@ func (e ErrorCode) String() string { // a JSON-RPC Response. 
The caller can use type assertions to determine the // specific error and access the ErrorCode field. type Error struct { - ErrorCode ErrorCode // Describes the kind of error - Description string // Human readable description of the issue + Code ErrorCode // Describes the kind of error + Message string // Human readable description of the issue } // Error satisfies the error interface and prints human-readable errors. func (e Error) Error() string { - return e.Description + return e.Message } // makeError creates an Error given a set of arguments. func makeError(c ErrorCode, desc string) Error { - return Error{ErrorCode: c, Description: desc} + return Error{Code: c, Message: desc} } diff --git a/btcjson/error_test.go b/dcrjson/error_test.go similarity index 55% rename from btcjson/error_test.go rename to dcrjson/error_test.go index 8eb93c75..ae343ff5 100644 --- a/btcjson/error_test.go +++ b/dcrjson/error_test.go @@ -1,13 +1,14 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson_test +package dcrjson_test import ( "testing" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // TestErrorCodeStringer tests the stringized output for the ErrorCode type. @@ -15,26 +16,26 @@ func TestErrorCodeStringer(t *testing.T) { t.Parallel() tests := []struct { - in btcjson.ErrorCode + in dcrjson.ErrorCode want string }{ - {btcjson.ErrDuplicateMethod, "ErrDuplicateMethod"}, - {btcjson.ErrInvalidUsageFlags, "ErrInvalidUsageFlags"}, - {btcjson.ErrInvalidType, "ErrInvalidType"}, - {btcjson.ErrEmbeddedType, "ErrEmbeddedType"}, - {btcjson.ErrUnexportedField, "ErrUnexportedField"}, - {btcjson.ErrUnsupportedFieldType, "ErrUnsupportedFieldType"}, - {btcjson.ErrNonOptionalField, "ErrNonOptionalField"}, - {btcjson.ErrNonOptionalDefault, "ErrNonOptionalDefault"}, - {btcjson.ErrMismatchedDefault, "ErrMismatchedDefault"}, - {btcjson.ErrUnregisteredMethod, "ErrUnregisteredMethod"}, - {btcjson.ErrNumParams, "ErrNumParams"}, - {btcjson.ErrMissingDescription, "ErrMissingDescription"}, + {dcrjson.ErrDuplicateMethod, "ErrDuplicateMethod"}, + {dcrjson.ErrInvalidUsageFlags, "ErrInvalidUsageFlags"}, + {dcrjson.ErrInvalidType, "ErrInvalidType"}, + {dcrjson.ErrEmbeddedType, "ErrEmbeddedType"}, + {dcrjson.ErrUnexportedField, "ErrUnexportedField"}, + {dcrjson.ErrUnsupportedFieldType, "ErrUnsupportedFieldType"}, + {dcrjson.ErrNonOptionalField, "ErrNonOptionalField"}, + {dcrjson.ErrNonOptionalDefault, "ErrNonOptionalDefault"}, + {dcrjson.ErrMismatchedDefault, "ErrMismatchedDefault"}, + {dcrjson.ErrUnregisteredMethod, "ErrUnregisteredMethod"}, + {dcrjson.ErrNumParams, "ErrNumParams"}, + {dcrjson.ErrMissingDescription, "ErrMissingDescription"}, {0xffff, "Unknown ErrorCode (65535)"}, } // Detect additional error codes that don't have the stringer added. 
- if len(tests)-1 != int(btcjson.TstNumErrorCodes) { + if len(tests)-1 != int(dcrjson.TstNumErrorCodes) { t.Errorf("It appears an error code was added without adding an " + "associated stringer test") } @@ -55,15 +56,15 @@ func TestError(t *testing.T) { t.Parallel() tests := []struct { - in btcjson.Error + in dcrjson.Error want string }{ { - btcjson.Error{Description: "some error"}, + dcrjson.Error{Message: "some error"}, "some error", }, { - btcjson.Error{Description: "human-readable error"}, + dcrjson.Error{Message: "human-readable error"}, "human-readable error", }, } diff --git a/btcjson/example_test.go b/dcrjson/example_test.go similarity index 90% rename from btcjson/example_test.go rename to dcrjson/example_test.go index 527252c7..98ace923 100644 --- a/btcjson/example_test.go +++ b/dcrjson/example_test.go @@ -1,14 +1,15 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson_test +package dcrjson_test import ( "encoding/json" "fmt" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // This example demonstrates how to create and marshal a command into a JSON-RPC @@ -17,17 +18,17 @@ func ExampleMarshalCmd() { // Create a new getblock command. Notice the nil parameter indicates // to use the default parameter for that fields. This is a common // pattern used in all of the NewCmd functions in this package for - // optional fields. Also, notice the call to btcjson.Bool which is a + // optional fields. Also, notice the call to dcrjson.Bool which is a // convenience function for creating a pointer out of a primitive for // optional parameters. blockHash := "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f" - gbCmd := btcjson.NewGetBlockCmd(blockHash, btcjson.Bool(false), nil) + gbCmd := dcrjson.NewGetBlockCmd(blockHash, dcrjson.Bool(false), nil) // Marshal the command to the format suitable for sending to the RPC // server. Typically the client would increment the id here which is // request so the response can be identified. id := 1 - marshalledBytes, err := btcjson.MarshalCmd(id, gbCmd) + marshalledBytes, err := dcrjson.MarshalCmd(id, gbCmd) if err != nil { fmt.Println(err) return @@ -49,7 +50,7 @@ func ExampleUnmarshalCmd() { data := []byte(`{"jsonrpc":"1.0","method":"getblock","params":["000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f",false],"id":1}`) // Unmarshal the raw bytes from the wire into a JSON-RPC request. - var request btcjson.Request + var request dcrjson.Request if err := json.Unmarshal(data, &request); err != nil { fmt.Println(err) return @@ -69,14 +70,14 @@ func ExampleUnmarshalCmd() { } // Unmarshal the request into a concrete command. - cmd, err := btcjson.UnmarshalCmd(&request) + cmd, err := dcrjson.UnmarshalCmd(&request) if err != nil { fmt.Println(err) return } // Type assert the command to the appropriate type. - gbCmd, ok := cmd.(*btcjson.GetBlockCmd) + gbCmd, ok := cmd.(*dcrjson.GetBlockCmd) if !ok { fmt.Printf("Incorrect command type: %T\n", cmd) return @@ -97,7 +98,7 @@ func ExampleUnmarshalCmd() { func ExampleMarshalResponse() { // Marshal a new JSON-RPC response. For example, this is a response // to a getblockheight request. 
- marshalledBytes, err := btcjson.MarshalResponse(1, 350001, nil) + marshalledBytes, err := dcrjson.MarshalResponse(1, 350001, nil) if err != nil { fmt.Println(err) return @@ -121,7 +122,7 @@ func Example_unmarshalResponse() { data := []byte(`{"result":350001,"error":null,"id":1}`) // Unmarshal the raw bytes from the wire into a JSON-RPC response. - var response btcjson.Response + var response dcrjson.Response if err := json.Unmarshal(data, &response); err != nil { fmt.Println("Malformed JSON-RPC response:", err) return diff --git a/btcjson/export_test.go b/dcrjson/export_test.go similarity index 96% rename from btcjson/export_test.go rename to dcrjson/export_test.go index e0246fc2..32738e88 100644 --- a/btcjson/export_test.go +++ b/dcrjson/export_test.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson +package dcrjson // TstHighestUsageFlagBit makes the internal highestUsageFlagBit parameter // available to the test package. diff --git a/btcjson/help.go b/dcrjson/help.go similarity index 99% rename from btcjson/help.go rename to dcrjson/help.go index 113960fd..bd80ed2d 100644 --- a/btcjson/help.go +++ b/dcrjson/help.go @@ -1,8 +1,9 @@ // Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson +package dcrjson import ( "bytes" diff --git a/btcjson/help_test.go b/dcrjson/help_test.go similarity index 94% rename from btcjson/help_test.go rename to dcrjson/help_test.go index 265e0c2c..a3e065c1 100644 --- a/btcjson/help_test.go +++ b/dcrjson/help_test.go @@ -1,14 +1,15 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson_test +package dcrjson_test import ( "reflect" "testing" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // TestHelpReflectInternals ensures the various help functions which deal with @@ -237,7 +238,7 @@ func TestHelpReflectInternals(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { // Ensure the description key is the expected value. - key := btcjson.TstReflectTypeToJSONType(xT, test.reflectType) + key := dcrjson.TstReflectTypeToJSONType(xT, test.reflectType) if key != test.key { t.Errorf("Test #%d (%s) unexpected key - got: %v, "+ "want: %v", i, test.name, key, test.key) @@ -245,7 +246,7 @@ func TestHelpReflectInternals(t *testing.T) { } // Ensure the generated example is as expected. - examples, isComplex := btcjson.TstReflectTypeToJSONExample(xT, + examples, isComplex := dcrjson.TstReflectTypeToJSONExample(xT, test.reflectType, test.indentLevel, "fdk") if isComplex != test.isComplex { t.Errorf("Test #%d (%s) unexpected isComplex - got: %v, "+ @@ -269,7 +270,7 @@ func TestHelpReflectInternals(t *testing.T) { } // Ensure the generated result type help is as expected. 
- helpText := btcjson.TstResultTypeHelp(xT, test.reflectType, "fdk") + helpText := dcrjson.TstResultTypeHelp(xT, test.reflectType, "fdk") if helpText != test.help { t.Errorf("Test #%d (%s) unexpected result help - "+ "got: %v, want: %v", i, test.name, helpText, @@ -277,7 +278,7 @@ func TestHelpReflectInternals(t *testing.T) { continue } - isValid := btcjson.TstIsValidResultType(test.reflectType.Kind()) + isValid := dcrjson.TstIsValidResultType(test.reflectType.Kind()) if isValid != !test.isInvalid { t.Errorf("Test #%d (%s) unexpected result type validity "+ "- got: %v", i, test.name, isValid) @@ -402,7 +403,7 @@ func TestResultStructHelp(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - results := btcjson.TstResultStructHelp(xT, test.reflectType, 0) + results := dcrjson.TstResultStructHelp(xT, test.reflectType, 0) if len(results) != len(test.expected) { t.Errorf("Test #%d (%s) unexpected result length - "+ "got: %v, want: %v", i, test.name, len(results), @@ -555,7 +556,7 @@ func TestHelpArgInternals(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - help := btcjson.TstArgHelp(xT, test.reflectType, test.defaults, + help := dcrjson.TstArgHelp(xT, test.reflectType, test.defaults, test.method) if help != test.help { t.Errorf("Test #%d (%s) unexpected help - got:\n%v\n"+ @@ -648,7 +649,7 @@ func TestMethodHelp(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - help := btcjson.TestMethodHelp(xT, test.reflectType, + help := dcrjson.TestMethodHelp(xT, test.reflectType, test.defaults, test.method, test.resultTypes) if help != test.help { t.Errorf("Test #%d (%s) unexpected help - got:\n%v\n"+ @@ -667,47 +668,47 @@ func TestGenerateHelpErrors(t *testing.T) { name string method string resultTypes []interface{} - err btcjson.Error + err dcrjson.Error }{ { name: "unregistered command", method: "boguscommand", - err: btcjson.Error{ErrorCode: btcjson.ErrUnregisteredMethod}, + err: dcrjson.Error{Code: dcrjson.ErrUnregisteredMethod}, }, { name: "non-pointer result type", method: "help", resultTypes: []interface{}{0}, - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "invalid result type", method: "help", resultTypes: []interface{}{(*complex64)(nil)}, - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "missing description", method: "help", resultTypes: []interface{}{(*string)(nil), nil}, - err: btcjson.Error{ErrorCode: btcjson.ErrMissingDescription}, + err: dcrjson.Error{Code: dcrjson.ErrMissingDescription}, }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { - _, err := btcjson.GenerateHelp(test.method, nil, + _, err := dcrjson.GenerateHelp(test.method, nil, test.resultTypes...) 
if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Test #%d (%s) wrong error - got %T (%[2]v), "+ "want %T", i, test.name, err, test.err) continue } - gotErrorCode := err.(btcjson.Error).ErrorCode - if gotErrorCode != test.err.ErrorCode { + gotErrorCode := err.(dcrjson.Error).Code + if gotErrorCode != test.err.Code { t.Errorf("Test #%d (%s) mismatched error code - got "+ "%v (%v), want %v", i, test.name, gotErrorCode, - err, test.err.ErrorCode) + err, test.err.Code) continue } } @@ -723,7 +724,7 @@ func TestGenerateHelp(t *testing.T) { "help--synopsis": "test", "help-command": "test", } - help, err := btcjson.GenerateHelp("help", descs) + help, err := dcrjson.GenerateHelp("help", descs) if err != nil { t.Fatalf("GenerateHelp: unexpected error: %v", err) } diff --git a/btcjson/helpers.go b/dcrjson/helpers.go similarity index 97% rename from btcjson/helpers.go rename to dcrjson/helpers.go index d9b452e7..f3cd9916 100644 --- a/btcjson/helpers.go +++ b/dcrjson/helpers.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson +package dcrjson // Bool is a helper routine that allocates a new bool value to store v and // returns a pointer to it. This is useful when assigning optional parameters. diff --git a/btcjson/helpers_test.go b/dcrjson/helpers_test.go similarity index 84% rename from btcjson/helpers_test.go rename to dcrjson/helpers_test.go index 7bcaf4bc..83c59925 100644 --- a/btcjson/helpers_test.go +++ b/dcrjson/helpers_test.go @@ -1,14 +1,15 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package btcjson_test +package dcrjson_test import ( "reflect" "testing" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // TestHelpers tests the various helper functions which create pointers to @@ -24,7 +25,7 @@ func TestHelpers(t *testing.T) { { name: "bool", f: func() interface{} { - return btcjson.Bool(true) + return dcrjson.Bool(true) }, expected: func() interface{} { val := true @@ -34,7 +35,7 @@ func TestHelpers(t *testing.T) { { name: "int", f: func() interface{} { - return btcjson.Int(5) + return dcrjson.Int(5) }, expected: func() interface{} { val := int(5) @@ -44,7 +45,7 @@ func TestHelpers(t *testing.T) { { name: "uint", f: func() interface{} { - return btcjson.Uint(5) + return dcrjson.Uint(5) }, expected: func() interface{} { val := uint(5) @@ -54,7 +55,7 @@ func TestHelpers(t *testing.T) { { name: "int32", f: func() interface{} { - return btcjson.Int32(5) + return dcrjson.Int32(5) }, expected: func() interface{} { val := int32(5) @@ -64,7 +65,7 @@ func TestHelpers(t *testing.T) { { name: "uint32", f: func() interface{} { - return btcjson.Uint32(5) + return dcrjson.Uint32(5) }, expected: func() interface{} { val := uint32(5) @@ -74,7 +75,7 @@ func TestHelpers(t *testing.T) { { name: "int64", f: func() interface{} { - return btcjson.Int64(5) + return dcrjson.Int64(5) }, expected: func() interface{} { val := int64(5) @@ -84,7 +85,7 @@ func TestHelpers(t *testing.T) { { name: "uint64", f: func() interface{} { - return btcjson.Uint64(5) + return dcrjson.Uint64(5) }, expected: func() interface{} { val := uint64(5) @@ -94,7 +95,7 @@ func TestHelpers(t *testing.T) { { name: "string", f: func() interface{} { - return btcjson.String("abc") + return dcrjson.String("abc") }, expected: func() interface{} { val := "abc" diff --git a/dcrjson/jsonerr.go b/dcrjson/jsonerr.go new file mode 100644 index 00000000..671a24b2 --- /dev/null +++ b/dcrjson/jsonerr.go @@ -0,0 +1,176 @@ +// Copyright (c) 2013-2014 Conformal Systems LLC. +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package dcrjson + +// Standard JSON-RPC 2.0 errors +var ( + ErrInvalidRequest = Error{ + Code: -32600, + Message: "Invalid request", + } + ErrMethodNotFound = Error{ + Code: -32601, + Message: "Method not found", + } + ErrInvalidParams = Error{ + Code: -32602, + Message: "Invalid paramaters", + } + ErrInternal = Error{ + Code: -32603, + Message: "Internal error", + } + ErrParse = Error{ + Code: -32700, + Message: "Parse error", + } +) + +// General application defined JSON errors +var ( + ErrMisc = Error{ + Code: -1, + Message: "Miscellaneous error", + } + ErrForbiddenBySafeMode = Error{ + Code: -2, + Message: "Server is in safe mode, and command is not allowed in safe mode", + } + ErrType = Error{ + Code: -3, + Message: "Unexpected type was passed as parameter", + } + ErrInvalidAddressOrKey = Error{ + Code: -5, + Message: "Invalid address or key", + } + ErrOutOfMemory = Error{ + Code: -7, + Message: "Ran out of memory during operation", + } + ErrInvalidParameter = Error{ + Code: -8, + Message: "Invalid, missing or duplicate parameter", + } + ErrDatabase = Error{ + Code: -20, + Message: "Database error", + } + ErrDeserialization = Error{ + Code: -22, + Message: "Error parsing or validating structure in raw format", + } +) + +// Peer-to-peer client errors +var ( + ErrClientNotConnected = Error{ + Code: -9, + Message: "dcrd is not connected", + } + ErrClientInInitialDownload = Error{ + Code: -10, + Message: "dcrd is downloading blocks...", + } +) + +// Wallet JSON errors +var ( + ErrWallet = Error{ + Code: -4, + Message: "Unspecified problem with wallet", + } + ErrWalletInsufficientFunds = Error{ + Code: -6, + Message: "Not enough funds in wallet or account", + } + ErrWalletInvalidAccountName = Error{ + Code: -11, + Message: "Invalid account name", + } + ErrWalletKeypoolRanOut = Error{ + Code: -12, + Message: "Keypool ran out, call keypoolrefill first", + } + ErrWalletUnlockNeeded = Error{ + Code: -13, + Message: "Enter the wallet passphrase with walletpassphrase first", + } + ErrWalletPassphraseIncorrect = Error{ + Code: -14, + Message: "The wallet passphrase entered was incorrect", + } + ErrWalletWrongEncState = Error{ + Code: -15, + Message: "Command given in wrong wallet encryption state", + } + ErrWalletEncryptionFailed = Error{ + Code: -16, + Message: "Failed to encrypt the wallet", + } + ErrWalletAlreadyUnlocked = Error{ + Code: -17, + Message: "Wallet is already unlocked", + } +) + +// Specific Errors related to commands. These are the ones a user of the rpc +// server are most likely to see. Generally, the codes should match one of the +// more general errors above. 
+var ( + ErrBlockNotFound = Error{ + Code: -5, + Message: "Block not found", + } + ErrBlockCount = Error{ + Code: -5, + Message: "Error getting block count", + } + ErrBestBlockHash = Error{ + Code: -5, + Message: "Error getting best block hash", + } + ErrDifficulty = Error{ + Code: -5, + Message: "Error getting difficulty", + } + ErrOutOfRange = Error{ + Code: -1, + Message: "Block number out of range", + } + ErrNoTxInfo = Error{ + Code: -5, + Message: "No information available about transaction", + } + ErrNoNewestBlockInfo = Error{ + Code: -5, + Message: "No information about newest block", + } + ErrInvalidTxVout = Error{ + Code: -5, + Message: "Ouput index number (vout) does not exist for transaction.", + } + ErrRawTxString = Error{ + Code: -32602, + Message: "Raw tx is not a string", + } + ErrDecodeHexString = Error{ + Code: -22, + Message: "Unable to decode hex string", + } +) + +// Errors that are specific to dcrd. +var ( + ErrNoWallet = Error{ + Code: -1, + Message: "This implementation does not implement wallet commands", + } + ErrUnimplemented = Error{ + Code: -1, + Message: "Command unimplemented", + } +) diff --git a/btcjson/jsonrpc.go b/dcrjson/jsonrpc.go similarity index 98% rename from btcjson/jsonrpc.go rename to dcrjson/jsonrpc.go index e99d9f42..e494f9b1 100644 --- a/btcjson/jsonrpc.go +++ b/dcrjson/jsonrpc.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson +package dcrjson import ( "encoding/json" diff --git a/btcjson/jsonrpc_test.go b/dcrjson/jsonrpc_test.go similarity index 82% rename from btcjson/jsonrpc_test.go rename to dcrjson/jsonrpc_test.go index 7a5d7561..834047df 100644 --- a/btcjson/jsonrpc_test.go +++ b/dcrjson/jsonrpc_test.go @@ -1,15 +1,16 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson_test +package dcrjson_test import ( "encoding/json" "reflect" "testing" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // TestIsValidIDType ensures the IsValidIDType function behaves as expected. 
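For orientation before the next hunk: the sketch below builds the same kind of error response that the TestMarshalResponse hunk checks, using only identifiers that appear in this diff. It is a minimal illustration under those assumptions, not dcrd's actual RPC server code.

```go
package main

import (
	"fmt"

	"github.com/decred/dcrd/dcrjson"
)

func main() {
	// Build a "block not found" error reply for request id 1, mirroring the
	// marshalled output asserted by the test below.
	rpcErr := dcrjson.NewRPCError(dcrjson.ErrRPCBlockNotFound, "123 not found")
	reply, err := dcrjson.MarshalResponse(1, nil, rpcErr)
	if err != nil {
		fmt.Println("marshal:", err)
		return
	}
	fmt.Println(string(reply))
	// Expected shape: {"result":null,"error":{"code":-5,"message":"123 not found"},"id":1}
}
```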
@@ -44,7 +45,7 @@ func TestIsValidIDType(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { - if btcjson.IsValidIDType(test.id) != test.isValid { + if dcrjson.IsValidIDType(test.id) != test.isValid { t.Errorf("Test #%d (%s) valid mismatch - got %v, "+ "want %v", i, test.name, !test.isValid, test.isValid) @@ -61,7 +62,7 @@ func TestMarshalResponse(t *testing.T) { tests := []struct { name string result interface{} - jsonErr *btcjson.RPCError + jsonErr *dcrjson.RPCError expected []byte }{ { @@ -73,8 +74,8 @@ func TestMarshalResponse(t *testing.T) { { name: "result with error", result: nil, - jsonErr: func() *btcjson.RPCError { - return btcjson.NewRPCError(btcjson.ErrRPCBlockNotFound, "123 not found") + jsonErr: func() *dcrjson.RPCError { + return dcrjson.NewRPCError(dcrjson.ErrRPCBlockNotFound, "123 not found") }(), expected: []byte(`{"result":null,"error":{"code":-5,"message":"123 not found"},"id":1}`), }, @@ -83,7 +84,7 @@ func TestMarshalResponse(t *testing.T) { t.Logf("Running %d tests", len(tests)) for i, test := range tests { _, _ = i, test - marshalled, err := btcjson.MarshalResponse(testID, test.result, test.jsonErr) + marshalled, err := dcrjson.MarshalResponse(testID, test.result, test.jsonErr) if err != nil { t.Errorf("Test #%d (%s) unexpected error: %v", i, test.name, err) @@ -104,7 +105,7 @@ func TestMiscErrors(t *testing.T) { // Force an error in NewRequest by giving it a parameter type that is // not supported. - _, err := btcjson.NewRequest(nil, "test", []interface{}{make(chan int)}) + _, err := dcrjson.NewRequest(nil, "test", []interface{}{make(chan int)}) if err == nil { t.Error("NewRequest: did not receive error") return @@ -112,9 +113,9 @@ func TestMiscErrors(t *testing.T) { // Force an error in MarshalResponse by giving it an id type that is not // supported. - wantErr := btcjson.Error{ErrorCode: btcjson.ErrInvalidType} - _, err = btcjson.MarshalResponse(make(chan int), nil, nil) - if jerr, ok := err.(btcjson.Error); !ok || jerr.ErrorCode != wantErr.ErrorCode { + wantErr := dcrjson.Error{Code: dcrjson.ErrInvalidType} + _, err = dcrjson.MarshalResponse(make(chan int), nil, nil) + if jerr, ok := err.(dcrjson.Error); !ok || jerr.Code != wantErr.Code { t.Errorf("MarshalResult: did not receive expected error - got "+ "%v (%[1]T), want %v (%[2]T)", err, wantErr) return @@ -122,7 +123,7 @@ func TestMiscErrors(t *testing.T) { // Force an error in MarshalResponse by giving it a result type that // can't be marshalled. 
- _, err = btcjson.MarshalResponse(1, make(chan int), nil) + _, err = dcrjson.MarshalResponse(1, make(chan int), nil) if _, ok := err.(*json.UnsupportedTypeError); !ok { wantErr := &json.UnsupportedTypeError{} t.Errorf("MarshalResult: did not receive expected error - got "+ @@ -136,15 +137,15 @@ func TestRPCError(t *testing.T) { t.Parallel() tests := []struct { - in *btcjson.RPCError + in *dcrjson.RPCError want string }{ { - btcjson.ErrRPCInvalidRequest, + dcrjson.ErrRPCInvalidRequest, "-32600: Invalid request", }, { - btcjson.ErrRPCMethodNotFound, + dcrjson.ErrRPCMethodNotFound, "-32601: Method not found", }, } diff --git a/btcjson/jsonrpcerr.go b/dcrjson/jsonrpcerr.go similarity index 97% rename from btcjson/jsonrpcerr.go rename to dcrjson/jsonrpcerr.go index 16a02416..a104234c 100644 --- a/btcjson/jsonrpcerr.go +++ b/dcrjson/jsonrpcerr.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson +package dcrjson // Standard JSON-RPC 2.0 errors. var ( diff --git a/btcjson/register.go b/dcrjson/register.go similarity index 99% rename from btcjson/register.go rename to dcrjson/register.go index 5de001c9..40ef6cbe 100644 --- a/btcjson/register.go +++ b/dcrjson/register.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson +package dcrjson import ( "encoding/json" diff --git a/btcjson/register_test.go b/dcrjson/register_test.go similarity index 75% rename from btcjson/register_test.go rename to dcrjson/register_test.go index 34c97124..da9e207d 100644 --- a/btcjson/register_test.go +++ b/dcrjson/register_test.go @@ -1,15 +1,16 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson_test +package dcrjson_test import ( "reflect" "sort" "testing" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // TestUsageFlagStringer tests the stringized output for the UsageFlag type. @@ -17,22 +18,22 @@ func TestUsageFlagStringer(t *testing.T) { t.Parallel() tests := []struct { - in btcjson.UsageFlag + in dcrjson.UsageFlag want string }{ {0, "0x0"}, - {btcjson.UFWalletOnly, "UFWalletOnly"}, - {btcjson.UFWebsocketOnly, "UFWebsocketOnly"}, - {btcjson.UFNotification, "UFNotification"}, - {btcjson.UFWalletOnly | btcjson.UFWebsocketOnly, + {dcrjson.UFWalletOnly, "UFWalletOnly"}, + {dcrjson.UFWebsocketOnly, "UFWebsocketOnly"}, + {dcrjson.UFNotification, "UFNotification"}, + {dcrjson.UFWalletOnly | dcrjson.UFWebsocketOnly, "UFWalletOnly|UFWebsocketOnly"}, - {btcjson.UFWalletOnly | btcjson.UFWebsocketOnly | (1 << 31), + {dcrjson.UFWalletOnly | dcrjson.UFWebsocketOnly | (1 << 31), "UFWalletOnly|UFWebsocketOnly|0x80000000"}, } // Detect additional usage flags that don't have the stringer added. 
numUsageFlags := 0 - highestUsageFlagBit := btcjson.TstHighestUsageFlagBit + highestUsageFlagBit := dcrjson.TstHighestUsageFlagBit for highestUsageFlagBit > 1 { numUsageFlags++ highestUsageFlagBit >>= 1 @@ -62,8 +63,8 @@ func TestRegisterCmdErrors(t *testing.T) { name string method string cmdFunc func() interface{} - flags btcjson.UsageFlag - err btcjson.Error + flags dcrjson.UsageFlag + err dcrjson.Error }{ { name: "duplicate method", @@ -71,7 +72,7 @@ func TestRegisterCmdErrors(t *testing.T) { cmdFunc: func() interface{} { return struct{}{} }, - err: btcjson.Error{ErrorCode: btcjson.ErrDuplicateMethod}, + err: dcrjson.Error{Code: dcrjson.ErrDuplicateMethod}, }, { name: "invalid usage flags", @@ -79,8 +80,8 @@ func TestRegisterCmdErrors(t *testing.T) { cmdFunc: func() interface{} { return 0 }, - flags: btcjson.TstHighestUsageFlagBit, - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidUsageFlags}, + flags: dcrjson.TstHighestUsageFlagBit, + err: dcrjson.Error{Code: dcrjson.ErrInvalidUsageFlags}, }, { name: "invalid type", @@ -88,7 +89,7 @@ func TestRegisterCmdErrors(t *testing.T) { cmdFunc: func() interface{} { return 0 }, - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "invalid type 2", @@ -96,7 +97,7 @@ func TestRegisterCmdErrors(t *testing.T) { cmdFunc: func() interface{} { return &[]string{} }, - err: btcjson.Error{ErrorCode: btcjson.ErrInvalidType}, + err: dcrjson.Error{Code: dcrjson.ErrInvalidType}, }, { name: "embedded field", @@ -105,7 +106,7 @@ func TestRegisterCmdErrors(t *testing.T) { type test struct{ int } return (*test)(nil) }, - err: btcjson.Error{ErrorCode: btcjson.ErrEmbeddedType}, + err: dcrjson.Error{Code: dcrjson.ErrEmbeddedType}, }, { name: "unexported field", @@ -114,7 +115,7 @@ func TestRegisterCmdErrors(t *testing.T) { type test struct{ a int } return (*test)(nil) }, - err: btcjson.Error{ErrorCode: btcjson.ErrUnexportedField}, + err: dcrjson.Error{Code: dcrjson.ErrUnexportedField}, }, { name: "unsupported field type 1", @@ -123,7 +124,7 @@ func TestRegisterCmdErrors(t *testing.T) { type test struct{ A **int } return (*test)(nil) }, - err: btcjson.Error{ErrorCode: btcjson.ErrUnsupportedFieldType}, + err: dcrjson.Error{Code: dcrjson.ErrUnsupportedFieldType}, }, { name: "unsupported field type 2", @@ -132,7 +133,7 @@ func TestRegisterCmdErrors(t *testing.T) { type test struct{ A chan int } return (*test)(nil) }, - err: btcjson.Error{ErrorCode: btcjson.ErrUnsupportedFieldType}, + err: dcrjson.Error{Code: dcrjson.ErrUnsupportedFieldType}, }, { name: "unsupported field type 3", @@ -141,7 +142,7 @@ func TestRegisterCmdErrors(t *testing.T) { type test struct{ A complex64 } return (*test)(nil) }, - err: btcjson.Error{ErrorCode: btcjson.ErrUnsupportedFieldType}, + err: dcrjson.Error{Code: dcrjson.ErrUnsupportedFieldType}, }, { name: "unsupported field type 4", @@ -150,7 +151,7 @@ func TestRegisterCmdErrors(t *testing.T) { type test struct{ A complex128 } return (*test)(nil) }, - err: btcjson.Error{ErrorCode: btcjson.ErrUnsupportedFieldType}, + err: dcrjson.Error{Code: dcrjson.ErrUnsupportedFieldType}, }, { name: "unsupported field type 5", @@ -159,7 +160,7 @@ func TestRegisterCmdErrors(t *testing.T) { type test struct{ A func() } return (*test)(nil) }, - err: btcjson.Error{ErrorCode: btcjson.ErrUnsupportedFieldType}, + err: dcrjson.Error{Code: dcrjson.ErrUnsupportedFieldType}, }, { name: "unsupported field type 6", @@ -168,7 +169,7 @@ func TestRegisterCmdErrors(t *testing.T) { type test struct{ A 
interface{} } return (*test)(nil) }, - err: btcjson.Error{ErrorCode: btcjson.ErrUnsupportedFieldType}, + err: dcrjson.Error{Code: dcrjson.ErrUnsupportedFieldType}, }, { name: "required after optional", @@ -180,7 +181,7 @@ func TestRegisterCmdErrors(t *testing.T) { } return (*test)(nil) }, - err: btcjson.Error{ErrorCode: btcjson.ErrNonOptionalField}, + err: dcrjson.Error{Code: dcrjson.ErrNonOptionalField}, }, { name: "non-optional with default", @@ -191,7 +192,7 @@ func TestRegisterCmdErrors(t *testing.T) { } return (*test)(nil) }, - err: btcjson.Error{ErrorCode: btcjson.ErrNonOptionalDefault}, + err: dcrjson.Error{Code: dcrjson.ErrNonOptionalDefault}, }, { name: "mismatched default", @@ -202,24 +203,24 @@ func TestRegisterCmdErrors(t *testing.T) { } return (*test)(nil) }, - err: btcjson.Error{ErrorCode: btcjson.ErrMismatchedDefault}, + err: dcrjson.Error{Code: dcrjson.ErrMismatchedDefault}, }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { - err := btcjson.RegisterCmd(test.method, test.cmdFunc(), + err := dcrjson.RegisterCmd(test.method, test.cmdFunc(), test.flags) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Test #%d (%s) wrong error - got %T, "+ "want %T", i, test.name, err, test.err) continue } - gotErrorCode := err.(btcjson.Error).ErrorCode - if gotErrorCode != test.err.ErrorCode { + gotErrorCode := err.(dcrjson.Error).Code + if gotErrorCode != test.err.Code { t.Errorf("Test #%d (%s) mismatched error code - got "+ "%v, want %v", i, test.name, gotErrorCode, - test.err.ErrorCode) + test.err.Code) continue } } @@ -239,7 +240,7 @@ func TestMustRegisterCmdPanic(t *testing.T) { }() // Intentionally try to register an invalid type to force a panic. - btcjson.MustRegisterCmd("panicme", 0, 0) + dcrjson.MustRegisterCmd("panicme", 0, 0) } // TestRegisteredCmdMethods tests the RegisteredCmdMethods function ensure it @@ -248,7 +249,7 @@ func TestRegisteredCmdMethods(t *testing.T) { t.Parallel() // Ensure the registerd methods are returned. - methods := btcjson.RegisteredCmdMethods() + methods := dcrjson.RegisteredCmdMethods() if len(methods) == 0 { t.Fatal("RegisteredCmdMethods: no methods") } diff --git a/btcjson/walletsvrcmds.go b/dcrjson/walletsvrcmds.go similarity index 96% rename from btcjson/walletsvrcmds.go rename to dcrjson/walletsvrcmds.go index 2b977404..119f8737 100644 --- a/btcjson/walletsvrcmds.go +++ b/dcrjson/walletsvrcmds.go @@ -1,11 +1,12 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. // NOTE: This file is intended to house the RPC commands that are supported by // a wallet server. -package btcjson +package dcrjson // AddMultisigAddressCmd defines the addmutisigaddress JSON-RPC command. type AddMultisigAddressCmd struct { @@ -68,19 +69,6 @@ func NewEncryptWalletCmd(passphrase string) *EncryptWalletCmd { } } -// EstimateFeeCmd defines the estimatefee JSON-RPC command. -type EstimateFeeCmd struct { - NumBlocks int64 -} - -// NewEstimateFeeCmd returns a new instance which can be used to issue a -// estimatefee JSON-RPC command. -func NewEstimateFeeCmd(numBlocks int64) *EstimateFeeCmd { - return &EstimateFeeCmd{ - NumBlocks: numBlocks, - } -} - // EstimatePriorityCmd defines the estimatepriority JSON-RPC command. 
type EstimatePriorityCmd struct { NumBlocks int64 @@ -135,8 +123,9 @@ func NewGetAddressesByAccountCmd(account string) *GetAddressesByAccountCmd { // GetBalanceCmd defines the getbalance JSON-RPC command. type GetBalanceCmd struct { - Account *string - MinConf *int `jsonrpcdefault:"1"` + Account *string + MinConf *int `jsonrpcdefault:"1"` + BalanceType *string } // NewGetBalanceCmd returns a new instance which can be used to issue a @@ -144,16 +133,19 @@ type GetBalanceCmd struct { // // The parameters which are pointers indicate they are optional. Passing nil // for optional parameters will use the default value. -func NewGetBalanceCmd(account *string, minConf *int) *GetBalanceCmd { +func NewGetBalanceCmd(account *string, minConf *int, balType *string) *GetBalanceCmd { + return &GetBalanceCmd{ - Account: account, - MinConf: minConf, + Account: account, + MinConf: minConf, + BalanceType: balType, } } // GetNewAddressCmd defines the getnewaddress JSON-RPC command. type GetNewAddressCmd struct { Account *string + Verbose *bool `jsonrpcdefault:"false"` } // NewGetNewAddressCmd returns a new instance which can be used to issue a @@ -161,15 +153,17 @@ type GetNewAddressCmd struct { // // The parameters which are pointers indicate they are optional. Passing nil // for optional parameters will use the default value. -func NewGetNewAddressCmd(account *string) *GetNewAddressCmd { +func NewGetNewAddressCmd(account *string, verbose *bool) *GetNewAddressCmd { return &GetNewAddressCmd{ Account: account, + Verbose: verbose, } } // GetRawChangeAddressCmd defines the getrawchangeaddress JSON-RPC command. type GetRawChangeAddressCmd struct { Account *string + Verbose *bool `jsonrpcdefault:"false"` } // NewGetRawChangeAddressCmd returns a new instance which can be used to issue a @@ -177,9 +171,11 @@ type GetRawChangeAddressCmd struct { // // The parameters which are pointers indicate they are optional. Passing nil // for optional parameters will use the default value. -func NewGetRawChangeAddressCmd(account *string) *GetRawChangeAddressCmd { +func NewGetRawChangeAddressCmd(account *string, + verbose *bool) *GetRawChangeAddressCmd { return &GetRawChangeAddressCmd{ Account: account, + Verbose: verbose, } } @@ -428,7 +424,7 @@ func NewLockUnspentCmd(unlock bool, transactions []TransactionInput) *LockUnspen type MoveCmd struct { FromAccount string ToAccount string - Amount float64 // In BTC + Amount float64 // In DCR MinConf *int `jsonrpcdefault:"1"` Comment *string } @@ -452,7 +448,7 @@ func NewMoveCmd(fromAccount, toAccount string, amount float64, minConf *int, com type SendFromCmd struct { FromAccount string ToAddress string - Amount float64 // In BTC + Amount float64 // In DCR MinConf *int `jsonrpcdefault:"1"` Comment *string CommentTo *string @@ -477,7 +473,7 @@ func NewSendFromCmd(fromAccount, toAddress string, amount float64, minConf *int, // SendManyCmd defines the sendmany JSON-RPC command. type SendManyCmd struct { FromAccount string - Amounts map[string]float64 `jsonrpcusage:"{\"address\":amount,...}"` // In BTC + Amounts map[string]float64 `jsonrpcusage:"{\"address\":amount,...}"` // In DCR MinConf *int `jsonrpcdefault:"1"` Comment *string } @@ -535,7 +531,7 @@ func NewSetAccountCmd(address, account string) *SetAccountCmd { // SetTxFeeCmd defines the settxfee JSON-RPC command. 
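The getbalance and getnewaddress hunks above add Decred-specific optional parameters (BalanceType and Verbose) that nothing in this part of the diff exercises. The sketch below builds and marshals the extended getbalance command; the "spendable" balance type and the "default" account name are illustrative values only, not constants defined by this change.

```go
package main

import (
	"fmt"

	"github.com/decred/dcrd/dcrjson"
)

func main() {
	// Account and balance type values are illustrative; the diff does not
	// define the set of accepted balance type strings.
	cmd := dcrjson.NewGetBalanceCmd(dcrjson.String("default"),
		dcrjson.Int(1), dcrjson.String("spendable"))

	marshalled, err := dcrjson.MarshalCmd(1, cmd)
	if err != nil {
		fmt.Println("marshal:", err)
		return
	}
	fmt.Println(string(marshalled))
	// Roughly: {"jsonrpc":"1.0","method":"getbalance","params":["default",1,"spendable"],"id":1}
}
```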
type SetTxFeeCmd struct { - Amount float64 // In BTC + Amount float64 // In DCR } // NewSetTxFeeCmd returns a new instance which can be used to issue a settxfee @@ -562,10 +558,11 @@ func NewSignMessageCmd(address, message string) *SignMessageCmd { } // RawTxInput models the data needed for raw transaction input that is used in -// the SignRawTransactionCmd struct. +// the SignRawTransactionCmd struct. Contains Decred additions. type RawTxInput struct { Txid string `json:"txid"` Vout uint32 `json:"vout"` + Tree int8 `json:"tree"` ScriptPubKey string `json:"scriptPubKey"` RedeemScript string `json:"redeemScript"` } @@ -639,7 +636,6 @@ func init() { MustRegisterCmd("createmultisig", (*CreateMultisigCmd)(nil), flags) MustRegisterCmd("dumpprivkey", (*DumpPrivKeyCmd)(nil), flags) MustRegisterCmd("encryptwallet", (*EncryptWalletCmd)(nil), flags) - MustRegisterCmd("estimatefee", (*EstimateFeeCmd)(nil), flags) MustRegisterCmd("estimatepriority", (*EstimatePriorityCmd)(nil), flags) MustRegisterCmd("getaccount", (*GetAccountCmd)(nil), flags) MustRegisterCmd("getaccountaddress", (*GetAccountAddressCmd)(nil), flags) diff --git a/btcjson/walletsvrcmds_test.go b/dcrjson/walletsvrcmds_test.go similarity index 56% rename from btcjson/walletsvrcmds_test.go rename to dcrjson/walletsvrcmds_test.go index ed551da8..f3446e23 100644 --- a/btcjson/walletsvrcmds_test.go +++ b/dcrjson/walletsvrcmds_test.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson_test +package dcrjson_test import ( "bytes" @@ -11,7 +12,7 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // TestWalletSvrCmds tests all of the wallet server commands marshal and @@ -32,14 +33,14 @@ func TestWalletSvrCmds(t *testing.T) { { name: "addmultisigaddress", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("addmultisigaddress", 2, []string{"031234", "035678"}) + return dcrjson.NewCmd("addmultisigaddress", 2, []string{"031234", "035678"}) }, staticCmd: func() interface{} { keys := []string{"031234", "035678"} - return btcjson.NewAddMultisigAddressCmd(2, keys, nil) + return dcrjson.NewAddMultisigAddressCmd(2, keys, nil) }, marshalled: `{"jsonrpc":"1.0","method":"addmultisigaddress","params":[2,["031234","035678"]],"id":1}`, - unmarshalled: &btcjson.AddMultisigAddressCmd{ + unmarshalled: &dcrjson.AddMultisigAddressCmd{ NRequired: 2, Keys: []string{"031234", "035678"}, Account: nil, @@ -48,30 +49,30 @@ func TestWalletSvrCmds(t *testing.T) { { name: "addmultisigaddress optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("addmultisigaddress", 2, []string{"031234", "035678"}, "test") + return dcrjson.NewCmd("addmultisigaddress", 2, []string{"031234", "035678"}, "test") }, staticCmd: func() interface{} { keys := []string{"031234", "035678"} - return btcjson.NewAddMultisigAddressCmd(2, keys, btcjson.String("test")) + return dcrjson.NewAddMultisigAddressCmd(2, keys, dcrjson.String("test")) }, marshalled: `{"jsonrpc":"1.0","method":"addmultisigaddress","params":[2,["031234","035678"],"test"],"id":1}`, - unmarshalled: &btcjson.AddMultisigAddressCmd{ + unmarshalled: &dcrjson.AddMultisigAddressCmd{ NRequired: 2, Keys: []string{"031234", "035678"}, - Account: btcjson.String("test"), + Account: dcrjson.String("test"), }, }, { name: "createmultisig", newCmd: func() (interface{}, error) { - return 
btcjson.NewCmd("createmultisig", 2, []string{"031234", "035678"}) + return dcrjson.NewCmd("createmultisig", 2, []string{"031234", "035678"}) }, staticCmd: func() interface{} { keys := []string{"031234", "035678"} - return btcjson.NewCreateMultisigCmd(2, keys) + return dcrjson.NewCreateMultisigCmd(2, keys) }, marshalled: `{"jsonrpc":"1.0","method":"createmultisig","params":[2,["031234","035678"]],"id":1}`, - unmarshalled: &btcjson.CreateMultisigCmd{ + unmarshalled: &dcrjson.CreateMultisigCmd{ NRequired: 2, Keys: []string{"031234", "035678"}, }, @@ -79,729 +80,733 @@ func TestWalletSvrCmds(t *testing.T) { { name: "dumpprivkey", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("dumpprivkey", "1Address") + return dcrjson.NewCmd("dumpprivkey", "1Address") }, staticCmd: func() interface{} { - return btcjson.NewDumpPrivKeyCmd("1Address") + return dcrjson.NewDumpPrivKeyCmd("1Address") }, marshalled: `{"jsonrpc":"1.0","method":"dumpprivkey","params":["1Address"],"id":1}`, - unmarshalled: &btcjson.DumpPrivKeyCmd{ + unmarshalled: &dcrjson.DumpPrivKeyCmd{ Address: "1Address", }, }, { name: "encryptwallet", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("encryptwallet", "pass") + return dcrjson.NewCmd("encryptwallet", "pass") }, staticCmd: func() interface{} { - return btcjson.NewEncryptWalletCmd("pass") + return dcrjson.NewEncryptWalletCmd("pass") }, marshalled: `{"jsonrpc":"1.0","method":"encryptwallet","params":["pass"],"id":1}`, - unmarshalled: &btcjson.EncryptWalletCmd{ + unmarshalled: &dcrjson.EncryptWalletCmd{ Passphrase: "pass", }, }, { name: "estimatefee", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("estimatefee", 6) + return dcrjson.NewCmd("estimatefee", 6) }, staticCmd: func() interface{} { - return btcjson.NewEstimateFeeCmd(6) + return dcrjson.NewEstimateFeeCmd(6) }, marshalled: `{"jsonrpc":"1.0","method":"estimatefee","params":[6],"id":1}`, - unmarshalled: &btcjson.EstimateFeeCmd{ + unmarshalled: &dcrjson.EstimateFeeCmd{ NumBlocks: 6, }, }, { name: "estimatepriority", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("estimatepriority", 6) + return dcrjson.NewCmd("estimatepriority", 6) }, staticCmd: func() interface{} { - return btcjson.NewEstimatePriorityCmd(6) + return dcrjson.NewEstimatePriorityCmd(6) }, marshalled: `{"jsonrpc":"1.0","method":"estimatepriority","params":[6],"id":1}`, - unmarshalled: &btcjson.EstimatePriorityCmd{ + unmarshalled: &dcrjson.EstimatePriorityCmd{ NumBlocks: 6, }, }, { name: "getaccount", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getaccount", "1Address") + return dcrjson.NewCmd("getaccount", "1Address") }, staticCmd: func() interface{} { - return btcjson.NewGetAccountCmd("1Address") + return dcrjson.NewGetAccountCmd("1Address") }, marshalled: `{"jsonrpc":"1.0","method":"getaccount","params":["1Address"],"id":1}`, - unmarshalled: &btcjson.GetAccountCmd{ + unmarshalled: &dcrjson.GetAccountCmd{ Address: "1Address", }, }, { name: "getaccountaddress", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getaccountaddress", "acct") + return dcrjson.NewCmd("getaccountaddress", "acct") }, staticCmd: func() interface{} { - return btcjson.NewGetAccountAddressCmd("acct") + return dcrjson.NewGetAccountAddressCmd("acct") }, marshalled: `{"jsonrpc":"1.0","method":"getaccountaddress","params":["acct"],"id":1}`, - unmarshalled: &btcjson.GetAccountAddressCmd{ + unmarshalled: &dcrjson.GetAccountAddressCmd{ Account: "acct", }, }, { name: "getaddressesbyaccount", newCmd: func() (interface{}, 
error) { - return btcjson.NewCmd("getaddressesbyaccount", "acct") + return dcrjson.NewCmd("getaddressesbyaccount", "acct") }, staticCmd: func() interface{} { - return btcjson.NewGetAddressesByAccountCmd("acct") + return dcrjson.NewGetAddressesByAccountCmd("acct") }, marshalled: `{"jsonrpc":"1.0","method":"getaddressesbyaccount","params":["acct"],"id":1}`, - unmarshalled: &btcjson.GetAddressesByAccountCmd{ + unmarshalled: &dcrjson.GetAddressesByAccountCmd{ Account: "acct", }, }, { name: "getbalance", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getbalance") + return dcrjson.NewCmd("getbalance") }, staticCmd: func() interface{} { - return btcjson.NewGetBalanceCmd(nil, nil) + return dcrjson.NewGetBalanceCmd(nil, nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"getbalance","params":[],"id":1}`, - unmarshalled: &btcjson.GetBalanceCmd{ + unmarshalled: &dcrjson.GetBalanceCmd{ Account: nil, - MinConf: btcjson.Int(1), + MinConf: dcrjson.Int(1), }, }, { name: "getbalance optional1", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getbalance", "acct") + return dcrjson.NewCmd("getbalance", "acct") }, staticCmd: func() interface{} { - return btcjson.NewGetBalanceCmd(btcjson.String("acct"), nil) + return dcrjson.NewGetBalanceCmd(dcrjson.String("acct"), nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"getbalance","params":["acct"],"id":1}`, - unmarshalled: &btcjson.GetBalanceCmd{ - Account: btcjson.String("acct"), - MinConf: btcjson.Int(1), + unmarshalled: &dcrjson.GetBalanceCmd{ + Account: dcrjson.String("acct"), + MinConf: dcrjson.Int(1), }, }, { name: "getbalance optional2", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getbalance", "acct", 6) + return dcrjson.NewCmd("getbalance", "acct", 6) }, staticCmd: func() interface{} { - return btcjson.NewGetBalanceCmd(btcjson.String("acct"), btcjson.Int(6)) + return dcrjson.NewGetBalanceCmd(dcrjson.String("acct"), dcrjson.Int(6), nil) }, marshalled: `{"jsonrpc":"1.0","method":"getbalance","params":["acct",6],"id":1}`, - unmarshalled: &btcjson.GetBalanceCmd{ - Account: btcjson.String("acct"), - MinConf: btcjson.Int(6), + unmarshalled: &dcrjson.GetBalanceCmd{ + Account: dcrjson.String("acct"), + MinConf: dcrjson.Int(6), }, }, { name: "getnewaddress", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getnewaddress") + return dcrjson.NewCmd("getnewaddress") }, staticCmd: func() interface{} { - return btcjson.NewGetNewAddressCmd(nil) + return dcrjson.NewGetNewAddressCmd(nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"getnewaddress","params":[],"id":1}`, - unmarshalled: &btcjson.GetNewAddressCmd{ + unmarshalled: &dcrjson.GetNewAddressCmd{ Account: nil, + Verbose: dcrjson.Bool(false), }, }, { name: "getnewaddress optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getnewaddress", "acct") + return dcrjson.NewCmd("getnewaddress", "acct", "true") }, staticCmd: func() interface{} { - return btcjson.NewGetNewAddressCmd(btcjson.String("acct")) + return dcrjson.NewGetNewAddressCmd(dcrjson.String("acct"), dcrjson.Bool(true)) }, - marshalled: `{"jsonrpc":"1.0","method":"getnewaddress","params":["acct"],"id":1}`, - unmarshalled: &btcjson.GetNewAddressCmd{ - Account: btcjson.String("acct"), + marshalled: `{"jsonrpc":"1.0","method":"getnewaddress","params":["acct",true],"id":1}`, + unmarshalled: &dcrjson.GetNewAddressCmd{ + Account: dcrjson.String("acct"), + Verbose: dcrjson.Bool(true), }, }, { name: "getrawchangeaddress", newCmd: func() (interface{}, error) { - return 
btcjson.NewCmd("getrawchangeaddress") + return dcrjson.NewCmd("getrawchangeaddress") }, staticCmd: func() interface{} { - return btcjson.NewGetRawChangeAddressCmd(nil) + return dcrjson.NewGetRawChangeAddressCmd(nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"getrawchangeaddress","params":[],"id":1}`, - unmarshalled: &btcjson.GetRawChangeAddressCmd{ + unmarshalled: &dcrjson.GetRawChangeAddressCmd{ Account: nil, + Verbose: dcrjson.Bool(false), }, }, { name: "getrawchangeaddress optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getrawchangeaddress", "acct") + return dcrjson.NewCmd("getrawchangeaddress", "acct", "true") }, staticCmd: func() interface{} { - return btcjson.NewGetRawChangeAddressCmd(btcjson.String("acct")) + return dcrjson.NewGetRawChangeAddressCmd(dcrjson.String("acct"), dcrjson.Bool(true)) }, - marshalled: `{"jsonrpc":"1.0","method":"getrawchangeaddress","params":["acct"],"id":1}`, - unmarshalled: &btcjson.GetRawChangeAddressCmd{ - Account: btcjson.String("acct"), + marshalled: `{"jsonrpc":"1.0","method":"getrawchangeaddress","params":["acct",true],"id":1}`, + unmarshalled: &dcrjson.GetRawChangeAddressCmd{ + Account: dcrjson.String("acct"), + Verbose: dcrjson.Bool(true), }, }, { name: "getreceivedbyaccount", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getreceivedbyaccount", "acct") + return dcrjson.NewCmd("getreceivedbyaccount", "acct") }, staticCmd: func() interface{} { - return btcjson.NewGetReceivedByAccountCmd("acct", nil) + return dcrjson.NewGetReceivedByAccountCmd("acct", nil) }, marshalled: `{"jsonrpc":"1.0","method":"getreceivedbyaccount","params":["acct"],"id":1}`, - unmarshalled: &btcjson.GetReceivedByAccountCmd{ + unmarshalled: &dcrjson.GetReceivedByAccountCmd{ Account: "acct", - MinConf: btcjson.Int(1), + MinConf: dcrjson.Int(1), }, }, { name: "getreceivedbyaccount optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getreceivedbyaccount", "acct", 6) + return dcrjson.NewCmd("getreceivedbyaccount", "acct", 6) }, staticCmd: func() interface{} { - return btcjson.NewGetReceivedByAccountCmd("acct", btcjson.Int(6)) + return dcrjson.NewGetReceivedByAccountCmd("acct", dcrjson.Int(6)) }, marshalled: `{"jsonrpc":"1.0","method":"getreceivedbyaccount","params":["acct",6],"id":1}`, - unmarshalled: &btcjson.GetReceivedByAccountCmd{ + unmarshalled: &dcrjson.GetReceivedByAccountCmd{ Account: "acct", - MinConf: btcjson.Int(6), + MinConf: dcrjson.Int(6), }, }, { name: "getreceivedbyaddress", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getreceivedbyaddress", "1Address") + return dcrjson.NewCmd("getreceivedbyaddress", "1Address") }, staticCmd: func() interface{} { - return btcjson.NewGetReceivedByAddressCmd("1Address", nil) + return dcrjson.NewGetReceivedByAddressCmd("1Address", nil) }, marshalled: `{"jsonrpc":"1.0","method":"getreceivedbyaddress","params":["1Address"],"id":1}`, - unmarshalled: &btcjson.GetReceivedByAddressCmd{ + unmarshalled: &dcrjson.GetReceivedByAddressCmd{ Address: "1Address", - MinConf: btcjson.Int(1), + MinConf: dcrjson.Int(1), }, }, { name: "getreceivedbyaddress optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getreceivedbyaddress", "1Address", 6) + return dcrjson.NewCmd("getreceivedbyaddress", "1Address", 6) }, staticCmd: func() interface{} { - return btcjson.NewGetReceivedByAddressCmd("1Address", btcjson.Int(6)) + return dcrjson.NewGetReceivedByAddressCmd("1Address", dcrjson.Int(6)) }, marshalled: 
`{"jsonrpc":"1.0","method":"getreceivedbyaddress","params":["1Address",6],"id":1}`, - unmarshalled: &btcjson.GetReceivedByAddressCmd{ + unmarshalled: &dcrjson.GetReceivedByAddressCmd{ Address: "1Address", - MinConf: btcjson.Int(6), + MinConf: dcrjson.Int(6), }, }, { name: "gettransaction", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("gettransaction", "123") + return dcrjson.NewCmd("gettransaction", "123") }, staticCmd: func() interface{} { - return btcjson.NewGetTransactionCmd("123", nil) + return dcrjson.NewGetTransactionCmd("123", nil) }, marshalled: `{"jsonrpc":"1.0","method":"gettransaction","params":["123"],"id":1}`, - unmarshalled: &btcjson.GetTransactionCmd{ + unmarshalled: &dcrjson.GetTransactionCmd{ Txid: "123", - IncludeWatchOnly: btcjson.Bool(false), + IncludeWatchOnly: dcrjson.Bool(false), }, }, { name: "gettransaction optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("gettransaction", "123", true) + return dcrjson.NewCmd("gettransaction", "123", true) }, staticCmd: func() interface{} { - return btcjson.NewGetTransactionCmd("123", btcjson.Bool(true)) + return dcrjson.NewGetTransactionCmd("123", dcrjson.Bool(true)) }, marshalled: `{"jsonrpc":"1.0","method":"gettransaction","params":["123",true],"id":1}`, - unmarshalled: &btcjson.GetTransactionCmd{ + unmarshalled: &dcrjson.GetTransactionCmd{ Txid: "123", - IncludeWatchOnly: btcjson.Bool(true), + IncludeWatchOnly: dcrjson.Bool(true), }, }, { name: "importprivkey", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("importprivkey", "abc") + return dcrjson.NewCmd("importprivkey", "abc") }, staticCmd: func() interface{} { - return btcjson.NewImportPrivKeyCmd("abc", nil, nil) + return dcrjson.NewImportPrivKeyCmd("abc", nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"importprivkey","params":["abc"],"id":1}`, - unmarshalled: &btcjson.ImportPrivKeyCmd{ + unmarshalled: &dcrjson.ImportPrivKeyCmd{ PrivKey: "abc", Label: nil, - Rescan: btcjson.Bool(true), + Rescan: dcrjson.Bool(true), }, }, { name: "importprivkey optional1", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("importprivkey", "abc", "label") + return dcrjson.NewCmd("importprivkey", "abc", "label") }, staticCmd: func() interface{} { - return btcjson.NewImportPrivKeyCmd("abc", btcjson.String("label"), nil) + return dcrjson.NewImportPrivKeyCmd("abc", dcrjson.String("label"), nil) }, marshalled: `{"jsonrpc":"1.0","method":"importprivkey","params":["abc","label"],"id":1}`, - unmarshalled: &btcjson.ImportPrivKeyCmd{ + unmarshalled: &dcrjson.ImportPrivKeyCmd{ PrivKey: "abc", - Label: btcjson.String("label"), - Rescan: btcjson.Bool(true), + Label: dcrjson.String("label"), + Rescan: dcrjson.Bool(true), }, }, { name: "importprivkey optional2", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("importprivkey", "abc", "label", false) + return dcrjson.NewCmd("importprivkey", "abc", "label", false) }, staticCmd: func() interface{} { - return btcjson.NewImportPrivKeyCmd("abc", btcjson.String("label"), btcjson.Bool(false)) + return dcrjson.NewImportPrivKeyCmd("abc", dcrjson.String("label"), dcrjson.Bool(false)) }, marshalled: `{"jsonrpc":"1.0","method":"importprivkey","params":["abc","label",false],"id":1}`, - unmarshalled: &btcjson.ImportPrivKeyCmd{ + unmarshalled: &dcrjson.ImportPrivKeyCmd{ PrivKey: "abc", - Label: btcjson.String("label"), - Rescan: btcjson.Bool(false), + Label: dcrjson.String("label"), + Rescan: dcrjson.Bool(false), }, }, { name: "keypoolrefill", newCmd: func() (interface{}, error) { - return 
btcjson.NewCmd("keypoolrefill") + return dcrjson.NewCmd("keypoolrefill") }, staticCmd: func() interface{} { - return btcjson.NewKeyPoolRefillCmd(nil) + return dcrjson.NewKeyPoolRefillCmd(nil) }, marshalled: `{"jsonrpc":"1.0","method":"keypoolrefill","params":[],"id":1}`, - unmarshalled: &btcjson.KeyPoolRefillCmd{ - NewSize: btcjson.Uint(100), + unmarshalled: &dcrjson.KeyPoolRefillCmd{ + NewSize: dcrjson.Uint(100), }, }, { name: "keypoolrefill optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("keypoolrefill", 200) + return dcrjson.NewCmd("keypoolrefill", 200) }, staticCmd: func() interface{} { - return btcjson.NewKeyPoolRefillCmd(btcjson.Uint(200)) + return dcrjson.NewKeyPoolRefillCmd(dcrjson.Uint(200)) }, marshalled: `{"jsonrpc":"1.0","method":"keypoolrefill","params":[200],"id":1}`, - unmarshalled: &btcjson.KeyPoolRefillCmd{ - NewSize: btcjson.Uint(200), + unmarshalled: &dcrjson.KeyPoolRefillCmd{ + NewSize: dcrjson.Uint(200), }, }, { name: "listaccounts", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listaccounts") + return dcrjson.NewCmd("listaccounts") }, staticCmd: func() interface{} { - return btcjson.NewListAccountsCmd(nil) + return dcrjson.NewListAccountsCmd(nil) }, marshalled: `{"jsonrpc":"1.0","method":"listaccounts","params":[],"id":1}`, - unmarshalled: &btcjson.ListAccountsCmd{ - MinConf: btcjson.Int(1), + unmarshalled: &dcrjson.ListAccountsCmd{ + MinConf: dcrjson.Int(1), }, }, { name: "listaccounts optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listaccounts", 6) + return dcrjson.NewCmd("listaccounts", 6) }, staticCmd: func() interface{} { - return btcjson.NewListAccountsCmd(btcjson.Int(6)) + return dcrjson.NewListAccountsCmd(dcrjson.Int(6)) }, marshalled: `{"jsonrpc":"1.0","method":"listaccounts","params":[6],"id":1}`, - unmarshalled: &btcjson.ListAccountsCmd{ - MinConf: btcjson.Int(6), + unmarshalled: &dcrjson.ListAccountsCmd{ + MinConf: dcrjson.Int(6), }, }, { name: "listaddressgroupings", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listaddressgroupings") + return dcrjson.NewCmd("listaddressgroupings") }, staticCmd: func() interface{} { - return btcjson.NewListAddressGroupingsCmd() + return dcrjson.NewListAddressGroupingsCmd() }, marshalled: `{"jsonrpc":"1.0","method":"listaddressgroupings","params":[],"id":1}`, - unmarshalled: &btcjson.ListAddressGroupingsCmd{}, + unmarshalled: &dcrjson.ListAddressGroupingsCmd{}, }, { name: "listlockunspent", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listlockunspent") + return dcrjson.NewCmd("listlockunspent") }, staticCmd: func() interface{} { - return btcjson.NewListLockUnspentCmd() + return dcrjson.NewListLockUnspentCmd() }, marshalled: `{"jsonrpc":"1.0","method":"listlockunspent","params":[],"id":1}`, - unmarshalled: &btcjson.ListLockUnspentCmd{}, + unmarshalled: &dcrjson.ListLockUnspentCmd{}, }, { name: "listreceivedbyaccount", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listreceivedbyaccount") + return dcrjson.NewCmd("listreceivedbyaccount") }, staticCmd: func() interface{} { - return btcjson.NewListReceivedByAccountCmd(nil, nil, nil) + return dcrjson.NewListReceivedByAccountCmd(nil, nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"listreceivedbyaccount","params":[],"id":1}`, - unmarshalled: &btcjson.ListReceivedByAccountCmd{ - MinConf: btcjson.Int(1), - IncludeEmpty: btcjson.Bool(false), - IncludeWatchOnly: btcjson.Bool(false), + unmarshalled: &dcrjson.ListReceivedByAccountCmd{ + MinConf: dcrjson.Int(1), + 
IncludeEmpty: dcrjson.Bool(false), + IncludeWatchOnly: dcrjson.Bool(false), }, }, { name: "listreceivedbyaccount optional1", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listreceivedbyaccount", 6) + return dcrjson.NewCmd("listreceivedbyaccount", 6) }, staticCmd: func() interface{} { - return btcjson.NewListReceivedByAccountCmd(btcjson.Int(6), nil, nil) + return dcrjson.NewListReceivedByAccountCmd(dcrjson.Int(6), nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"listreceivedbyaccount","params":[6],"id":1}`, - unmarshalled: &btcjson.ListReceivedByAccountCmd{ - MinConf: btcjson.Int(6), - IncludeEmpty: btcjson.Bool(false), - IncludeWatchOnly: btcjson.Bool(false), + unmarshalled: &dcrjson.ListReceivedByAccountCmd{ + MinConf: dcrjson.Int(6), + IncludeEmpty: dcrjson.Bool(false), + IncludeWatchOnly: dcrjson.Bool(false), }, }, { name: "listreceivedbyaccount optional2", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listreceivedbyaccount", 6, true) + return dcrjson.NewCmd("listreceivedbyaccount", 6, true) }, staticCmd: func() interface{} { - return btcjson.NewListReceivedByAccountCmd(btcjson.Int(6), btcjson.Bool(true), nil) + return dcrjson.NewListReceivedByAccountCmd(dcrjson.Int(6), dcrjson.Bool(true), nil) }, marshalled: `{"jsonrpc":"1.0","method":"listreceivedbyaccount","params":[6,true],"id":1}`, - unmarshalled: &btcjson.ListReceivedByAccountCmd{ - MinConf: btcjson.Int(6), - IncludeEmpty: btcjson.Bool(true), - IncludeWatchOnly: btcjson.Bool(false), + unmarshalled: &dcrjson.ListReceivedByAccountCmd{ + MinConf: dcrjson.Int(6), + IncludeEmpty: dcrjson.Bool(true), + IncludeWatchOnly: dcrjson.Bool(false), }, }, { name: "listreceivedbyaccount optional3", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listreceivedbyaccount", 6, true, false) + return dcrjson.NewCmd("listreceivedbyaccount", 6, true, false) }, staticCmd: func() interface{} { - return btcjson.NewListReceivedByAccountCmd(btcjson.Int(6), btcjson.Bool(true), btcjson.Bool(false)) + return dcrjson.NewListReceivedByAccountCmd(dcrjson.Int(6), dcrjson.Bool(true), dcrjson.Bool(false)) }, marshalled: `{"jsonrpc":"1.0","method":"listreceivedbyaccount","params":[6,true,false],"id":1}`, - unmarshalled: &btcjson.ListReceivedByAccountCmd{ - MinConf: btcjson.Int(6), - IncludeEmpty: btcjson.Bool(true), - IncludeWatchOnly: btcjson.Bool(false), + unmarshalled: &dcrjson.ListReceivedByAccountCmd{ + MinConf: dcrjson.Int(6), + IncludeEmpty: dcrjson.Bool(true), + IncludeWatchOnly: dcrjson.Bool(false), }, }, { name: "listreceivedbyaddress", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listreceivedbyaddress") + return dcrjson.NewCmd("listreceivedbyaddress") }, staticCmd: func() interface{} { - return btcjson.NewListReceivedByAddressCmd(nil, nil, nil) + return dcrjson.NewListReceivedByAddressCmd(nil, nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"listreceivedbyaddress","params":[],"id":1}`, - unmarshalled: &btcjson.ListReceivedByAddressCmd{ - MinConf: btcjson.Int(1), - IncludeEmpty: btcjson.Bool(false), - IncludeWatchOnly: btcjson.Bool(false), + unmarshalled: &dcrjson.ListReceivedByAddressCmd{ + MinConf: dcrjson.Int(1), + IncludeEmpty: dcrjson.Bool(false), + IncludeWatchOnly: dcrjson.Bool(false), }, }, { name: "listreceivedbyaddress optional1", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listreceivedbyaddress", 6) + return dcrjson.NewCmd("listreceivedbyaddress", 6) }, staticCmd: func() interface{} { - return btcjson.NewListReceivedByAddressCmd(btcjson.Int(6), nil, nil) + 
return dcrjson.NewListReceivedByAddressCmd(dcrjson.Int(6), nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"listreceivedbyaddress","params":[6],"id":1}`, - unmarshalled: &btcjson.ListReceivedByAddressCmd{ - MinConf: btcjson.Int(6), - IncludeEmpty: btcjson.Bool(false), - IncludeWatchOnly: btcjson.Bool(false), + unmarshalled: &dcrjson.ListReceivedByAddressCmd{ + MinConf: dcrjson.Int(6), + IncludeEmpty: dcrjson.Bool(false), + IncludeWatchOnly: dcrjson.Bool(false), }, }, { name: "listreceivedbyaddress optional2", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listreceivedbyaddress", 6, true) + return dcrjson.NewCmd("listreceivedbyaddress", 6, true) }, staticCmd: func() interface{} { - return btcjson.NewListReceivedByAddressCmd(btcjson.Int(6), btcjson.Bool(true), nil) + return dcrjson.NewListReceivedByAddressCmd(dcrjson.Int(6), dcrjson.Bool(true), nil) }, marshalled: `{"jsonrpc":"1.0","method":"listreceivedbyaddress","params":[6,true],"id":1}`, - unmarshalled: &btcjson.ListReceivedByAddressCmd{ - MinConf: btcjson.Int(6), - IncludeEmpty: btcjson.Bool(true), - IncludeWatchOnly: btcjson.Bool(false), + unmarshalled: &dcrjson.ListReceivedByAddressCmd{ + MinConf: dcrjson.Int(6), + IncludeEmpty: dcrjson.Bool(true), + IncludeWatchOnly: dcrjson.Bool(false), }, }, { name: "listreceivedbyaddress optional3", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listreceivedbyaddress", 6, true, false) + return dcrjson.NewCmd("listreceivedbyaddress", 6, true, false) }, staticCmd: func() interface{} { - return btcjson.NewListReceivedByAddressCmd(btcjson.Int(6), btcjson.Bool(true), btcjson.Bool(false)) + return dcrjson.NewListReceivedByAddressCmd(dcrjson.Int(6), dcrjson.Bool(true), dcrjson.Bool(false)) }, marshalled: `{"jsonrpc":"1.0","method":"listreceivedbyaddress","params":[6,true,false],"id":1}`, - unmarshalled: &btcjson.ListReceivedByAddressCmd{ - MinConf: btcjson.Int(6), - IncludeEmpty: btcjson.Bool(true), - IncludeWatchOnly: btcjson.Bool(false), + unmarshalled: &dcrjson.ListReceivedByAddressCmd{ + MinConf: dcrjson.Int(6), + IncludeEmpty: dcrjson.Bool(true), + IncludeWatchOnly: dcrjson.Bool(false), }, }, { name: "listsinceblock", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listsinceblock") + return dcrjson.NewCmd("listsinceblock") }, staticCmd: func() interface{} { - return btcjson.NewListSinceBlockCmd(nil, nil, nil) + return dcrjson.NewListSinceBlockCmd(nil, nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"listsinceblock","params":[],"id":1}`, - unmarshalled: &btcjson.ListSinceBlockCmd{ + unmarshalled: &dcrjson.ListSinceBlockCmd{ BlockHash: nil, - TargetConfirmations: btcjson.Int(1), - IncludeWatchOnly: btcjson.Bool(false), + TargetConfirmations: dcrjson.Int(1), + IncludeWatchOnly: dcrjson.Bool(false), }, }, { name: "listsinceblock optional1", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listsinceblock", "123") + return dcrjson.NewCmd("listsinceblock", "123") }, staticCmd: func() interface{} { - return btcjson.NewListSinceBlockCmd(btcjson.String("123"), nil, nil) + return dcrjson.NewListSinceBlockCmd(dcrjson.String("123"), nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"listsinceblock","params":["123"],"id":1}`, - unmarshalled: &btcjson.ListSinceBlockCmd{ - BlockHash: btcjson.String("123"), - TargetConfirmations: btcjson.Int(1), - IncludeWatchOnly: btcjson.Bool(false), + unmarshalled: &dcrjson.ListSinceBlockCmd{ + BlockHash: dcrjson.String("123"), + TargetConfirmations: dcrjson.Int(1), + IncludeWatchOnly: dcrjson.Bool(false), }, 
}, { name: "listsinceblock optional2", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listsinceblock", "123", 6) + return dcrjson.NewCmd("listsinceblock", "123", 6) }, staticCmd: func() interface{} { - return btcjson.NewListSinceBlockCmd(btcjson.String("123"), btcjson.Int(6), nil) + return dcrjson.NewListSinceBlockCmd(dcrjson.String("123"), dcrjson.Int(6), nil) }, marshalled: `{"jsonrpc":"1.0","method":"listsinceblock","params":["123",6],"id":1}`, - unmarshalled: &btcjson.ListSinceBlockCmd{ - BlockHash: btcjson.String("123"), - TargetConfirmations: btcjson.Int(6), - IncludeWatchOnly: btcjson.Bool(false), + unmarshalled: &dcrjson.ListSinceBlockCmd{ + BlockHash: dcrjson.String("123"), + TargetConfirmations: dcrjson.Int(6), + IncludeWatchOnly: dcrjson.Bool(false), }, }, { name: "listsinceblock optional3", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listsinceblock", "123", 6, true) + return dcrjson.NewCmd("listsinceblock", "123", 6, true) }, staticCmd: func() interface{} { - return btcjson.NewListSinceBlockCmd(btcjson.String("123"), btcjson.Int(6), btcjson.Bool(true)) + return dcrjson.NewListSinceBlockCmd(dcrjson.String("123"), dcrjson.Int(6), dcrjson.Bool(true)) }, marshalled: `{"jsonrpc":"1.0","method":"listsinceblock","params":["123",6,true],"id":1}`, - unmarshalled: &btcjson.ListSinceBlockCmd{ - BlockHash: btcjson.String("123"), - TargetConfirmations: btcjson.Int(6), - IncludeWatchOnly: btcjson.Bool(true), + unmarshalled: &dcrjson.ListSinceBlockCmd{ + BlockHash: dcrjson.String("123"), + TargetConfirmations: dcrjson.Int(6), + IncludeWatchOnly: dcrjson.Bool(true), }, }, { name: "listtransactions", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listtransactions") + return dcrjson.NewCmd("listtransactions") }, staticCmd: func() interface{} { - return btcjson.NewListTransactionsCmd(nil, nil, nil, nil) + return dcrjson.NewListTransactionsCmd(nil, nil, nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"listtransactions","params":[],"id":1}`, - unmarshalled: &btcjson.ListTransactionsCmd{ + unmarshalled: &dcrjson.ListTransactionsCmd{ Account: nil, - Count: btcjson.Int(10), - From: btcjson.Int(0), - IncludeWatchOnly: btcjson.Bool(false), + Count: dcrjson.Int(10), + From: dcrjson.Int(0), + IncludeWatchOnly: dcrjson.Bool(false), }, }, { name: "listtransactions optional1", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listtransactions", "acct") + return dcrjson.NewCmd("listtransactions", "acct") }, staticCmd: func() interface{} { - return btcjson.NewListTransactionsCmd(btcjson.String("acct"), nil, nil, nil) + return dcrjson.NewListTransactionsCmd(dcrjson.String("acct"), nil, nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"listtransactions","params":["acct"],"id":1}`, - unmarshalled: &btcjson.ListTransactionsCmd{ - Account: btcjson.String("acct"), - Count: btcjson.Int(10), - From: btcjson.Int(0), - IncludeWatchOnly: btcjson.Bool(false), + unmarshalled: &dcrjson.ListTransactionsCmd{ + Account: dcrjson.String("acct"), + Count: dcrjson.Int(10), + From: dcrjson.Int(0), + IncludeWatchOnly: dcrjson.Bool(false), }, }, { name: "listtransactions optional2", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listtransactions", "acct", 20) + return dcrjson.NewCmd("listtransactions", "acct", 20) }, staticCmd: func() interface{} { - return btcjson.NewListTransactionsCmd(btcjson.String("acct"), btcjson.Int(20), nil, nil) + return dcrjson.NewListTransactionsCmd(dcrjson.String("acct"), dcrjson.Int(20), nil, nil) }, marshalled: 
`{"jsonrpc":"1.0","method":"listtransactions","params":["acct",20],"id":1}`, - unmarshalled: &btcjson.ListTransactionsCmd{ - Account: btcjson.String("acct"), - Count: btcjson.Int(20), - From: btcjson.Int(0), - IncludeWatchOnly: btcjson.Bool(false), + unmarshalled: &dcrjson.ListTransactionsCmd{ + Account: dcrjson.String("acct"), + Count: dcrjson.Int(20), + From: dcrjson.Int(0), + IncludeWatchOnly: dcrjson.Bool(false), }, }, { name: "listtransactions optional3", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listtransactions", "acct", 20, 1) + return dcrjson.NewCmd("listtransactions", "acct", 20, 1) }, staticCmd: func() interface{} { - return btcjson.NewListTransactionsCmd(btcjson.String("acct"), btcjson.Int(20), - btcjson.Int(1), nil) + return dcrjson.NewListTransactionsCmd(dcrjson.String("acct"), dcrjson.Int(20), + dcrjson.Int(1), nil) }, marshalled: `{"jsonrpc":"1.0","method":"listtransactions","params":["acct",20,1],"id":1}`, - unmarshalled: &btcjson.ListTransactionsCmd{ - Account: btcjson.String("acct"), - Count: btcjson.Int(20), - From: btcjson.Int(1), - IncludeWatchOnly: btcjson.Bool(false), + unmarshalled: &dcrjson.ListTransactionsCmd{ + Account: dcrjson.String("acct"), + Count: dcrjson.Int(20), + From: dcrjson.Int(1), + IncludeWatchOnly: dcrjson.Bool(false), }, }, { name: "listtransactions optional4", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listtransactions", "acct", 20, 1, true) + return dcrjson.NewCmd("listtransactions", "acct", 20, 1, true) }, staticCmd: func() interface{} { - return btcjson.NewListTransactionsCmd(btcjson.String("acct"), btcjson.Int(20), - btcjson.Int(1), btcjson.Bool(true)) + return dcrjson.NewListTransactionsCmd(dcrjson.String("acct"), dcrjson.Int(20), + dcrjson.Int(1), dcrjson.Bool(true)) }, marshalled: `{"jsonrpc":"1.0","method":"listtransactions","params":["acct",20,1,true],"id":1}`, - unmarshalled: &btcjson.ListTransactionsCmd{ - Account: btcjson.String("acct"), - Count: btcjson.Int(20), - From: btcjson.Int(1), - IncludeWatchOnly: btcjson.Bool(true), + unmarshalled: &dcrjson.ListTransactionsCmd{ + Account: dcrjson.String("acct"), + Count: dcrjson.Int(20), + From: dcrjson.Int(1), + IncludeWatchOnly: dcrjson.Bool(true), }, }, { name: "listunspent", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listunspent") + return dcrjson.NewCmd("listunspent") }, staticCmd: func() interface{} { - return btcjson.NewListUnspentCmd(nil, nil, nil) + return dcrjson.NewListUnspentCmd(nil, nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"listunspent","params":[],"id":1}`, - unmarshalled: &btcjson.ListUnspentCmd{ - MinConf: btcjson.Int(1), - MaxConf: btcjson.Int(9999999), + unmarshalled: &dcrjson.ListUnspentCmd{ + MinConf: dcrjson.Int(1), + MaxConf: dcrjson.Int(9999999), Addresses: nil, }, }, { name: "listunspent optional1", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listunspent", 6) + return dcrjson.NewCmd("listunspent", 6) }, staticCmd: func() interface{} { - return btcjson.NewListUnspentCmd(btcjson.Int(6), nil, nil) + return dcrjson.NewListUnspentCmd(dcrjson.Int(6), nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"listunspent","params":[6],"id":1}`, - unmarshalled: &btcjson.ListUnspentCmd{ - MinConf: btcjson.Int(6), - MaxConf: btcjson.Int(9999999), + unmarshalled: &dcrjson.ListUnspentCmd{ + MinConf: dcrjson.Int(6), + MaxConf: dcrjson.Int(9999999), Addresses: nil, }, }, { name: "listunspent optional2", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listunspent", 6, 100) + return 
dcrjson.NewCmd("listunspent", 6, 100) }, staticCmd: func() interface{} { - return btcjson.NewListUnspentCmd(btcjson.Int(6), btcjson.Int(100), nil) + return dcrjson.NewListUnspentCmd(dcrjson.Int(6), dcrjson.Int(100), nil) }, marshalled: `{"jsonrpc":"1.0","method":"listunspent","params":[6,100],"id":1}`, - unmarshalled: &btcjson.ListUnspentCmd{ - MinConf: btcjson.Int(6), - MaxConf: btcjson.Int(100), + unmarshalled: &dcrjson.ListUnspentCmd{ + MinConf: dcrjson.Int(6), + MaxConf: dcrjson.Int(100), Addresses: nil, }, }, { name: "listunspent optional3", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listunspent", 6, 100, []string{"1Address", "1Address2"}) + return dcrjson.NewCmd("listunspent", 6, 100, []string{"1Address", "1Address2"}) }, staticCmd: func() interface{} { - return btcjson.NewListUnspentCmd(btcjson.Int(6), btcjson.Int(100), + return dcrjson.NewListUnspentCmd(dcrjson.Int(6), dcrjson.Int(100), &[]string{"1Address", "1Address2"}) }, marshalled: `{"jsonrpc":"1.0","method":"listunspent","params":[6,100,["1Address","1Address2"]],"id":1}`, - unmarshalled: &btcjson.ListUnspentCmd{ - MinConf: btcjson.Int(6), - MaxConf: btcjson.Int(100), + unmarshalled: &dcrjson.ListUnspentCmd{ + MinConf: dcrjson.Int(6), + MaxConf: dcrjson.Int(100), Addresses: &[]string{"1Address", "1Address2"}, }, }, { name: "lockunspent", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("lockunspent", true, `[{"txid":"123","vout":1}]`) + return dcrjson.NewCmd("lockunspent", true, `[{"txid":"123","vout":1}]`) }, staticCmd: func() interface{} { - txInputs := []btcjson.TransactionInput{ + txInputs := []dcrjson.TransactionInput{ {Txid: "123", Vout: 1}, } - return btcjson.NewLockUnspentCmd(true, txInputs) + return dcrjson.NewLockUnspentCmd(true, txInputs) }, - marshalled: `{"jsonrpc":"1.0","method":"lockunspent","params":[true,[{"txid":"123","vout":1}]],"id":1}`, - unmarshalled: &btcjson.LockUnspentCmd{ + marshalled: `{"jsonrpc":"1.0","method":"lockunspent","params":[true,[{"txid":"123","vout":1,"tree":0}]],"id":1}`, + unmarshalled: &dcrjson.LockUnspentCmd{ Unlock: true, - Transactions: []btcjson.TransactionInput{ + Transactions: []dcrjson.TransactionInput{ {Txid: "123", Vout: 1}, }, }, @@ -809,68 +814,68 @@ func TestWalletSvrCmds(t *testing.T) { { name: "move", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("move", "from", "to", 0.5) + return dcrjson.NewCmd("move", "from", "to", 0.5) }, staticCmd: func() interface{} { - return btcjson.NewMoveCmd("from", "to", 0.5, nil, nil) + return dcrjson.NewMoveCmd("from", "to", 0.5, nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"move","params":["from","to",0.5],"id":1}`, - unmarshalled: &btcjson.MoveCmd{ + unmarshalled: &dcrjson.MoveCmd{ FromAccount: "from", ToAccount: "to", Amount: 0.5, - MinConf: btcjson.Int(1), + MinConf: dcrjson.Int(1), Comment: nil, }, }, { name: "move optional1", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("move", "from", "to", 0.5, 6) + return dcrjson.NewCmd("move", "from", "to", 0.5, 6) }, staticCmd: func() interface{} { - return btcjson.NewMoveCmd("from", "to", 0.5, btcjson.Int(6), nil) + return dcrjson.NewMoveCmd("from", "to", 0.5, dcrjson.Int(6), nil) }, marshalled: `{"jsonrpc":"1.0","method":"move","params":["from","to",0.5,6],"id":1}`, - unmarshalled: &btcjson.MoveCmd{ + unmarshalled: &dcrjson.MoveCmd{ FromAccount: "from", ToAccount: "to", Amount: 0.5, - MinConf: btcjson.Int(6), + MinConf: dcrjson.Int(6), Comment: nil, }, }, { name: "move optional2", newCmd: func() (interface{}, error) { - 
return btcjson.NewCmd("move", "from", "to", 0.5, 6, "comment") + return dcrjson.NewCmd("move", "from", "to", 0.5, 6, "comment") }, staticCmd: func() interface{} { - return btcjson.NewMoveCmd("from", "to", 0.5, btcjson.Int(6), btcjson.String("comment")) + return dcrjson.NewMoveCmd("from", "to", 0.5, dcrjson.Int(6), dcrjson.String("comment")) }, marshalled: `{"jsonrpc":"1.0","method":"move","params":["from","to",0.5,6,"comment"],"id":1}`, - unmarshalled: &btcjson.MoveCmd{ + unmarshalled: &dcrjson.MoveCmd{ FromAccount: "from", ToAccount: "to", Amount: 0.5, - MinConf: btcjson.Int(6), - Comment: btcjson.String("comment"), + MinConf: dcrjson.Int(6), + Comment: dcrjson.String("comment"), }, }, { name: "sendfrom", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("sendfrom", "from", "1Address", 0.5) + return dcrjson.NewCmd("sendfrom", "from", "1Address", 0.5) }, staticCmd: func() interface{} { - return btcjson.NewSendFromCmd("from", "1Address", 0.5, nil, nil, nil) + return dcrjson.NewSendFromCmd("from", "1Address", 0.5, nil, nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"sendfrom","params":["from","1Address",0.5],"id":1}`, - unmarshalled: &btcjson.SendFromCmd{ + unmarshalled: &dcrjson.SendFromCmd{ FromAccount: "from", ToAddress: "1Address", Amount: 0.5, - MinConf: btcjson.Int(1), + MinConf: dcrjson.Int(1), Comment: nil, CommentTo: nil, }, @@ -878,17 +883,17 @@ func TestWalletSvrCmds(t *testing.T) { { name: "sendfrom optional1", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("sendfrom", "from", "1Address", 0.5, 6) + return dcrjson.NewCmd("sendfrom", "from", "1Address", 0.5, 6) }, staticCmd: func() interface{} { - return btcjson.NewSendFromCmd("from", "1Address", 0.5, btcjson.Int(6), nil, nil) + return dcrjson.NewSendFromCmd("from", "1Address", 0.5, dcrjson.Int(6), nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"sendfrom","params":["from","1Address",0.5,6],"id":1}`, - unmarshalled: &btcjson.SendFromCmd{ + unmarshalled: &dcrjson.SendFromCmd{ FromAccount: "from", ToAddress: "1Address", Amount: 0.5, - MinConf: btcjson.Int(6), + MinConf: dcrjson.Int(6), Comment: nil, CommentTo: nil, }, @@ -896,102 +901,102 @@ func TestWalletSvrCmds(t *testing.T) { { name: "sendfrom optional2", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("sendfrom", "from", "1Address", 0.5, 6, "comment") + return dcrjson.NewCmd("sendfrom", "from", "1Address", 0.5, 6, "comment") }, staticCmd: func() interface{} { - return btcjson.NewSendFromCmd("from", "1Address", 0.5, btcjson.Int(6), - btcjson.String("comment"), nil) + return dcrjson.NewSendFromCmd("from", "1Address", 0.5, dcrjson.Int(6), + dcrjson.String("comment"), nil) }, marshalled: `{"jsonrpc":"1.0","method":"sendfrom","params":["from","1Address",0.5,6,"comment"],"id":1}`, - unmarshalled: &btcjson.SendFromCmd{ + unmarshalled: &dcrjson.SendFromCmd{ FromAccount: "from", ToAddress: "1Address", Amount: 0.5, - MinConf: btcjson.Int(6), - Comment: btcjson.String("comment"), + MinConf: dcrjson.Int(6), + Comment: dcrjson.String("comment"), CommentTo: nil, }, }, { name: "sendfrom optional3", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("sendfrom", "from", "1Address", 0.5, 6, "comment", "commentto") + return dcrjson.NewCmd("sendfrom", "from", "1Address", 0.5, 6, "comment", "commentto") }, staticCmd: func() interface{} { - return btcjson.NewSendFromCmd("from", "1Address", 0.5, btcjson.Int(6), - btcjson.String("comment"), btcjson.String("commentto")) + return dcrjson.NewSendFromCmd("from", "1Address", 0.5, dcrjson.Int(6), + 
dcrjson.String("comment"), dcrjson.String("commentto")) }, marshalled: `{"jsonrpc":"1.0","method":"sendfrom","params":["from","1Address",0.5,6,"comment","commentto"],"id":1}`, - unmarshalled: &btcjson.SendFromCmd{ + unmarshalled: &dcrjson.SendFromCmd{ FromAccount: "from", ToAddress: "1Address", Amount: 0.5, - MinConf: btcjson.Int(6), - Comment: btcjson.String("comment"), - CommentTo: btcjson.String("commentto"), + MinConf: dcrjson.Int(6), + Comment: dcrjson.String("comment"), + CommentTo: dcrjson.String("commentto"), }, }, { name: "sendmany", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("sendmany", "from", `{"1Address":0.5}`) + return dcrjson.NewCmd("sendmany", "from", `{"1Address":0.5}`) }, staticCmd: func() interface{} { amounts := map[string]float64{"1Address": 0.5} - return btcjson.NewSendManyCmd("from", amounts, nil, nil) + return dcrjson.NewSendManyCmd("from", amounts, nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"sendmany","params":["from",{"1Address":0.5}],"id":1}`, - unmarshalled: &btcjson.SendManyCmd{ + unmarshalled: &dcrjson.SendManyCmd{ FromAccount: "from", Amounts: map[string]float64{"1Address": 0.5}, - MinConf: btcjson.Int(1), + MinConf: dcrjson.Int(1), Comment: nil, }, }, { name: "sendmany optional1", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("sendmany", "from", `{"1Address":0.5}`, 6) + return dcrjson.NewCmd("sendmany", "from", `{"1Address":0.5}`, 6) }, staticCmd: func() interface{} { amounts := map[string]float64{"1Address": 0.5} - return btcjson.NewSendManyCmd("from", amounts, btcjson.Int(6), nil) + return dcrjson.NewSendManyCmd("from", amounts, dcrjson.Int(6), nil) }, marshalled: `{"jsonrpc":"1.0","method":"sendmany","params":["from",{"1Address":0.5},6],"id":1}`, - unmarshalled: &btcjson.SendManyCmd{ + unmarshalled: &dcrjson.SendManyCmd{ FromAccount: "from", Amounts: map[string]float64{"1Address": 0.5}, - MinConf: btcjson.Int(6), + MinConf: dcrjson.Int(6), Comment: nil, }, }, { name: "sendmany optional2", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("sendmany", "from", `{"1Address":0.5}`, 6, "comment") + return dcrjson.NewCmd("sendmany", "from", `{"1Address":0.5}`, 6, "comment") }, staticCmd: func() interface{} { amounts := map[string]float64{"1Address": 0.5} - return btcjson.NewSendManyCmd("from", amounts, btcjson.Int(6), btcjson.String("comment")) + return dcrjson.NewSendManyCmd("from", amounts, dcrjson.Int(6), dcrjson.String("comment")) }, marshalled: `{"jsonrpc":"1.0","method":"sendmany","params":["from",{"1Address":0.5},6,"comment"],"id":1}`, - unmarshalled: &btcjson.SendManyCmd{ + unmarshalled: &dcrjson.SendManyCmd{ FromAccount: "from", Amounts: map[string]float64{"1Address": 0.5}, - MinConf: btcjson.Int(6), - Comment: btcjson.String("comment"), + MinConf: dcrjson.Int(6), + Comment: dcrjson.String("comment"), }, }, { name: "sendtoaddress", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("sendtoaddress", "1Address", 0.5) + return dcrjson.NewCmd("sendtoaddress", "1Address", 0.5) }, staticCmd: func() interface{} { - return btcjson.NewSendToAddressCmd("1Address", 0.5, nil, nil) + return dcrjson.NewSendToAddressCmd("1Address", 0.5, nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"sendtoaddress","params":["1Address",0.5],"id":1}`, - unmarshalled: &btcjson.SendToAddressCmd{ + unmarshalled: &dcrjson.SendToAddressCmd{ Address: "1Address", Amount: 0.5, Comment: nil, @@ -1001,30 +1006,30 @@ func TestWalletSvrCmds(t *testing.T) { { name: "sendtoaddress optional1", newCmd: func() (interface{}, error) 
{ - return btcjson.NewCmd("sendtoaddress", "1Address", 0.5, "comment", "commentto") + return dcrjson.NewCmd("sendtoaddress", "1Address", 0.5, "comment", "commentto") }, staticCmd: func() interface{} { - return btcjson.NewSendToAddressCmd("1Address", 0.5, btcjson.String("comment"), - btcjson.String("commentto")) + return dcrjson.NewSendToAddressCmd("1Address", 0.5, dcrjson.String("comment"), + dcrjson.String("commentto")) }, marshalled: `{"jsonrpc":"1.0","method":"sendtoaddress","params":["1Address",0.5,"comment","commentto"],"id":1}`, - unmarshalled: &btcjson.SendToAddressCmd{ + unmarshalled: &dcrjson.SendToAddressCmd{ Address: "1Address", Amount: 0.5, - Comment: btcjson.String("comment"), - CommentTo: btcjson.String("commentto"), + Comment: dcrjson.String("comment"), + CommentTo: dcrjson.String("commentto"), }, }, { name: "setaccount", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("setaccount", "1Address", "acct") + return dcrjson.NewCmd("setaccount", "1Address", "acct") }, staticCmd: func() interface{} { - return btcjson.NewSetAccountCmd("1Address", "acct") + return dcrjson.NewSetAccountCmd("1Address", "acct") }, marshalled: `{"jsonrpc":"1.0","method":"setaccount","params":["1Address","acct"],"id":1}`, - unmarshalled: &btcjson.SetAccountCmd{ + unmarshalled: &dcrjson.SetAccountCmd{ Address: "1Address", Account: "acct", }, @@ -1032,26 +1037,26 @@ func TestWalletSvrCmds(t *testing.T) { { name: "settxfee", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("settxfee", 0.0001) + return dcrjson.NewCmd("settxfee", 0.0001) }, staticCmd: func() interface{} { - return btcjson.NewSetTxFeeCmd(0.0001) + return dcrjson.NewSetTxFeeCmd(0.0001) }, marshalled: `{"jsonrpc":"1.0","method":"settxfee","params":[0.0001],"id":1}`, - unmarshalled: &btcjson.SetTxFeeCmd{ + unmarshalled: &dcrjson.SetTxFeeCmd{ Amount: 0.0001, }, }, { name: "signmessage", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("signmessage", "1Address", "message") + return dcrjson.NewCmd("signmessage", "1Address", "message") }, staticCmd: func() interface{} { - return btcjson.NewSignMessageCmd("1Address", "message") + return dcrjson.NewSignMessageCmd("1Address", "message") }, marshalled: `{"jsonrpc":"1.0","method":"signmessage","params":["1Address","message"],"id":1}`, - unmarshalled: &btcjson.SignMessageCmd{ + unmarshalled: &dcrjson.SignMessageCmd{ Address: "1Address", Message: "message", }, @@ -1059,26 +1064,26 @@ func TestWalletSvrCmds(t *testing.T) { { name: "signrawtransaction", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("signrawtransaction", "001122") + return dcrjson.NewCmd("signrawtransaction", "001122") }, staticCmd: func() interface{} { - return btcjson.NewSignRawTransactionCmd("001122", nil, nil, nil) + return dcrjson.NewSignRawTransactionCmd("001122", nil, nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"signrawtransaction","params":["001122"],"id":1}`, - unmarshalled: &btcjson.SignRawTransactionCmd{ + unmarshalled: &dcrjson.SignRawTransactionCmd{ RawTx: "001122", Inputs: nil, PrivKeys: nil, - Flags: btcjson.String("ALL"), + Flags: dcrjson.String("ALL"), }, }, { name: "signrawtransaction optional1", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("signrawtransaction", "001122", `[{"txid":"123","vout":1,"scriptPubKey":"00","redeemScript":"01"}]`) + return dcrjson.NewCmd("signrawtransaction", "001122", `[{"txid":"123","vout":1,"tree":0,"scriptPubKey":"00","redeemScript":"01"}]`) }, staticCmd: func() interface{} { - txInputs := []btcjson.RawTxInput{ + 
txInputs := []dcrjson.RawTxInput{ { Txid: "123", Vout: 1, @@ -1087,12 +1092,12 @@ func TestWalletSvrCmds(t *testing.T) { }, } - return btcjson.NewSignRawTransactionCmd("001122", &txInputs, nil, nil) + return dcrjson.NewSignRawTransactionCmd("001122", &txInputs, nil, nil) }, - marshalled: `{"jsonrpc":"1.0","method":"signrawtransaction","params":["001122",[{"txid":"123","vout":1,"scriptPubKey":"00","redeemScript":"01"}]],"id":1}`, - unmarshalled: &btcjson.SignRawTransactionCmd{ + marshalled: `{"jsonrpc":"1.0","method":"signrawtransaction","params":["001122",[{"txid":"123","vout":1,"tree":0,"scriptPubKey":"00","redeemScript":"01"}]],"id":1}`, + unmarshalled: &dcrjson.SignRawTransactionCmd{ RawTx: "001122", - Inputs: &[]btcjson.RawTxInput{ + Inputs: &[]dcrjson.RawTxInput{ { Txid: "123", Vout: 1, @@ -1101,67 +1106,67 @@ func TestWalletSvrCmds(t *testing.T) { }, }, PrivKeys: nil, - Flags: btcjson.String("ALL"), + Flags: dcrjson.String("ALL"), }, }, { name: "signrawtransaction optional2", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("signrawtransaction", "001122", `[]`, `["abc"]`) + return dcrjson.NewCmd("signrawtransaction", "001122", `[]`, `["abc"]`) }, staticCmd: func() interface{} { - txInputs := []btcjson.RawTxInput{} + txInputs := []dcrjson.RawTxInput{} privKeys := []string{"abc"} - return btcjson.NewSignRawTransactionCmd("001122", &txInputs, &privKeys, nil) + return dcrjson.NewSignRawTransactionCmd("001122", &txInputs, &privKeys, nil) }, marshalled: `{"jsonrpc":"1.0","method":"signrawtransaction","params":["001122",[],["abc"]],"id":1}`, - unmarshalled: &btcjson.SignRawTransactionCmd{ + unmarshalled: &dcrjson.SignRawTransactionCmd{ RawTx: "001122", - Inputs: &[]btcjson.RawTxInput{}, + Inputs: &[]dcrjson.RawTxInput{}, PrivKeys: &[]string{"abc"}, - Flags: btcjson.String("ALL"), + Flags: dcrjson.String("ALL"), }, }, { name: "signrawtransaction optional3", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("signrawtransaction", "001122", `[]`, `[]`, "ALL") + return dcrjson.NewCmd("signrawtransaction", "001122", `[]`, `[]`, "ALL") }, staticCmd: func() interface{} { - txInputs := []btcjson.RawTxInput{} + txInputs := []dcrjson.RawTxInput{} privKeys := []string{} - return btcjson.NewSignRawTransactionCmd("001122", &txInputs, &privKeys, - btcjson.String("ALL")) + return dcrjson.NewSignRawTransactionCmd("001122", &txInputs, &privKeys, + dcrjson.String("ALL")) }, marshalled: `{"jsonrpc":"1.0","method":"signrawtransaction","params":["001122",[],[],"ALL"],"id":1}`, - unmarshalled: &btcjson.SignRawTransactionCmd{ + unmarshalled: &dcrjson.SignRawTransactionCmd{ RawTx: "001122", - Inputs: &[]btcjson.RawTxInput{}, + Inputs: &[]dcrjson.RawTxInput{}, PrivKeys: &[]string{}, - Flags: btcjson.String("ALL"), + Flags: dcrjson.String("ALL"), }, }, { name: "walletlock", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("walletlock") + return dcrjson.NewCmd("walletlock") }, staticCmd: func() interface{} { - return btcjson.NewWalletLockCmd() + return dcrjson.NewWalletLockCmd() }, marshalled: `{"jsonrpc":"1.0","method":"walletlock","params":[],"id":1}`, - unmarshalled: &btcjson.WalletLockCmd{}, + unmarshalled: &dcrjson.WalletLockCmd{}, }, { name: "walletpassphrase", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("walletpassphrase", "pass", 60) + return dcrjson.NewCmd("walletpassphrase", "pass", 60) }, staticCmd: func() interface{} { - return btcjson.NewWalletPassphraseCmd("pass", 60) + return dcrjson.NewWalletPassphraseCmd("pass", 60) }, marshalled: 
`{"jsonrpc":"1.0","method":"walletpassphrase","params":["pass",60],"id":1}`, - unmarshalled: &btcjson.WalletPassphraseCmd{ + unmarshalled: &dcrjson.WalletPassphraseCmd{ Passphrase: "pass", Timeout: 60, }, @@ -1169,13 +1174,13 @@ func TestWalletSvrCmds(t *testing.T) { { name: "walletpassphrasechange", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("walletpassphrasechange", "old", "new") + return dcrjson.NewCmd("walletpassphrasechange", "old", "new") }, staticCmd: func() interface{} { - return btcjson.NewWalletPassphraseChangeCmd("old", "new") + return dcrjson.NewWalletPassphraseChangeCmd("old", "new") }, marshalled: `{"jsonrpc":"1.0","method":"walletpassphrasechange","params":["old","new"],"id":1}`, - unmarshalled: &btcjson.WalletPassphraseChangeCmd{ + unmarshalled: &dcrjson.WalletPassphraseChangeCmd{ OldPassphrase: "old", NewPassphrase: "new", }, @@ -1186,7 +1191,7 @@ func TestWalletSvrCmds(t *testing.T) { for i, test := range tests { // Marshal the command as created by the new static command // creation function. - marshalled, err := btcjson.MarshalCmd(testID, test.staticCmd()) + marshalled, err := dcrjson.MarshalCmd(testID, test.staticCmd()) if err != nil { t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) @@ -1210,7 +1215,7 @@ func TestWalletSvrCmds(t *testing.T) { // Marshal the command as created by the generic new command // creation function. - marshalled, err = btcjson.MarshalCmd(testID, cmd) + marshalled, err = dcrjson.MarshalCmd(testID, cmd) if err != nil { t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) @@ -1224,7 +1229,7 @@ func TestWalletSvrCmds(t *testing.T) { continue } - var request btcjson.Request + var request dcrjson.Request if err := json.Unmarshal(marshalled, &request); err != nil { t.Errorf("Test #%d (%s) unexpected error while "+ "unmarshalling JSON-RPC request: %v", i, @@ -1232,7 +1237,7 @@ func TestWalletSvrCmds(t *testing.T) { continue } - cmd, err = btcjson.UnmarshalCmd(&request) + cmd, err = dcrjson.UnmarshalCmd(&request) if err != nil { t.Errorf("UnmarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) diff --git a/btcjson/walletsvrresults.go b/dcrjson/walletsvrresults.go similarity index 96% rename from btcjson/walletsvrresults.go rename to dcrjson/walletsvrresults.go index 91c80682..45f5f697 100644 --- a/btcjson/walletsvrresults.go +++ b/dcrjson/walletsvrresults.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson +package dcrjson // GetTransactionDetailsResult models the details data from the gettransaction command. // @@ -104,9 +105,12 @@ type ListSinceBlockResult struct { } // ListUnspentResult models a successful response from the listunspent request. +// Contains Decred additions. 
type ListUnspentResult struct { TxID string `json:"txid"` Vout uint32 `json:"vout"` + Tree int8 `json:"tree"` + TxType int `json:"txtype"` Address string `json:"address"` Account string `json:"account"` ScriptPubKey string `json:"scriptPubKey"` @@ -141,6 +145,7 @@ type ValidateAddressWalletResult struct { IsMine bool `json:"ismine,omitempty"` IsWatchOnly bool `json:"iswatchonly,omitempty"` IsScript bool `json:"isscript,omitempty"` + PubKeyAddr string `json:"pubkeyaddr,omitempty"` PubKey string `json:"pubkey,omitempty"` IsCompressed bool `json:"iscompressed,omitempty"` Account string `json:"account,omitempty"` diff --git a/btcjson/walletsvrwscmds.go b/dcrjson/walletsvrwscmds.go similarity index 98% rename from btcjson/walletsvrwscmds.go rename to dcrjson/walletsvrwscmds.go index e1e60fbe..2c5644d8 100644 --- a/btcjson/walletsvrwscmds.go +++ b/dcrjson/walletsvrwscmds.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson +package dcrjson // NOTE: This file is intended to house the RPC commands that are supported by // a wallet server, but are only available via websockets. diff --git a/btcjson/walletsvrwscmds_test.go b/dcrjson/walletsvrwscmds_test.go similarity index 67% rename from btcjson/walletsvrwscmds_test.go rename to dcrjson/walletsvrwscmds_test.go index 17144b6e..e3004732 100644 --- a/btcjson/walletsvrwscmds_test.go +++ b/dcrjson/walletsvrwscmds_test.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package btcjson_test +package dcrjson_test import ( "bytes" @@ -11,7 +12,7 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // TestWalletSvrWsCmds tests all of the wallet server websocket-specific @@ -32,93 +33,93 @@ func TestWalletSvrWsCmds(t *testing.T) { { name: "createencryptedwallet", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("createencryptedwallet", "pass") + return dcrjson.NewCmd("createencryptedwallet", "pass") }, staticCmd: func() interface{} { - return btcjson.NewCreateEncryptedWalletCmd("pass") + return dcrjson.NewCreateEncryptedWalletCmd("pass") }, marshalled: `{"jsonrpc":"1.0","method":"createencryptedwallet","params":["pass"],"id":1}`, - unmarshalled: &btcjson.CreateEncryptedWalletCmd{Passphrase: "pass"}, + unmarshalled: &dcrjson.CreateEncryptedWalletCmd{Passphrase: "pass"}, }, { name: "exportwatchingwallet", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("exportwatchingwallet") + return dcrjson.NewCmd("exportwatchingwallet") }, staticCmd: func() interface{} { - return btcjson.NewExportWatchingWalletCmd(nil, nil) + return dcrjson.NewExportWatchingWalletCmd(nil, nil) }, marshalled: `{"jsonrpc":"1.0","method":"exportwatchingwallet","params":[],"id":1}`, - unmarshalled: &btcjson.ExportWatchingWalletCmd{ + unmarshalled: &dcrjson.ExportWatchingWalletCmd{ Account: nil, - Download: btcjson.Bool(false), + Download: dcrjson.Bool(false), }, }, { name: "exportwatchingwallet optional1", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("exportwatchingwallet", "acct") + return dcrjson.NewCmd("exportwatchingwallet", "acct") }, staticCmd: func() interface{} { - return btcjson.NewExportWatchingWalletCmd(btcjson.String("acct"), nil) + return 
dcrjson.NewExportWatchingWalletCmd(dcrjson.String("acct"), nil) }, marshalled: `{"jsonrpc":"1.0","method":"exportwatchingwallet","params":["acct"],"id":1}`, - unmarshalled: &btcjson.ExportWatchingWalletCmd{ - Account: btcjson.String("acct"), - Download: btcjson.Bool(false), + unmarshalled: &dcrjson.ExportWatchingWalletCmd{ + Account: dcrjson.String("acct"), + Download: dcrjson.Bool(false), }, }, { name: "exportwatchingwallet optional2", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("exportwatchingwallet", "acct", true) + return dcrjson.NewCmd("exportwatchingwallet", "acct", true) }, staticCmd: func() interface{} { - return btcjson.NewExportWatchingWalletCmd(btcjson.String("acct"), - btcjson.Bool(true)) + return dcrjson.NewExportWatchingWalletCmd(dcrjson.String("acct"), + dcrjson.Bool(true)) }, marshalled: `{"jsonrpc":"1.0","method":"exportwatchingwallet","params":["acct",true],"id":1}`, - unmarshalled: &btcjson.ExportWatchingWalletCmd{ - Account: btcjson.String("acct"), - Download: btcjson.Bool(true), + unmarshalled: &dcrjson.ExportWatchingWalletCmd{ + Account: dcrjson.String("acct"), + Download: dcrjson.Bool(true), }, }, { name: "getunconfirmedbalance", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getunconfirmedbalance") + return dcrjson.NewCmd("getunconfirmedbalance") }, staticCmd: func() interface{} { - return btcjson.NewGetUnconfirmedBalanceCmd(nil) + return dcrjson.NewGetUnconfirmedBalanceCmd(nil) }, marshalled: `{"jsonrpc":"1.0","method":"getunconfirmedbalance","params":[],"id":1}`, - unmarshalled: &btcjson.GetUnconfirmedBalanceCmd{ + unmarshalled: &dcrjson.GetUnconfirmedBalanceCmd{ Account: nil, }, }, { name: "getunconfirmedbalance optional1", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("getunconfirmedbalance", "acct") + return dcrjson.NewCmd("getunconfirmedbalance", "acct") }, staticCmd: func() interface{} { - return btcjson.NewGetUnconfirmedBalanceCmd(btcjson.String("acct")) + return dcrjson.NewGetUnconfirmedBalanceCmd(dcrjson.String("acct")) }, marshalled: `{"jsonrpc":"1.0","method":"getunconfirmedbalance","params":["acct"],"id":1}`, - unmarshalled: &btcjson.GetUnconfirmedBalanceCmd{ - Account: btcjson.String("acct"), + unmarshalled: &dcrjson.GetUnconfirmedBalanceCmd{ + Account: dcrjson.String("acct"), }, }, { name: "listaddresstransactions", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listaddresstransactions", `["1Address"]`) + return dcrjson.NewCmd("listaddresstransactions", `["1Address"]`) }, staticCmd: func() interface{} { - return btcjson.NewListAddressTransactionsCmd([]string{"1Address"}, nil) + return dcrjson.NewListAddressTransactionsCmd([]string{"1Address"}, nil) }, marshalled: `{"jsonrpc":"1.0","method":"listaddresstransactions","params":[["1Address"]],"id":1}`, - unmarshalled: &btcjson.ListAddressTransactionsCmd{ + unmarshalled: &dcrjson.ListAddressTransactionsCmd{ Addresses: []string{"1Address"}, Account: nil, }, @@ -126,54 +127,54 @@ func TestWalletSvrWsCmds(t *testing.T) { { name: "listaddresstransactions optional1", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listaddresstransactions", `["1Address"]`, "acct") + return dcrjson.NewCmd("listaddresstransactions", `["1Address"]`, "acct") }, staticCmd: func() interface{} { - return btcjson.NewListAddressTransactionsCmd([]string{"1Address"}, - btcjson.String("acct")) + return dcrjson.NewListAddressTransactionsCmd([]string{"1Address"}, + dcrjson.String("acct")) }, marshalled: 
`{"jsonrpc":"1.0","method":"listaddresstransactions","params":[["1Address"],"acct"],"id":1}`, - unmarshalled: &btcjson.ListAddressTransactionsCmd{ + unmarshalled: &dcrjson.ListAddressTransactionsCmd{ Addresses: []string{"1Address"}, - Account: btcjson.String("acct"), + Account: dcrjson.String("acct"), }, }, { name: "listalltransactions", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listalltransactions") + return dcrjson.NewCmd("listalltransactions") }, staticCmd: func() interface{} { - return btcjson.NewListAllTransactionsCmd(nil) + return dcrjson.NewListAllTransactionsCmd(nil) }, marshalled: `{"jsonrpc":"1.0","method":"listalltransactions","params":[],"id":1}`, - unmarshalled: &btcjson.ListAllTransactionsCmd{ + unmarshalled: &dcrjson.ListAllTransactionsCmd{ Account: nil, }, }, { name: "listalltransactions optional", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("listalltransactions", "acct") + return dcrjson.NewCmd("listalltransactions", "acct") }, staticCmd: func() interface{} { - return btcjson.NewListAllTransactionsCmd(btcjson.String("acct")) + return dcrjson.NewListAllTransactionsCmd(dcrjson.String("acct")) }, marshalled: `{"jsonrpc":"1.0","method":"listalltransactions","params":["acct"],"id":1}`, - unmarshalled: &btcjson.ListAllTransactionsCmd{ - Account: btcjson.String("acct"), + unmarshalled: &dcrjson.ListAllTransactionsCmd{ + Account: dcrjson.String("acct"), }, }, { name: "recoveraddresses", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("recoveraddresses", "acct", 10) + return dcrjson.NewCmd("recoveraddresses", "acct", 10) }, staticCmd: func() interface{} { - return btcjson.NewRecoverAddressesCmd("acct", 10) + return dcrjson.NewRecoverAddressesCmd("acct", 10) }, marshalled: `{"jsonrpc":"1.0","method":"recoveraddresses","params":["acct",10],"id":1}`, - unmarshalled: &btcjson.RecoverAddressesCmd{ + unmarshalled: &dcrjson.RecoverAddressesCmd{ Account: "acct", N: 10, }, @@ -181,13 +182,13 @@ func TestWalletSvrWsCmds(t *testing.T) { { name: "walletislocked", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("walletislocked") + return dcrjson.NewCmd("walletislocked") }, staticCmd: func() interface{} { - return btcjson.NewWalletIsLockedCmd() + return dcrjson.NewWalletIsLockedCmd() }, marshalled: `{"jsonrpc":"1.0","method":"walletislocked","params":[],"id":1}`, - unmarshalled: &btcjson.WalletIsLockedCmd{}, + unmarshalled: &dcrjson.WalletIsLockedCmd{}, }, } @@ -195,7 +196,7 @@ func TestWalletSvrWsCmds(t *testing.T) { for i, test := range tests { // Marshal the command as created by the new static command // creation function. - marshalled, err := btcjson.MarshalCmd(testID, test.staticCmd()) + marshalled, err := dcrjson.MarshalCmd(testID, test.staticCmd()) if err != nil { t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) @@ -219,7 +220,7 @@ func TestWalletSvrWsCmds(t *testing.T) { // Marshal the command as created by the generic new command // creation function. 
- marshalled, err = btcjson.MarshalCmd(testID, cmd) + marshalled, err = dcrjson.MarshalCmd(testID, cmd) if err != nil { t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) @@ -233,7 +234,7 @@ func TestWalletSvrWsCmds(t *testing.T) { continue } - var request btcjson.Request + var request dcrjson.Request if err := json.Unmarshal(marshalled, &request); err != nil { t.Errorf("Test #%d (%s) unexpected error while "+ "unmarshalling JSON-RPC request: %v", i, @@ -241,7 +242,7 @@ func TestWalletSvrWsCmds(t *testing.T) { continue } - cmd, err = btcjson.UnmarshalCmd(&request) + cmd, err = dcrjson.UnmarshalCmd(&request) if err != nil { t.Errorf("UnmarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) diff --git a/btcjson/walletsvrwsntfns.go b/dcrjson/walletsvrwsntfns.go similarity index 91% rename from btcjson/walletsvrwsntfns.go rename to dcrjson/walletsvrwsntfns.go index 26dc1508..30335bbe 100644 --- a/btcjson/walletsvrwsntfns.go +++ b/dcrjson/walletsvrwsntfns.go @@ -1,11 +1,12 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. // NOTE: This file is intended to house the RPC websocket notifications that are // supported by a wallet server. -package btcjson +package dcrjson const ( // AccountBalanceNtfnMethod is the method used for account balance @@ -14,7 +15,7 @@ const ( // BtcdConnectedNtfnMethod is the method used for notifications when // a wallet server is connected to a chain server. - BtcdConnectedNtfnMethod = "btcdconnected" + BtcdConnectedNtfnMethod = "dcrdconnected" // WalletLockStateNtfnMethod is the method used to notify the lock state // of a wallet has changed. @@ -28,7 +29,7 @@ const ( // AccountBalanceNtfn defines the accountbalance JSON-RPC notification. type AccountBalanceNtfn struct { Account string - Balance float64 // In BTC + Balance float64 // In DCR Confirmed bool // Whether Balance is confirmed or unconfirmed. } @@ -42,13 +43,13 @@ func NewAccountBalanceNtfn(account string, balance float64, confirmed bool) *Acc } } -// BtcdConnectedNtfn defines the btcdconnected JSON-RPC notification. +// BtcdConnectedNtfn defines the dcrddconnected JSON-RPC notification. type BtcdConnectedNtfn struct { Connected bool } // NewBtcdConnectedNtfn returns a new instance which can be used to issue a -// btcdconnected JSON-RPC notification. +// dcrddconnected JSON-RPC notification. func NewBtcdConnectedNtfn(connected bool) *BtcdConnectedNtfn { return &BtcdConnectedNtfn{ Connected: connected, diff --git a/btcjson/walletsvrwsntfns_test.go b/dcrjson/walletsvrwsntfns_test.go similarity index 79% rename from btcjson/walletsvrwsntfns_test.go rename to dcrjson/walletsvrwsntfns_test.go index a1b267c5..e9fccdef 100644 --- a/btcjson/walletsvrwsntfns_test.go +++ b/dcrjson/walletsvrwsntfns_test.go @@ -1,8 +1,9 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package btcjson_test +package dcrjson_test import ( "bytes" @@ -11,7 +12,7 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // TestWalletSvrWsNtfns tests all of the chain server websocket-specific @@ -31,56 +32,56 @@ func TestWalletSvrWsNtfns(t *testing.T) { { name: "accountbalance", newNtfn: func() (interface{}, error) { - return btcjson.NewCmd("accountbalance", "acct", 1.25, true) + return dcrjson.NewCmd("accountbalance", "acct", 1.25, true) }, staticNtfn: func() interface{} { - return btcjson.NewAccountBalanceNtfn("acct", 1.25, true) + return dcrjson.NewAccountBalanceNtfn("acct", 1.25, true) }, marshalled: `{"jsonrpc":"1.0","method":"accountbalance","params":["acct",1.25,true],"id":null}`, - unmarshalled: &btcjson.AccountBalanceNtfn{ + unmarshalled: &dcrjson.AccountBalanceNtfn{ Account: "acct", Balance: 1.25, Confirmed: true, }, }, { - name: "btcdconnected", + name: "dcrdconnected", newNtfn: func() (interface{}, error) { - return btcjson.NewCmd("btcdconnected", true) + return dcrjson.NewCmd("dcrdconnected", true) }, staticNtfn: func() interface{} { - return btcjson.NewBtcdConnectedNtfn(true) + return dcrjson.NewBtcdConnectedNtfn(true) }, - marshalled: `{"jsonrpc":"1.0","method":"btcdconnected","params":[true],"id":null}`, - unmarshalled: &btcjson.BtcdConnectedNtfn{ + marshalled: `{"jsonrpc":"1.0","method":"dcrdconnected","params":[true],"id":null}`, + unmarshalled: &dcrjson.BtcdConnectedNtfn{ Connected: true, }, }, { name: "walletlockstate", newNtfn: func() (interface{}, error) { - return btcjson.NewCmd("walletlockstate", true) + return dcrjson.NewCmd("walletlockstate", true) }, staticNtfn: func() interface{} { - return btcjson.NewWalletLockStateNtfn(true) + return dcrjson.NewWalletLockStateNtfn(true) }, marshalled: `{"jsonrpc":"1.0","method":"walletlockstate","params":[true],"id":null}`, - unmarshalled: &btcjson.WalletLockStateNtfn{ + unmarshalled: &dcrjson.WalletLockStateNtfn{ Locked: true, }, }, { name: "newtx", newNtfn: func() (interface{}, error) { - return btcjson.NewCmd("newtx", "acct", `{"account":"acct","address":"1Address","category":"send","amount":1.5,"fee":0.0001,"confirmations":1,"txid":"456","walletconflicts":[],"time":12345678,"timereceived":12345876,"vout":789,"otheraccount":"otheracct"}`) + return dcrjson.NewCmd("newtx", "acct", `{"account":"acct","address":"1Address","category":"send","amount":1.5,"fee":0.0001,"confirmations":1,"txid":"456","walletconflicts":[],"time":12345678,"timereceived":12345876,"vout":789,"otheraccount":"otheracct"}`) }, staticNtfn: func() interface{} { - result := btcjson.ListTransactionsResult{ + result := dcrjson.ListTransactionsResult{ Account: "acct", Address: "1Address", Category: "send", Amount: 1.5, - Fee: btcjson.Float64(0.0001), + Fee: dcrjson.Float64(0.0001), Confirmations: 1, TxID: "456", WalletConflicts: []string{}, @@ -89,17 +90,17 @@ func TestWalletSvrWsNtfns(t *testing.T) { Vout: 789, OtherAccount: "otheracct", } - return btcjson.NewNewTxNtfn("acct", result) + return dcrjson.NewNewTxNtfn("acct", result) }, marshalled: `{"jsonrpc":"1.0","method":"newtx","params":["acct",{"account":"acct","address":"1Address","amount":1.5,"category":"send","confirmations":1,"fee":0.0001,"time":12345678,"timereceived":12345876,"txid":"456","vout":789,"walletconflicts":[],"otheraccount":"otheracct"}],"id":null}`, - unmarshalled: &btcjson.NewTxNtfn{ + unmarshalled: &dcrjson.NewTxNtfn{ Account: "acct", - Details: btcjson.ListTransactionsResult{ + Details: dcrjson.ListTransactionsResult{ Account: 
"acct", Address: "1Address", Category: "send", Amount: 1.5, - Fee: btcjson.Float64(0.0001), + Fee: dcrjson.Float64(0.0001), Confirmations: 1, TxID: "456", WalletConflicts: []string{}, @@ -116,7 +117,7 @@ func TestWalletSvrWsNtfns(t *testing.T) { for i, test := range tests { // Marshal the notification as created by the new static // creation function. The ID is nil for notifications. - marshalled, err := btcjson.MarshalCmd(nil, test.staticNtfn()) + marshalled, err := dcrjson.MarshalCmd(nil, test.staticNtfn()) if err != nil { t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) @@ -141,7 +142,7 @@ func TestWalletSvrWsNtfns(t *testing.T) { // Marshal the notification as created by the generic new // notification creation function. The ID is nil for // notifications. - marshalled, err = btcjson.MarshalCmd(nil, cmd) + marshalled, err = dcrjson.MarshalCmd(nil, cmd) if err != nil { t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) @@ -155,7 +156,7 @@ func TestWalletSvrWsNtfns(t *testing.T) { continue } - var request btcjson.Request + var request dcrjson.Request if err := json.Unmarshal(marshalled, &request); err != nil { t.Errorf("Test #%d (%s) unexpected error while "+ "unmarshalling JSON-RPC request: %v", i, @@ -163,7 +164,7 @@ func TestWalletSvrWsNtfns(t *testing.T) { continue } - cmd, err = btcjson.UnmarshalCmd(&request) + cmd, err = dcrjson.UnmarshalCmd(&request) if err != nil { t.Errorf("UnmarshalCmd #%d (%s) unexpected error: %v", i, test.name, err) diff --git a/discovery.go b/discovery.go index 2117c45d..63098209 100644 --- a/discovery.go +++ b/discovery.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -135,7 +136,7 @@ func torLookupIP(host, proxy string) ([]net.IP, error) { // seeders. If proxy is not "" then it is used as a tor proxy for the // resolution. func dnsDiscover(seeder string) ([]net.IP, error) { - peers, err := btcdLookup(seeder) + peers, err := dcrdLookup(seeder) if err != nil { return nil, err } diff --git a/doc.go b/doc.go index d98cd830..b5015578 100644 --- a/doc.go +++ b/doc.go @@ -1,24 +1,25 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. /* -btcd is a full-node bitcoin implementation written in Go. +dcrd is a full-node decred implementation written in Go. -The default options are sane for most users. This means btcd will work 'out of +The default options are sane for most users. This means dcrd will work 'out of the box' for most users. However, there are also a wide variety of flags that can be used to control it. The following section provides a usage overview which enumerates the flags. An interesting point to note is that the long form of all of these options (except -C) can be specified in a configuration file that is automatically -parsed when btcd starts up. By default, the configuration file is located at -~/.btcd/btcd.conf on POSIX-style operating systems and %LOCALAPPDATA%\btcd\btcd.conf +parsed when dcrd starts up. By default, the configuration file is located at +~/.dcrd/dcrd.conf on POSIX-style operating systems and %LOCALAPPDATA%\dcrd\dcrd.conf on Windows. The -C (--configfile) flag, as shown below, can be used to override this location. 
Usage: - btcd [OPTIONS] + dcrd [OPTIONS] Application Options: -V, --version Display version information and exit @@ -31,7 +32,7 @@ Application Options: or --proxy options are used without also specifying listen interfaces via --listen --listen= Add an interface/port to listen for connections - (default all interfaces port: 8333, testnet: 18333) + (default all interfaces port: 9108, testnet: 19108) --maxpeers= Max number of inbound and outbound peers (125) --banduration= How long to ban misbehaving peers. Valid time units are {s, m, h}. Minimum 1 second (24h0m0s) @@ -84,7 +85,7 @@ Application Options: (15) --maxorphantx= Max number of orphan transactions to keep in memory (1000) - --generate= Generate (mine) bitcoins using the CPU + --generate= Generate (mine) decreds using the CPU --miningaddr= Add the specified payment address to the list of addresses to use for generated blocks -- At least one address is required if the generate option is set diff --git a/docs/README.md b/docs/README.md index 113156aa..1fc7287e 100644 --- a/docs/README.md +++ b/docs/README.md @@ -5,7 +5,7 @@ 1. [Windows](#WindowsInstallation) 2. [Linux/BSD/MacOSX/POSIX](#PosixInstallation) 2. [Configuration](#Configuration) - 3. [Controlling and Querying btcd via btcctl](#BtcctlConfig) + 3. [Controlling and Querying dcrd via dcrctl](#DcrctlConfig) 4. [Mining](#Mining) 3. [Help](#Help) 1. [Startup](#Startup) @@ -18,11 +18,11 @@ 5. [Developer Resources](#DeveloperResources) 1. [Code Contribution Guidelines](#ContributionGuidelines) 2. [JSON-RPC Reference](#JSONRPCReference) - 3. [The btcsuite Bitcoin-related Go Packages](#GoPackages) + 3. [The btcsuite Decred-related Go Packages](#GoPackages) ### 1. About -btcd is a full node bitcoin implementation written in [Go](http://golang.org), +dcrd is a full node decred implementation written in [Go](http://golang.org), licensed under the [copyfree](http://www.copyfree.org) ISC License. This project is currently under active development and is in a Beta state. It is @@ -30,13 +30,6 @@ extremely stable and has been in production use for over 6 months as of May 2014, however there are still a couple of major features we want to add before we come out of beta. -It currently properly downloads, validates, and serves the block chain using the -exact rules (including bugs) for block acceptance as the reference -implementation, [bitcoind](https://github.com/bitcoin/bitcoin). We have taken -great care to avoid btcd causing a fork to the block chain. It passes all of -the '[official](https://github.com/TheBlueMatt/test-scripts/)' block acceptance -tests. - It also properly relays newly mined blocks, maintains a transaction pool, and relays individual transactions that have not yet made it into a block. It ensures all individual transactions admitted to the pool follow the rules @@ -44,54 +37,45 @@ required into the block chain and also includes the vast majority of the more strict checks which filter transactions based on miner requirements ("standard" transactions). -One key difference between btcd and bitcoind is that btcd does NOT include -wallet functionality and this was a very intentional design decision. See the -[blog entry](https://blog.conformal.com/btcd-not-your-moms-bitcoin-daemon) for -more details. This means you can't actually make or receive payments directly -with btcd. That functionality is provided by the -[btcwallet](https://github.com/btcsuite/btcwallet) and -[btcgui](https://github.com/btcsuite/btcgui) projects which are both under -active development. - ### 2. 
Getting Started **2.1 Installation**
-The first step is to install btcd. See one of the following sections for +The first step is to install dcrd. See one of the following sections for details on how to install on the supported operating systems.
**2.1.1 Windows Installation**
-* Install the MSI available at: https://github.com/btcsuite/btcd/releases -* Launch btcd from the Start Menu +* Install the MSI available at: https://github.com/decred/dcrd/releases +* Launch dcrd from the Start Menu
**2.1.2 Linux/BSD/MacOSX/POSIX Installation**
* Install Go according to the installation instructions here: http://golang.org/doc/install * Run the following command to ensure your Go version is at least version 1.2: `$ go version` -* Run the following command to obtain btcd, its dependencies, and install it: `$ go get github.com/btcsuite/btcd/...`
- * To upgrade, run the following command: `$ go get -u github.com/btcsuite/btcd/...` -* Run btcd: `$ btcd` +* Run the following command to obtain dcrd, its dependencies, and install it: `$ go get github.com/decred/dcrd/...`
+ * To upgrade, run the following command: `$ go get -u github.com/decred/dcrd/...` +* Run dcrd: `$ dcrd`
**2.2 Configuration**
-btcd has a number of [configuration](http://godoc.org/github.com/btcsuite/btcd) -options, which can be viewed by running: `$ btcd --help`. +dcrd has a number of [configuration](http://godoc.org/github.com/decred/dcrd) +options, which can be viewed by running: `$ dcrd --help`. -
-**2.3 Controlling and Querying btcd via btcctl**
+
+**2.3 Controlling and Querying dcrd via dcrctl**
-btcctl is a command line utility that can be used to both control and query btcd -via [RPC](http://www.wikipedia.org/wiki/Remote_procedure_call). btcd does +dcrctl is a command line utility that can be used to both control and query dcrd +via [RPC](http://www.wikipedia.org/wiki/Remote_procedure_call). dcrd does **not** enable its RPC server by default; You must configure at minimum both an RPC username and password or both an RPC limited username and password: -* btcd.conf configuration file +* dcrd.conf configuration file ``` [Application Options] rpcuser=myuser @@ -99,7 +83,7 @@ rpcpass=SomeDecentp4ssw0rd rpclimituser=mylimituser rpclimitpass=Limitedp4ssw0rd ``` -* btcctl.conf configuration file +* dcrctl.conf configuration file ``` [Application Options] rpcuser=myuser @@ -111,11 +95,11 @@ OR rpclimituser=mylimituser rpclimitpass=Limitedp4ssw0rd ``` -For a list of available options, run: `$ btcctl --help` +For a list of available options, run: `$ dcrctl --help`
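To make the relationship between dcrctl and the RPC server concrete, here is a minimal Go sketch (not part of this changeset) that uses the dcrjson package renamed above to build and marshal a request. It assumes the `getinfo` method is registered with dcrjson just as it was with btcjson; the ID value of 1 is only illustrative. The output has the same shape as the marshalled strings exercised by the tests earlier in this diff.

```go
package main

import (
	"fmt"

	"github.com/decred/dcrd/dcrjson"
)

func main() {
	// Build a getinfo command dynamically from its registered method name.
	cmd, err := dcrjson.NewCmd("getinfo")
	if err != nil {
		panic(err)
	}

	// Marshal it into a JSON-RPC 1.0 request with ID 1, the same wire form
	// dcrctl sends to dcrd over HTTP POST or websockets.
	marshalled, err := dcrjson.MarshalCmd(1, cmd)
	if err != nil {
		panic(err)
	}

	// Expected output (assuming getinfo takes no parameters):
	// {"jsonrpc":"1.0","method":"getinfo","params":[],"id":1}
	fmt.Println(string(marshalled))
}
```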
**2.4 Mining**
-btcd supports both the `getwork` and `getblocktemplate` RPCs although the +dcrd supports both the `getwork` and `getblocktemplate` RPCs although the `getwork` RPC is deprecated and will likely be removed in a future release. The limited user cannot access these RPCs.
@@ -129,16 +113,16 @@ miningaddr=12c6DSiU4Rq3P4ZxziKxzrL5LmMBrzjrJX miningaddr=1M83ju3EChKYyysmM2FXtLNftbacagd8FR ``` -**2. Add btcd's RPC TLS certificate to system Certificate Authority list.**
+**2. Add dcrd's RPC TLS certificate to system Certificate Authority list.**
`cgminer` uses [curl](http://curl.haxx.se/) to fetch data from the RPC server. -Since curl validates the certificate by default, we must install the `btcd` RPC +Since curl validates the certificate by default, we must install the `dcrd` RPC certificate into the default system Certificate Authority list. **Ubuntu**
-1. Copy rpc.cert to /usr/share/ca-certificates: `# cp /home/user/.btcd/rpc.cert /usr/share/ca-certificates/btcd.crt`
-2. Add btcd.crt to /etc/ca-certificates.conf: `# echo btcd.crt >> /etc/ca-certificates.conf`
+1. Copy rpc.cert to /usr/share/ca-certificates: `# cp /home/user/.dcrd/rpc.cert /usr/share/ca-certificates/dcrd.crt`
+2. Add dcrd.crt to /etc/ca-certificates.conf: `# echo dcrd.crt >> /etc/ca-certificates.conf`
3. Update the CA certificate list: `# update-ca-certificates`
**3. Set your mining software url to use https.**
@@ -151,26 +135,26 @@ certificate into the default system Certificate Authority list.
**3.1 Startup**
-Typically btcd will run and start downloading the block chain with no extra +Typically dcrd will run and start downloading the block chain with no extra configuration necessary, however, there is an optional method to use a `bootstrap.dat` file that may speed up the initial block chain download process.
**3.1.1 bootstrap.dat**
-* [Using bootstrap.dat](https://github.com/btcsuite/btcd/tree/master/docs/using_bootstrap_dat.md) +* [Using bootstrap.dat](https://github.com/decred/dcrd/tree/master/docs/using_bootstrap_dat.md)
**3.1.2 Network Configuration**
-* [What Ports Are Used by Default?](https://github.com/btcsuite/btcd/tree/master/docs/default_ports.md) -* [How To Listen on Specific Interfaces](https://github.com/btcsuite/btcd/tree/master/docs/configure_peer_server_listen_interfaces.md) -* [How To Configure RPC Server to Listen on Specific Interfaces](https://github.com/btcsuite/btcd/tree/master/docs/configure_rpc_server_listen_interfaces.md) -* [Configuring btcd with Tor](https://github.com/btcsuite/btcd/tree/master/docs/configuring_tor.md) +* [What Ports Are Used by Default?](https://github.com/decred/dcrd/tree/master/docs/default_ports.md) +* [How To Listen on Specific Interfaces](https://github.com/decred/dcrd/tree/master/docs/configure_peer_server_listen_interfaces.md) +* [How To Configure RPC Server to Listen on Specific Interfaces](https://github.com/decred/dcrd/tree/master/docs/configure_rpc_server_listen_interfaces.md) +* [Configuring dcrd with Tor](https://github.com/decred/dcrd/tree/master/docs/configuring_tor.md)
**3.1 Wallet**
-btcd was intentionally developed without an integrated wallet for security -reasons. Please see [btcwallet](https://github.com/btcsuite/btcwallet) for more +dcrd was intentionally developed without an integrated wallet for security +reasons. Please see [dcrwallet](https://github.com/decred/dcrwallet) for more information.
@@ -178,42 +162,37 @@ information. **4.1 IRC**
-* [irc.freenode.net](irc://irc.freenode.net), channel #btcd +* [irc.freenode.net](irc://irc.freenode.net), channel #dcrd
**4.2 Mailing Lists**
-*
btcd: discussion - of btcd and its packages. -* btcd-commits: +* dcrd: discussion + of dcrd and its packages. +* dcrd-commits: readonly mail-out of source code changes. ### 5. Developer Resources -* [Code Contribution Guidelines](https://github.com/btcsuite/btcd/tree/master/docs/code_contribution_guidelines.md) +* [Code Contribution Guidelines](https://github.com/decred/dcrd/tree/master/docs/code_contribution_guidelines.md) -* [JSON-RPC Reference](https://github.com/btcsuite/btcd/tree/master/docs/json_rpc_api.md) - * [RPC Examples](https://github.com/btcsuite/btcd/tree/master/docs/json_rpc_api.md#ExampleCode) +* [JSON-RPC Reference](https://github.com/decred/dcrd/tree/master/docs/json_rpc_api.md) + * [RPC Examples](https://github.com/decred/dcrd/tree/master/docs/json_rpc_api.md#ExampleCode) -* The btcsuite Bitcoin-related Go Packages: - * [btcrpcclient](https://github.com/btcsuite/btcrpcclient) - Implements a - robust and easy to use Websocket-enabled Bitcoin JSON-RPC client - * [btcjson](https://github.com/btcsuite/btcjson) - Provides an extensive API - for the underlying JSON-RPC command and return values - * [btcws](https://github.com/btcsuite/btcws) - Custom types for btcd - websocket extension commands (registers the extension commands with - [btcjson](https://github.com/btcsuite/btcjson)) - * [wire](https://github.com/btcsuite/btcd/tree/master/wire) - Implements the - Bitcoin wire protocol - * [blockchain](https://github.com/btcsuite/btcd/tree/master/blockchain) - - Implements Bitcoin block handling and chain selection rules - * [txscript](https://github.com/btcsuite/btcd/tree/master/txscript) - - Implements the Bitcoin transaction scripting language - * [btcec](https://github.com/btcsuite/btcd/tree/master/btcec) - Implements +* The decred Decred-related Go Packages: + * [btcrpcclient](https://github.com/decred/dcrrpcclient) - Implements a + robust and easy to use Websocket-enabled Decred JSON-RPC client + * [wire](https://github.com/decred/dcrd/tree/master/wire) - Implements the + Decred wire protocol + * [blockchain](https://github.com/decred/dcrd/tree/master/blockchain) - + Implements Decred block handling and chain selection rules + * [txscript](https://github.com/decred/dcrd/tree/master/txscript) - + Implements the Decred transaction scripting language + * [btcec](https://github.com/decred/dcrd/tree/master/dcrec) - Implements support for the elliptic curve cryptographic functions needed for the - Bitcoin scripts - * [database](https://github.com/btcsuite/btcd/tree/master/database) - - Provides a database interface for the Bitcoin block chain - * [btcutil](https://github.com/btcsuite/btcutil) - Provides Bitcoin-specific + Decred scripts + * [database](https://github.com/decred/dcrd/tree/master/database) - + Provides a database interface for the Decred block chain + * [btcutil](https://github.com/decred/dcrutil) - Provides Decred-specific convenience functions and types diff --git a/docs/code_contribution_guidelines.md b/docs/code_contribution_guidelines.md index 0de7c317..762f28c4 100644 --- a/docs/code_contribution_guidelines.md +++ b/docs/code_contribution_guidelines.md @@ -30,7 +30,7 @@ represent real money and introducing bugs and security vulnerabilities can have far more dire consequences than in typical projects where having a small bug is minimal by comparison. In the world of cryptocurrencies, even the smallest bug in the wrong area can cost people a significant amount of money. 
For this -reason, the btcd suite has a formalized and rigorous development process which +reason, the dcrd suite has a formalized and rigorous development process which is outlined on this page. We highly encourage code contributions, however it is imperative that you adhere @@ -64,7 +64,7 @@ security and performance implications. ### 3. Required Reading -- [Effective Go](http://golang.org/doc/effective_go.html) - The entire btcd +- [Effective Go](http://golang.org/doc/effective_go.html) - The entire dcrd suite follows the guidelines in this document. For your code to be accepted, it must follow the guidelinestherein. - [Original Satoshi Whitepaper](http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCkQFjAA&url=http%3A%2F%2Fbitcoin.org%2Fbitcoin.pdf&ei=os3VUuH8G4SlsASV74GoAg&usg=AFQjCNEipPLigou_1MfB7DQjXCNdlylrBg&sig2=FaHDuT5z36GMWDEnybDJLg&bvm=bv.59378465,d.b2I) - This is the white paper that started it all. Having a solid @@ -102,7 +102,7 @@ This approach has several benefits: ### 4.2 Testing -One of the major design goals of all core btcd packages is to aim for complete +One of the major design goals of all core dcrd packages is to aim for complete test coverage. This is financial software so bugs and regressions can cost people real money. For this reason every effort must be taken to ensure the code is as accurate and bug-free as possible. Thorough testing is a good way to @@ -198,7 +198,7 @@ comment can make. ### 5. Code Approval Process This section describes the code approval process that is used for code -contributions. This is how to get your changes into btcd. +contributions. This is how to get your changes into dcrd. ### 5.1 Code Review @@ -256,7 +256,7 @@ keep a clean commit history over a tangled weave of merge commits. However, regardless of the specific merge method used, the code will be integrated with the master branch and the pull request will be closed. -Rejoice as you will now be listed as a [contributor](https://github.com/btcsuite/btcd/graphs/contributors)! +Rejoice as you will now be listed as a [contributor](https://github.com/decred/dcrd/graphs/contributors)! ### 6. Contribution Standards @@ -283,5 +283,5 @@ Rejoice as you will now be listed as a [contributor](https://github.com/btcsuite ### 6.2. Licensing of Contributions **** All contributions must be licensed with the -[ISC license](https://github.com/btcsuite/btcd/blob/master/LICENSE). This is -the same license as all of the code in the btcd suite. +[ISC license](https://github.com/decred/dcrd/blob/master/LICENSE). This is +the same license as all of the code in the dcrd suite. diff --git a/docs/configure_peer_server_listen_interfaces.md b/docs/configure_peer_server_listen_interfaces.md index 26f5ec72..014954d8 100644 --- a/docs/configure_peer_server_listen_interfaces.md +++ b/docs/configure_peer_server_listen_interfaces.md @@ -1,4 +1,4 @@ -btcd allows you to bind to specific interfaces which enables you to setup +dcrd allows you to bind to specific interfaces which enables you to setup configurations with varying levels of complexity. The listen parameter can be specified on the command line as shown below with the -- prefix or in the configuration file without the -- prefix (as can all long command line options). 
@@ -25,7 +25,7 @@ Command Line Examples: |--listen=127.0.0.1:8337 --listen=[::1]:8333|IPv4 localhost on port 8337 and IPv6 localhost on port 8333| |--listen=:8333 --listen=:8337|all interfaces on ports 8333 and 8337| -The following config file would configure btcd to only listen on localhost for both IPv4 and IPv6: +The following config file would configure dcrd to only listen on localhost for both IPv4 and IPv6: ```text [Application Options] diff --git a/docs/configure_rpc_server_listen_interfaces.md b/docs/configure_rpc_server_listen_interfaces.md index 3115d6a1..57a8bf7d 100644 --- a/docs/configure_rpc_server_listen_interfaces.md +++ b/docs/configure_rpc_server_listen_interfaces.md @@ -1,4 +1,4 @@ -btcd allows you to bind the RPC server to specific interfaces which enables you +dcrd allows you to bind the RPC server to specific interfaces which enables you to setup configurations with varying levels of complexity. The `rpclisten` parameter can be specified on the command line as shown below with the -- prefix or in the configuration file without the -- prefix (as can all long command line @@ -38,7 +38,7 @@ Command Line Examples: |--rpclisten=127.0.0.1:8337 --listen=[::1]:8334|IPv4 localhost on port 8337 and IPv6 localhost on port 8334| |--rpclisten=:8334 --listen=:8337|all interfaces on ports 8334 and 8337| -The following config file would configure the btcd RPC server to listen to all interfaces on the default port, including external interfaces, for both IPv4 and IPv6: +The following config file would configure the dcrd RPC server to listen to all interfaces on the default port, including external interfaces, for both IPv4 and IPv6: ```text [Application Options] diff --git a/docs/configuring_tor.md b/docs/configuring_tor.md index d7afb872..825f2a92 100644 --- a/docs/configuring_tor.md +++ b/docs/configuring_tor.md @@ -20,10 +20,10 @@ ### 1. Overview -btcd provides full support for anonymous networking via the +dcrd provides full support for anonymous networking via the [Tor Project](https://www.torproject.org/), including [client-only](#Client) and [hidden service](#HiddenService) configurations along with -[stream isolation](#TorStreamIsolation). In addition, btcd supports a hybrid, +[stream isolation](#TorStreamIsolation). In addition, dcrd supports a hybrid, [bridge mode](#Bridge) which is not anonymous, but allows it to operate as a bridge between regular nodes and hidden service nodes without routing the regular connections through Tor. @@ -39,15 +39,15 @@ hidden service for this reason. **2.1 Description**
-Configuring btcd as a Tor client is straightforward. The first step is +Configuring dcrd as a Tor client is straightforward. The first step is obviously to install Tor and ensure it is working. Once that is done, all that -typically needs to be done is to specify the `--proxy` flag via the btcd command -line or in the btcd configuration file. Typically the Tor proxy address will be +typically needs to be done is to specify the `--proxy` flag via the dcrd command +line or in the dcrd configuration file. Typically the Tor proxy address will be 127.0.0.1:9050 (if using standalone Tor) or 127.0.0.1:9150 (if using the Tor Browser Bundle). If you have Tor configured to require a username and password, you may specify them with the `--proxyuser` and `--proxypass` flags. -By default, btcd assumes the proxy specified with `--proxy` is a Tor proxy and +By default, dcrd assumes the proxy specified with `--proxy` is a Tor proxy and hence will send all traffic, including DNS resolution requests, via the specified proxy. @@ -59,7 +59,7 @@ not be reachable for inbound connections unless you also configure a Tor **2.2 Command Line Example**
```bash -$ ./btcd --proxy=127.0.0.1:9050 +$ ./dcrd --proxy=127.0.0.1:9050 ```
@@ -81,7 +81,7 @@ The first step is to configure Tor to provide a hidden service. Documentation for this can be found on the Tor project website [here](https://www.torproject.org/docs/tor-hidden-service.html.en). However, there is no need to install a web server locally as the linked instructions -discuss since btcd will act as the server. +discuss since dcrd will act as the server. In short, the instructions linked above entail modifying your `torrc` file to add something similar to the following, restarting Tor, and opening the @@ -89,12 +89,12 @@ add something similar to the following, restarting Tor, and opening the address. ```text -HiddenServiceDir /var/tor/btcd +HiddenServiceDir /var/tor/dcrd HiddenServicePort 8333 127.0.0.1:8333 ``` Once Tor is configured to provide the hidden service and you have obtained your -generated .onion address, configuring btcd as a Tor hidden service requires +generated .onion address, configuring dcrd as a Tor hidden service requires three flags: * `--proxy` to identify the Tor (SOCKS 5) proxy to use for outgoing traffic. This is typically 127.0.0.1:9050. @@ -106,7 +106,7 @@ three flags: **3.2 Command Line Example**
```bash -$ ./btcd --proxy=127.0.0.1:9050 --listen=127.0.0.1 --externalip=fooanon.onion +$ ./dcrd --proxy=127.0.0.1:9050 --listen=127.0.0.1 --externalip=fooanon.onion ```
@@ -126,13 +126,13 @@ externalip=fooanon.onion **4.1 Description**
-btcd provides support for operating as a bridge between regular nodes and hidden +dcrd provides support for operating as a bridge between regular nodes and hidden service nodes. In particular this means only traffic which is directed to or from a .onion address is sent through Tor while other traffic is sent normally. _As a result, this mode is **NOT** anonymous._ This mode works by specifying an onion-specific proxy, which is pointed at Tor, -by using the `--onion` flag via the btcd command line or in the btcd +by using the `--onion` flag via the dcrd command line or in the dcrd configuration file. If you have Tor configured to require a username and password, you may specify them with the `--onionuser` and `--onionpass` flags. @@ -147,7 +147,7 @@ routed via Tor due to the `--onion` flag. **4.2 Command Line Example**
```bash -$ ./btcd --onion=127.0.0.1:9050 --externalip=fooanon.onion +$ ./dcrd --onion=127.0.0.1:9050 --externalip=fooanon.onion ```
@@ -169,14 +169,14 @@ externalip=fooanon.onion Tor stream isolation forces Tor to build a new circuit for each connection making it harder to correlate connections. -btcd provides support for Tor stream isolation by using the `--torisolation` +dcrd provides support for Tor stream isolation by using the `--torisolation` flag. This option requires --proxy or --onionproxy to be set. **5.2 Command Line Example**
```bash -$ ./btcd --proxy=127.0.0.1:9050 --torisolation +$ ./dcrd --proxy=127.0.0.1:9050 --torisolation ```
diff --git a/docs/default_ports.md b/docs/default_ports.md index 14e4eea2..0d7a620a 100644 --- a/docs/default_ports.md +++ b/docs/default_ports.md @@ -1,15 +1,15 @@ -While btcd is highly configurable when it comes to the network configuration, +While dcrd is highly configurable when it comes to the network configuration, the following is intended to be a quick reference for the default ports used so port forwarding can be configured as required. -btcd provides a `--upnp` flag which can be used to automatically map the bitcoin +dcrd provides a `--upnp` flag which can be used to automatically map the bitcoin peer-to-peer listening port if your router supports UPnP. If your router does not support UPnP, or you don't wish to use it, please note that only the bitcoin peer-to-peer port should be forwarded unless you specifically want to allow RPC -access to your btcd from external sources such as in more advanced network +access to your dcrd from external sources such as in more advanced network configurations. |Name|Port| |----|----| -|Default Bitcoin peer-to-peer port|TCP 8333| -|Default RPC port|TCP 8334| +|Default Decred peer-to-peer port|TCP 9108| +|Default RPC port|TCP 9109| diff --git a/docs/json_rpc_api.md b/docs/json_rpc_api.md index 7d3620f4..f47d1edd 100644 --- a/docs/json_rpc_api.md +++ b/docs/json_rpc_api.md @@ -25,32 +25,32 @@ ### 1. Overview -btcd provides a [JSON-RPC](http://json-rpc.org/wiki/specification) API that is +dcrd provides a [JSON-RPC](http://json-rpc.org/wiki/specification) API that is fully compatible with the original bitcoind/bitcoin-qt. There are a few key -differences between btcd and bitcoind as far as how RPCs are serviced: +differences between dcrd and bitcoind as far as how RPCs are serviced: * Unlike bitcoind that has the wallet and chain intermingled in the same process - which leads to several issues, btcd intentionally splits the wallet and chain + which leads to several issues, dcrd intentionally splits the wallet and chain services into independent processes. See the blog post [here](https://blog.conformal.com/btcd-not-your-moms-bitcoin-daemon/) for further details on why they were separated. This means that if you are - talking directly to btcd, only chain-related RPCs are available. However both + talking directly to dcrd, only chain-related RPCs are available. However both chain-related and wallet-related RPCs are available via - [btcwallet](https://github.com/btcsuite/btcwallet). -* btcd is secure by default which means that the RPC connection is TLS-enabled + [dcrwallet](https://github.com/decred/dcrwallet). +* dcrd is secure by default which means that the RPC connection is TLS-enabled by default -* btcd provides access to the API through both +* dcrd provides access to the API through both [HTTP POST](http://en.wikipedia.org/wiki/POST_%28HTTP%29) requests and [Websockets](http://en.wikipedia.org/wiki/WebSocket) -Websockets are the preferred transport for btcd RPC and are used by applications -such as [btcwallet](https://github.com/btcsuite/btcwallet) for inter-process -communication with btcd. The websocket connection endpoint for btcd is +Websockets are the preferred transport for dcrd RPC and are used by applications +such as [dcrwallet](https://github.com/decred/dcrwallet) for inter-process +communication with dcrd. The websocket connection endpoint for dcrd is `wss://your_ip_or_domain:8334/ws`. In addition to the [standard API](#Methods), an [extension API](#WSExtMethods) has been developed that is exclusive to clients using Websockets. 
In its current state, this API attempts to cover features found missing in the standard API -during the development of btcwallet. +during the development of dcrwallet. While the [standard API](#Methods) is stable, the [Websocket extension API](#WSExtMethods) should be considered a work in @@ -61,7 +61,7 @@ The original bitcoind/bitcoin-qt JSON-RPC API documentation is available at [htt ### 2. HTTP POST Versus Websockets -The btcd RPC server supports both [HTTP POST](http://en.wikipedia.org/wiki/POST_%28HTTP%29) +The dcrd RPC server supports both [HTTP POST](http://en.wikipedia.org/wiki/POST_%28HTTP%29) requests and the preferred [Websockets](http://en.wikipedia.org/wiki/WebSocket). All of the [standard](#Methods) and [extension](#ExtensionMethods) methods described in this documentation can be accessed through both. As the name @@ -69,7 +69,7 @@ indicates, the [Websocket-specific extension](#WSExtMethods) methods can only be accessed when connected via Websockets. As mentioned in the [overview](#Overview), the websocket connection endpoint for -btcd is `wss://your_ip_or_domain:8334/ws`. +dcrd is `wss://your_ip_or_domain:8334/ws`. The most important differences between the two transports as it pertains to the JSON-RPC API are: @@ -87,18 +87,18 @@ JSON-RPC API are: **3.1 Authentication Overview**
The following authentication details are needed before establishing a connection -to a btcd RPC server: +to a dcrd RPC server: -* **rpcuser** is the full-access username configured for the btcd RPC server -* **rpcpass** is the full-access password configured for the btcd RPC server -* **rpclimituser** is the limited username configured for the btcd RPC server -* **rpclimitpass** is the limited password configured for the btcd RPC server -* **rpccert** is the PEM-encoded X.509 certificate (public key) that the btcd - server is configured with. It is automatically generated by btcd and placed - in the btcd home directory (which is typically `%LOCALAPPDATA%\Btcd` on - Windows and `~/.btcd` on POSIX-like OSes) +* **rpcuser** is the full-access username configured for the dcrd RPC server +* **rpcpass** is the full-access password configured for the dcrd RPC server +* **rpclimituser** is the limited username configured for the dcrd RPC server +* **rpclimitpass** is the limited password configured for the dcrd RPC server +* **rpccert** is the PEM-encoded X.509 certificate (public key) that the dcrd + server is configured with. It is automatically generated by dcrd and placed + in the dcrd home directory (which is typically `%LOCALAPPDATA%\Dcrd` on + Windows and `~/.dcrd` on POSIX-like OSes) -**NOTE:** As mentioned above, btcd is secure by default which means the RPC +**NOTE:** As mentioned above, dcrd is secure by default which means the RPC server is not running unless configured with a **rpcuser** and **rpcpass** and/or a **rpclimituser** and **rpclimitpass**, and uses TLS authentication for all connections. @@ -111,7 +111,7 @@ two, mutually exclusive, methods.
**3.2 HTTP Basic Access Authentication**
-The btcd RPC server uses HTTP [basic access authentication](http://en.wikipedia.org/wiki/Basic_access_authentication) with the **rpcuser** +The dcrd RPC server uses HTTP [basic access authentication](http://en.wikipedia.org/wiki/Basic_access_authentication) with the **rpcuser** and **rpcpass** detailed above. If the supplied credentials are invalid, you will be disconnected immediately upon making the connection. @@ -131,8 +131,8 @@ authenticated will cause the websocket to be closed immediately.
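For illustration, the following is a minimal Go sketch (not part of this changeset) of an HTTP POST JSON-RPC call that authenticates with HTTP basic access authentication and trusts the generated rpc.cert. The certificate path, port, and credentials are placeholders taken from the examples in these docs; adjust them to match your own configuration.

```go
package main

import (
	"bytes"
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Trust the self-signed certificate generated by dcrd (path assumes a
	// POSIX-like OS with the default home directory layout).
	certPEM, err := ioutil.ReadFile("/home/user/.dcrd/rpc.cert")
	if err != nil {
		panic(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(certPEM) {
		panic("unable to parse rpc.cert")
	}
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{RootCAs: pool},
		},
	}

	// A JSON-RPC 1.0 getinfo request sent over HTTP POST to the default RPC
	// port, using the rpcuser/rpcpass credentials from the dcrd.conf example.
	body := bytes.NewBufferString(`{"jsonrpc":"1.0","id":1,"method":"getinfo","params":[]}`)
	req, err := http.NewRequest("POST", "https://127.0.0.1:9109", body)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("myuser", "SomeDecentp4ssw0rd")
	req.Header.Set("Content-Type", "application/json")

	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	reply, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(reply))
}
```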
### 4. Command-line Utility -btcd comes with a separate utility named `btcctl` which can be used to issue -these RPC commands via HTTP POST requests to btcd after configuring it with the +dcrd comes with a separate utility named `dcrctl` which can be used to issue +these RPC commands via HTTP POST requests to dcrd after configuring it with the information in the [Authentication](#Authentication) section above. It can also be used to communicate with any server/daemon/service which provides a JSON-RPC API compatible with the original bitcoind/bitcoin-qt client. @@ -168,14 +168,14 @@ the method name for further details such as parameter and return information. |18|[getpeerinfo](#getpeerinfo)|N|Returns information about each connected network peer as an array of json objects.| |19|[getrawmempool](#getrawmempool)|Y|Returns an array of hashes for all of the transactions currently in the memory pool.| |20|[getrawtransaction](#getrawtransaction)|Y|Returns information about a transaction given its hash.| -|21|[getwork](#getwork)|N|Returns formatted hash data to work on or checks and submits solved data.
NOTE: Since btcd does not have the wallet integrated to provide payment addresses, btcd must be configured via the `--miningaddr` option to provide which payment addresses to pay created blocks to for this RPC to function.| +|21|[getwork](#getwork)|N|Returns formatted hash data to work on or checks and submits solved data.
NOTE: Since dcrd does not have the wallet integrated to provide payment addresses, dcrd must be configured via the `--miningaddr` option to provide which payment addresses to pay created blocks to for this RPC to function.| |22|[help](#help)|Y|Returns a list of all commands or help for a specified command.| |23|[ping](#ping)|N|Queues a ping to be sent to each connected peer.| -|24|[sendrawtransaction](#sendrawtransaction)|Y|Submits the serialized, hex-encoded transaction to the local peer and relays it to the network.
btcd does not yet implement the `allowhighfees` parameter, so it has no effect| -|25|[setgenerate](#setgenerate) |N|Set the server to generate coins (mine) or not.
NOTE: Since btcd does not have the wallet integrated to provide payment addresses, btcd must be configured via the `--miningaddr` option to provide which payment addresses to pay created blocks to for this RPC to function.| -|26|[stop](#stop)|N|Shutdown btcd.| +|24|[sendrawtransaction](#sendrawtransaction)|Y|Submits the serialized, hex-encoded transaction to the local peer and relays it to the network.
dcrd does not yet implement the `allowhighfees` parameter, so it has no effect| +|25|[setgenerate](#setgenerate) |N|Set the server to generate coins (mine) or not.
NOTE: Since dcrd does not have the wallet integrated to provide payment addresses, dcrd must be configured via the `--miningaddr` option to provide which payment addresses to pay created blocks to for this RPC to function.| +|26|[stop](#stop)|N|Shutdown dcrd.| |27|[submitblock](#submitblock)|Y|Attempts to submit a new serialized, hex-encoded block to the network.| -|28|[validateaddress](#validateaddress)|Y|Verifies the given address is valid. NOTE: Since btcd does not have a wallet integrated, btcd will only return whether the address is valid or not.| +|28|[validateaddress](#validateaddress)|Y|Verifies the given address is valid. NOTE: Since dcrd does not have a wallet integrated, dcrd will only return whether the address is valid or not.| |29|[verifychain](#verifychain)|N|Verifies the block chain database.|
@@ -197,7 +197,7 @@ the method name for further details such as parameter and return information. | | | |---|---| |Method|createrawtransaction| -|Parameters|1. transaction inputs (JSON array, required) - json array of json objects
`[`
  `{`
    `"txid": "hash", (string, required) the hash of the input transaction`
    `"vout": n (numeric, required) the specific output of the input transaction to redeem`
  `}, ...`
`]`
2. addresses and amounts (JSON object, required) - json object with addresses as keys and amounts as values
`{`
  `"address": n.nnn (numeric, required) the address to send to as the key and the amount in BTC as the value`
  `, ...`
`}`| +|Parameters|1. transaction inputs (JSON array, required) - json array of json objects
`[`
  `{`
    `"txid": "hash", (string, required) the hash of the input transaction`
    `"vout": n (numeric, required) the specific output of the input transaction to redeem`
  `}, ...`
`]`
2. addresses and amounts (JSON object, required) - json object with addresses as keys and amounts as values
`{`
  `"address": n.nnn (numeric, required) the address to send to as the key and the amount in DCR as the value`
  `, ...`
`}`| |Description|Returns a new transaction spending the provided inputs and sending to the provided addresses.
The transaction inputs are not signed in the created transaction.
The `signrawtransaction` RPC command provided by the wallet must be used to sign the resulting transaction.| |Returns|`"transaction" (string) hex-encoded bytes of the serialized transaction`| |Example Parameters|1. transaction inputs `[{"txid":"e6da89de7a6b8508ce8f371a3d0535b04b5e108cb1a6e9284602d3bfd357c018","vout":1}]`
2. addresses and amounts `{"13cgrTP7wgbZYWrY9BZ22BV6p82QXQT3nY": 0.49213337}`| @@ -212,7 +212,7 @@ the method name for further details such as parameter and return information. |Method|decoderawtransaction| |Parameters|1. data (string, required) - serialized, hex-encoded transaction| |Description|Returns a JSON object representing the provided serialized, hex-encoded transaction.| -|Returns|`{ (json object)`
  `"txid": "hash", (string) the hash of the transaction`
  `"version": n, (numeric) the transaction version`
  `"locktime": n, (numeric) the transaction lock time`
  `"vin": [ (array of json objects) the transaction inputs as json objects`
  For coinbase transactions:
    `{ (json object)`
      `"coinbase": "data", (string) the hex-dencoded bytes of the signature script`
      `"sequence": n, (numeric) the script sequence number`
    `}`
  For non-coinbase transactions:
    `{ (json object)`
      `"txid": "hash", (string) the hash of the origin transaction`
      `"vout": n, (numeric) the index of the output being redeemed from the origin transaction`
      `"scriptSig": { (json object) the signature script used to redeem the origin transaction`
        `"asm": "asm", (string) disassembly of the script`
        `"hex": "data", (string) hex-encoded bytes of the script`
      `}`
      `"sequence": n, (numeric) the script sequence number`
    `}, ...`
  `]`
  `"vout": [ (array of json objects) the transaction outputs as json objects`
    `{ (json object)`
      `"value": n, (numeric) the value in BTC`
      `"n": n, (numeric) the index of this transaction output`
      `"scriptPubKey": { (json object) the public key script used to pay coins`
        `"asm": "asm", (string) disassembly of the script`
        `"hex": "data", (string) hex-encoded bytes of the script`
        `"reqSigs": n, (numeric) the number of required signatures`
        `"type": "scripttype" (string) the type of the script (e.g. 'pubkeyhash')`
        `"addresses": [ (json array of string) the bitcoin addresses associated with this output`
          `"bitcoinaddress", (string) the bitcoin address`
          `...`
        `]`
      `}`
    `}, ...`
  `]`
`}`| +|Returns|`{ (json object)`
  `"txid": "hash", (string) the hash of the transaction`
  `"version": n, (numeric) the transaction version`
  `"locktime": n, (numeric) the transaction lock time`
  `"vin": [ (array of json objects) the transaction inputs as json objects`
  For coinbase transactions:
    `{ (json object)`
      `"coinbase": "data", (string) the hex-encoded bytes of the signature script`
      `"sequence": n, (numeric) the script sequence number`
    `}`
  For non-coinbase transactions:
    `{ (json object)`
      `"txid": "hash", (string) the hash of the origin transaction`
      `"vout": n, (numeric) the index of the output being redeemed from the origin transaction`
      `"scriptSig": { (json object) the signature script used to redeem the origin transaction`
        `"asm": "asm", (string) disassembly of the script`
        `"hex": "data", (string) hex-encoded bytes of the script`
      `}`
      `"sequence": n, (numeric) the script sequence number`
    `}, ...`
  `]`
  `"vout": [ (array of json objects) the transaction outputs as json objects`
    `{ (json object)`
      `"value": n, (numeric) the value in DCR`
      `"n": n, (numeric) the index of this transaction output`
      `"scriptPubKey": { (json object) the public key script used to pay coins`
        `"asm": "asm", (string) disassembly of the script`
        `"hex": "data", (string) hex-encoded bytes of the script`
        `"reqSigs": n, (numeric) the number of required signatures`
        `"type": "scripttype" (string) the type of the script (e.g. 'pubkeyhash')`
        `"addresses": [ (json array of string) the decred addresses associated with this output`
          `"decredaddress", (string) the decred address`
          `...`
        `]`
      `}`
    `}, ...`
  `]`
`}`| |Example Return|`{`
  `"txid": "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b",`
  `"version": 1,`
  `"locktime": 0,`
  `"vin": [`
  For coinbase transactions:
    `{ (json object)`
      `"coinbase": "04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6...",`
      `"sequence": 4294967295,`
    `}`
  For non-coinbase transactions:
    `{`
      `"txid": "60ac4b057247b3d0b9a8173de56b5e1be8c1d1da970511c626ef53706c66be04",`
      `"vout": 0,`
      `"scriptSig": {`
        `"asm": "3046022100cb42f8df44eca83dd0a727988dcde9384953e830b1f8004d57485e2ede1b9c8f0...",`
        `"hex": "493046022100cb42f8df44eca83dd0a727988dcde9384953e830b1f8004d57485e2ede1b9c8...",`
      `}`
      `"sequence": 4294967295,`
    `}`
  `]`
  `"vout": [`
    `{`
      `"value": 50,`
      `"n": 0,`
      `"scriptPubKey": {`
        `"asm": "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4ce...",`
        `"hex": "4104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4...",`
        `"reqSigs": 1,`
        `"type": "pubkey"`
        `"addresses": [`
          `"1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa",`
        `]`
      `}`
    `}`
  `]`
`}`| [Return to Overview](#MethodOverview)
@@ -224,7 +224,7 @@ the method name for further details such as parameter and return information. |Method|decodescript| |Parameters|1. script (string, required) - hex-encoded script| |Description|Returns a JSON object with information about the provided hex-encoded script.| -|Returns|`{ (json object)`
  `"asm": "asm", (string) disassembly of the script`
  `"reqSigs": n, (numeric) the number of required signatures`
  `"type": "scripttype", (string) the type of the script (e.g. 'pubkeyhash')`
  `"addresses": [ (json array of string) the bitcoin addresses associated with this script`
    `"bitcoinaddress", (string) the bitcoin address`
    `...`
  `]`
  `"p2sh": "scripthash", (string) the script hash for use in pay-to-script-hash transactions`
`}`| +|Returns|`{ (json object)`
  `"asm": "asm", (string) disassembly of the script`
  `"reqSigs": n, (numeric) the number of required signatures`
  `"type": "scripttype", (string) the type of the script (e.g. 'pubkeyhash')`
  `"addresses": [ (json array of string) the decred addresses associated with this script`
    `"decredaddress", (string) the decred address`
    `...`
  `]`
  `"p2sh": "scripthash", (string) the script hash for use in pay-to-script-hash transactions`
`}`| |Example Return|`{`
  `"asm": "OP_DUP OP_HASH160 b0a4d8a91981106e4ed85165a66748b19f7b7ad4 OP_EQUALVERIFY OP_CHECKSIG",`
  `"reqSigs": 1,`
  `"type": "pubkeyhash",`
  `"addresses": [`
    `"1H71QVBpzuLTNUh5pewaH3UTLTo2vWgcRJ"`
  `]`
  `"p2sh": "359b84ff799f48231990ff0298206f54117b08b6"`
`}`| [Return to Overview](#MethodOverview)
@@ -260,7 +260,7 @@ the method name for further details such as parameter and return information. | | | |---|---| |Method|getblock| -|Parameters|1. block hash (string, required) - the hash of the block
2. verbose (boolean, optional, default=true) - specifies the block is returned as a JSON object instead of hex-encoded string
3. verbosetx (boolean, optional, default=false) - specifies that each transaction is returned as a JSON object and only applies if the `verbose` flag is true.**This parameter is a btcd extension**| +|Parameters|1. block hash (string, required) - the hash of the block
2. verbose (boolean, optional, default=true) - specifies the block is returned as a JSON object instead of hex-encoded string
3. verbosetx (boolean, optional, default=false) - specifies that each transaction is returned as a JSON object and only applies if the `verbose` flag is true. **This parameter is a dcrd extension**| |Description|Returns information about a block given its hash.| |Returns (verbose=false)|`"data" (string) hex-encoded bytes of the serialized block`| |Returns (verbose=true, verbosetx=false)|`{ (json object)`
  `"hash": "blockhash", (string) the hash of the block (same as provided)`
  `"confirmations": n, (numeric) the number of confirmations`
  `"size": n, (numeric) the size of the block`
  `"height": n, (numeric) the height of the block in the block chain`
  `"version": n, (numeric) the block version`
  `"merkleroot": "hash", (string) root hash of the merkle tree`
  `"tx": [ (json array of string) the transaction hashes`
    `"transactionhash", (string) hash of the parent transaction`
    `...`
  `]`
  `"time": n, (numeric) the block time in seconds since 1 Jan 1970 GMT`
  `"nonce": n, (numeric) the block nonce`
  `"bits", n, (numeric) the bits which represent the block difficulty`
  `difficulty: n.nn, (numeric) the proof-of-work difficulty as a multiple of the minimum difficulty`
  `"previousblockhash": "hash", (string) the hash of the previous block`
  `"nextblockhash": "hash", (string) the hash of the next block (only if there is one)`
`}`| @@ -347,8 +347,8 @@ the method name for further details such as parameter and return information. |Method|getinfo| |Parameters|None| |Description|Returns a JSON object containing various state info.| -|Notes|NOTE: Since btcd does NOT contain wallet functionality, wallet-related fields are not returned. See getinfo in btcwallet for a version which includes that information.| -|Returns|`{ (json object)`
  `"version": n, (numeric) the version of the server`
  `"protocolversion": n, (numeric) the latest supported protocol version`
  `"blocks": n, (numeric) the number of blocks processed`
  `"timeoffset": n, (numeric) the time offset`
  `"connections": n, (numeric) the number of connected peers`
  `"proxy": "host:port", (string) the proxy used by the server`
  `"difficulty": n.nn, (numeric) the current target difficulty`
  `"testnet": true or false, (boolean) whether or not server is using testnet`
  `"relayfee": n.nn, (numeric) the minimum relay fee for non-free transactions in BTC/KB`
`}`| +|Notes|NOTE: Since dcrd does NOT contain wallet functionality, wallet-related fields are not returned. See getinfo in dcrwallet for a version which includes that information.| +|Returns|`{ (json object)`
  `"version": n, (numeric) the version of the server`
  `"protocolversion": n, (numeric) the latest supported protocol version`
  `"blocks": n, (numeric) the number of blocks processed`
  `"timeoffset": n, (numeric) the time offset`
  `"connections": n, (numeric) the number of connected peers`
  `"proxy": "host:port", (string) the proxy used by the server`
  `"difficulty": n.nn, (numeric) the current target difficulty`
  `"testnet": true or false, (boolean) whether or not server is using testnet`
  `"relayfee": n.nn, (numeric) the minimum relay fee for non-free transactions in DCR/KB`
`}`| |Example Return|`{`
  `"version": 70000`
  `"protocolversion": 70001, `
  `"blocks": 298963,`
  `"timeoffset": 0,`
  `"connections": 17,`
  `"proxy": "",`
  `"difficulty": 8000872135.97,`
  `"testnet": false,`
  `"relayfee": 0.00001,`
`}`| [Return to Overview](#MethodOverview)
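For reference, the getinfo fields documented above map directly onto a small Go struct. The sketch below uses only the standard library and no server connection; it simply unmarshals a response shaped like the example return, with illustrative values.

```Go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// infoResult mirrors the getinfo fields documented above.
type infoResult struct {
	Version         int32   `json:"version"`
	ProtocolVersion int32   `json:"protocolversion"`
	Blocks          int64   `json:"blocks"`
	TimeOffset      int64   `json:"timeoffset"`
	Connections     int32   `json:"connections"`
	Proxy           string  `json:"proxy"`
	Difficulty      float64 `json:"difficulty"`
	TestNet         bool    `json:"testnet"`
	RelayFee        float64 `json:"relayfee"`
}

func main() {
	// A response in the documented shape (values are illustrative).
	raw := []byte(`{
		"version": 70000,
		"protocolversion": 70001,
		"blocks": 298963,
		"timeoffset": 0,
		"connections": 17,
		"proxy": "",
		"difficulty": 8000872135.97,
		"testnet": false,
		"relayfee": 0.00001
	}`)

	var info infoResult
	if err := json.Unmarshal(raw, &info); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("height=%d peers=%d relayfee=%.5f DCR/KB\n",
		info.Blocks, info.Connections, info.RelayFee)
}
```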
@@ -397,7 +397,7 @@ the method name for further details such as parameter and return information. |Parameters|None| |Description|Returns data about each connected network peer as an array of json objects.| |Returns|`[`
  `{`
    `"addr": "host:port", (string) the ip address and port of the peer`
    `"services": "00000001", (string) the services supported by the peer`
    `"lastrecv": n, (numeric) time the last message was received in seconds since 1 Jan 1970 GMT`
    `"lastsend": n, (numeric) time the last message was sent in seconds since 1 Jan 1970 GMT`
    `"bytessent": n, (numeric) total bytes sent`
    `"bytesrecv": n, (numeric) total bytes received`
    `"conntime": n, (numeric) time the connection was made in seconds since 1 Jan 1970 GMT`
    `"pingtime": n, (numeric) number of microseconds the last ping took`
    `"pingwait": n, (numeric) number of microseconds a queued ping has been waiting for a response`
    `"version": n, (numeric) the protocol version of the peer`
    `"subver": "useragent", (string) the user agent of the peer`
    `"inbound": true_or_false, (boolean) whether or not the peer is an inbound connection`
    `"startingheight": n, (numeric) the latest block height the peer knew about when the connection was established`
    `"currentheight": n, (numeric) the latest block height the peer is known to have relayed since connected`
    `"syncnode": true_or_false, (boolean) whether or not the peer is the sync peer`
  `}, ...`
`]`| -|Example Return|`[`
  `{`
    `"addr": "178.172.xxx.xxx:8333",`
    `"services": "00000001",`
    `"lastrecv": 1388183523,`
    `"lastsend": 1388185470,`
    `"bytessent": 287592965,`
    `"bytesrecv": 780340,`
    `"conntime": 1388182973,`
    `"pingtime": 405551,`
    `"pingwait": 183023,`
    `"version": 70001,`
    `"subver": "/btcd:0.4.0/",`
    `"inbound": false,`
    `"startingheight": 276921,`
    `"currentheight": 276955,`
    `"syncnode": true,`
  `}`
`]`| +|Example Return|`[`
  `{`
    `"addr": "178.172.xxx.xxx:8333",`
    `"services": "00000001",`
    `"lastrecv": 1388183523,`
    `"lastsend": 1388185470,`
    `"bytessent": 287592965,`
    `"bytesrecv": 780340,`
    `"conntime": 1388182973,`
    `"pingtime": 405551,`
    `"pingwait": 183023,`
    `"version": 70001,`
    `"subver": "/dcrd:0.4.0/",`
    `"inbound": false,`
    `"startingheight": 276921,`
    `"currentheight": 276955,`
    `"syncnode": true,`
  `}`
`]`| [Return to Overview](#MethodOverview)
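Since getpeerinfo returns a JSON array, a client normally decodes it into a slice and aggregates over the documented fields. A minimal sketch, standard library only, with a hard-coded stand-in for a real response (the addresses and counters are placeholders):

```Go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// peerInfo models a subset of the documented getpeerinfo fields.
type peerInfo struct {
	Addr      string `json:"addr"`
	Inbound   bool   `json:"inbound"`
	BytesSent uint64 `json:"bytessent"`
	BytesRecv uint64 `json:"bytesrecv"`
	SyncNode  bool   `json:"syncnode"`
}

func main() {
	// Stand-in for a real getpeerinfo result (values are illustrative).
	raw := []byte(`[
		{"addr": "203.0.113.5:9108", "inbound": false, "bytessent": 287592965, "bytesrecv": 780340, "syncnode": true},
		{"addr": "198.51.100.7:9108", "inbound": true, "bytessent": 10240, "bytesrecv": 20480, "syncnode": false}
	]`)

	var peers []peerInfo
	if err := json.Unmarshal(raw, &peers); err != nil {
		log.Fatal(err)
	}

	var inbound int
	var sent, recv uint64
	for _, p := range peers {
		if p.Inbound {
			inbound++
		}
		sent += p.BytesSent
		recv += p.BytesRecv
		if p.SyncNode {
			fmt.Println("sync peer:", p.Addr)
		}
	}
	fmt.Printf("%d peers (%d inbound), sent=%d recv=%d bytes\n",
		len(peers), inbound, sent, recv)
}
```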
*** @@ -409,7 +409,7 @@ the method name for further details such as parameter and return information. |Parameters|1. transaction hash (string, required) - the hash of the transaction
2. verbose (int, optional, default=0) - specifies the transaction is returned as a JSON object instead of hex-encoded string| |Description|Returns information about a transaction given its hash.| |Returns (verbose=0)|`"data" (string) hex-encoded bytes of the serialized transaction`| -|Returns (verbose=1)|`{ (json object)`
  `"hex": "data", (string) hex-encoded transaction`
  `"txid": "hash", (string) the hash of the transaction`
  `"version": n, (numeric) the transaction version`
  `"locktime": n, (numeric) the transaction lock time`
  `"vin": [ (array of json objects) the transaction inputs as json objects`
  For coinbase transactions:
    `{ (json object)`
      `"coinbase": "data", (string) the hex-dencoded bytes of the signature script`
      `"sequence": n, (numeric) the script sequence number`
    `}`
  For non-coinbase transactions:
    `{ (json object)`
      `"txid": "hash", (string) the hash of the origin transaction`
      `"vout": n, (numeric) the index of the output being redeemed from the origin transaction`
      `"scriptSig": { (json object) the signature script used to redeem the origin transaction`
        `"asm": "asm", (string) disassembly of the script`
        `"hex": "data", (string) hex-encoded bytes of the script`
      `}`
      `"sequence": n, (numeric) the script sequence number`
    `}, ...`
  `]`
  `"vout": [ (array of json objects) the transaction outputs as json objects`
    `{ (json object)`
      `"value": n, (numeric) the value in BTC`
      `"n": n, (numeric) the index of this transaction output`
      `"scriptPubKey": { (json object) the public key script used to pay coins`
        `"asm": "asm", (string) disassembly of the script`
        `"hex": "data", (string) hex-encoded bytes of the script`
        `"reqSigs": n, (numeric) the number of required signatures`
        `"type": "scripttype" (string) the type of the script (e.g. 'pubkeyhash')`
        `"addresses": [ (json array of string) the bitcoin addresses associated with this output`
          `"bitcoinaddress", (string) the bitcoin address`
          `...`
        `]`
      `}`
    `}, ...`
  `]`
`}`| +|Returns (verbose=1)|`{ (json object)`
  `"hex": "data", (string) hex-encoded transaction`
  `"txid": "hash", (string) the hash of the transaction`
  `"version": n, (numeric) the transaction version`
  `"locktime": n, (numeric) the transaction lock time`
  `"vin": [ (array of json objects) the transaction inputs as json objects`
  For coinbase transactions:
    `{ (json object)`
      `"coinbase": "data", (string) the hex-dencoded bytes of the signature script`
      `"sequence": n, (numeric) the script sequence number`
    `}`
  For non-coinbase transactions:
    `{ (json object)`
      `"txid": "hash", (string) the hash of the origin transaction`
      `"vout": n, (numeric) the index of the output being redeemed from the origin transaction`
      `"scriptSig": { (json object) the signature script used to redeem the origin transaction`
        `"asm": "asm", (string) disassembly of the script`
        `"hex": "data", (string) hex-encoded bytes of the script`
      `}`
      `"sequence": n, (numeric) the script sequence number`
    `}, ...`
  `]`
  `"vout": [ (array of json objects) the transaction outputs as json objects`
    `{ (json object)`
      `"value": n, (numeric) the value in DCR`
      `"n": n, (numeric) the index of this transaction output`
      `"scriptPubKey": { (json object) the public key script used to pay coins`
        `"asm": "asm", (string) disassembly of the script`
        `"hex": "data", (string) hex-encoded bytes of the script`
        `"reqSigs": n, (numeric) the number of required signatures`
        `"type": "scripttype" (string) the type of the script (e.g. 'pubkeyhash')`
        `"addresses": [ (json array of string) the decred addresses associated with this output`
          `"decredaddress", (string) the decred address`
          `...`
        `]`
      `}`
    `}, ...`
  `]`
`}`| |Example Return (verbose=0)|`"010000000104be666c7053ef26c6110597dad1c1e81b5e6be53d17a8b9d0b34772054bac60000000`
`008c493046022100cb42f8df44eca83dd0a727988dcde9384953e830b1f8004d57485e2ede1b9c8f`
`022100fbce8d84fcf2839127605818ac6c3e7a1531ebc69277c504599289fb1e9058df0141045a33`
`76eeb85e494330b03c1791619d53327441002832f4bd618fd9efa9e644d242d5e1145cb9c2f71965`
`656e276633d4ff1a6db5e7153a0a9042745178ebe0f5ffffffff0280841e00000000001976a91406`
`f1b6703d3f56427bfcfd372f952d50d04b64bd88ac4dd52700000000001976a9146b63f291c295ee`
`abd9aee6be193ab2d019e7ea7088ac00000000`
**Newlines added for display purposes. The actual return does not contain newlines.**| |Example Return (verbose=1)|`{`
  `"hex": "01000000010000000000000000000000000000000000000000000000000000000000000000f...",`
  `"txid": "90743aad855880e517270550d2a881627d84db5265142fd1e7fb7add38b08be9",`
  `"version": 1,`
  `"locktime": 0,`
  `"vin": [`
  For coinbase transactions:
    `{ (json object)`
      `"coinbase": "03708203062f503253482f04066d605108f800080100000ea2122f6f7a636f696e4065757374726174756d2f",`
      `"sequence": 0,`
    `}`
  For non-coinbase transactions:
    `{`
      `"txid": "60ac4b057247b3d0b9a8173de56b5e1be8c1d1da970511c626ef53706c66be04",`
      `"vout": 0,`
      `"scriptSig": {`
        `"asm": "3046022100cb42f8df44eca83dd0a727988dcde9384953e830b1f8004d57485e2ede1b9c8f0...",`
        `"hex": "493046022100cb42f8df44eca83dd0a727988dcde9384953e830b1f8004d57485e2ede1b9c8...",`
      `}`
      `"sequence": 4294967295,`
    `}`
  `]`
  `"vout": [`
    `{`
      `"value": 25.1394,`
      `"n": 0,`
      `"scriptPubKey": {`
        `"asm": "OP_DUP OP_HASH160 ea132286328cfc819457b9dec386c4b5c84faa5c OP_EQUALVERIFY OP_CHECKSIG",`
        `"hex": "76a914ea132286328cfc819457b9dec386c4b5c84faa5c88ac",`
        `"reqSigs": 1,`
        `"type": "pubkeyhash"`
        `"addresses": [`
          `"1NLg3QJMsMQGM5KEUaEu5ADDmKQSLHwmyh",`
        `]`
      `}`
    `}`
  `]`
`}`| [Return to Overview](#MethodOverview)
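Note that the `vin` entries above come in two shapes: coinbase inputs carry only `coinbase` and `sequence`, while regular inputs carry `txid`, `vout`, and `scriptSig`. One way to cope with both in Go is a single struct in which an empty `coinbase` field marks a regular input; the following standard-library sketch uses abridged, illustrative JSON.

```Go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// vin covers both documented input shapes; Coinbase is empty for regular
// (non-coinbase) inputs.
type vin struct {
	Coinbase  string `json:"coinbase,omitempty"`
	Txid      string `json:"txid,omitempty"`
	Vout      uint32 `json:"vout"`
	Sequence  uint32 `json:"sequence"`
	ScriptSig *struct {
		Asm string `json:"asm"`
		Hex string `json:"hex"`
	} `json:"scriptSig,omitempty"`
}

func main() {
	// Abridged, illustrative vin array in the documented shape.
	raw := []byte(`[
		{"coinbase": "03708203", "sequence": 0},
		{"txid": "60ac4b057247b3d0b9a8173de56b5e1be8c1d1da970511c626ef53706c66be04",
		 "vout": 0,
		 "scriptSig": {"asm": "3046...", "hex": "4930..."},
		 "sequence": 4294967295}
	]`)

	var inputs []vin
	if err := json.Unmarshal(raw, &inputs); err != nil {
		log.Fatal(err)
	}
	for i, in := range inputs {
		if in.Coinbase != "" {
			fmt.Printf("input %d: coinbase (%d bytes of script)\n", i, len(in.Coinbase)/2)
			continue
		}
		fmt.Printf("input %d: spends %s:%d\n", i, in.Txid, in.Vout)
	}
}
```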
@@ -422,7 +422,7 @@ the method name for further details such as parameter and return information. |Method|getwork| |Parameters|1. data (string, optional) - The hex| |Description|Returns formatted hash data to work on or checks and submits solved data.| -|Notes|NOTE: Since btcd does not have the wallet integrated to provide payment addresses, btcd must be configured via the `--miningaddr` option to provide which payment addresses to pay created blocks to for this RPC to function. +|Notes|NOTE: Since dcrd does not have the wallet integrated to provide payment addresses, dcrd must be configured via the `--miningaddr` option to provide which payment addresses to pay created blocks to for this RPC to function. |Returns (data not specified)|`{ (json object)`&#13;
  `"data": "hex", (string) hex-encoded block data`
  `"hash1": "hex", (string) (DEPRECATED) hex-encoded formatted hash buffer`
  `"midstate": "hex", (string) (DEPRECATED) hex-encoded precomputed hash state after hashing first half of the data`
  `"target": "hex", (string) the hex-encoded little-endian hash target`
`}`| |Returns (data specified)|`true` or `false` (boolean)| |Example Return (data not specified)|`{`
  `"data": "00000002c39b5d2b7a1e8f7356a1efce26b24bd15d7d906e85341ef9cec99b6a000000006474f...",`
  `"hash1": "00000000000000000000000000000000000000000000000000000000000000000000008000000...",`
  `"midstate": "ae4a80fc51476e452de855b4e20d5f33418c50fc7cae3b1ecd5badb819b8a584",`
  `"target": "0000000000000000000000000000000000000000000000008c96010000000000",`
`}`| @@ -460,9 +460,9 @@ the method name for further details such as parameter and return information. |Method|getrawmempool| |Parameters|1. verbose (boolean, optional, default=false)| |Description|Returns an array of hashes for all of the transactions currently in the memory pool.
The `verbose` flag specifies that each transaction is returned as a JSON object.| -|Notes|Since btcd does not perform any mining, the priority related fields `startingpriority` and `currentpriority` that are available when the `verbose` flag is set are always 0.| +|Notes|Since dcrd does not perform any mining, the priority related fields `startingpriority` and `currentpriority` that are available when the `verbose` flag is set are always 0.| |Returns (verbose=false)|`[ (json array of string)`
  `"transactionhash", (string) hash of the transaction`
  `...`
`]`| -|Returns (verbose=true)|`{ (json object)`
  `"transactionhash": { (json object)`
    `"size": n, (numeric) transaction size in bytes`
    `"fee" : n, (numeric) transaction fee in bitcoins`
    `"time": n, (numeric) local time transaction entered pool in seconds since 1 Jan 1970 GMT`
    `"height": n, (numeric) block height when transaction entered the pool`
    `"startingpriority": n, (numeric) priority when transaction entered the pool`
    `"currentpriority": n, (numeric) current priority`
    `"depends": [ (json array) unconfirmed transactions used as inputs for this transaction`
      `"transactionhash", (string) hash of the parent transaction`
      `...`
    `]`
  `}, ...`
`}`| +|Returns (verbose=true)|`{ (json object)`
  `"transactionhash": { (json object)`
    `"size": n, (numeric) transaction size in bytes`
    `"fee" : n, (numeric) transaction fee in decreds`
    `"time": n, (numeric) local time transaction entered pool in seconds since 1 Jan 1970 GMT`
    `"height": n, (numeric) block height when transaction entered the pool`
    `"startingpriority": n, (numeric) priority when transaction entered the pool`
    `"currentpriority": n, (numeric) current priority`
    `"depends": [ (json array) unconfirmed transactions used as inputs for this transaction`
      `"transactionhash", (string) hash of the parent transaction`
      `...`
    `]`
  `}, ...`
`}`| |Example Return (verbose=false)|`[`
  `"3480058a397b6ffcc60f7e3345a61370fded1ca6bef4b58156ed17987f20d4e7",`
  `"cbfe7c056a358c3a1dbced5a22b06d74b8650055d5195c1c2469e6b63a41514a"`
`]`| |Example Return (verbose=true)|`{`
  `"1697a19cede08694278f19584e8dcc87945f40c6b59a942dd8906f133ad3f9cc": {`
    `"size": 226,`
    `"fee" : 0.0001,`
    `"time": 1387992789,`
    `"height": 276836,`
    `"startingpriority": 0,`
    `"currentpriority": 0,`
    `"depends": [`
      `"aa96f672fcc5a1ec6a08a94aa46d6b789799c87bd6542967da25a96b2dee0afb",`
    `]`
`}`| [Return to Overview](#MethodOverview)
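Because the verbose getrawmempool result is keyed by transaction hash, it decodes naturally into a Go map rather than a slice. A minimal standard-library sketch with a one-entry illustrative response:

```Go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// mempoolEntry models the documented verbose getrawmempool fields.
type mempoolEntry struct {
	Size             int32    `json:"size"`
	Fee              float64  `json:"fee"`
	Time             int64    `json:"time"`
	Height           int64    `json:"height"`
	StartingPriority float64  `json:"startingpriority"`
	CurrentPriority  float64  `json:"currentpriority"`
	Depends          []string `json:"depends"`
}

func main() {
	// Illustrative single-entry response in the documented shape.
	raw := []byte(`{
		"1697a19cede08694278f19584e8dcc87945f40c6b59a942dd8906f133ad3f9cc": {
			"size": 226,
			"fee": 0.0001,
			"time": 1387992789,
			"height": 276836,
			"startingpriority": 0,
			"currentpriority": 0,
			"depends": ["aa96f672fcc5a1ec6a08a94aa46d6b789799c87bd6542967da25a96b2dee0afb"]
		}
	}`)

	var pool map[string]mempoolEntry
	if err := json.Unmarshal(raw, &pool); err != nil {
		log.Fatal(err)
	}
	for hash, entry := range pool {
		fmt.Printf("%s: %d bytes, fee %.4f, %d unconfirmed parent(s)\n",
			hash, entry.Size, entry.Fee, len(entry.Depends))
	}
}
```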
@@ -475,7 +475,7 @@ the method name for further details such as parameter and return information. |Method|setgenerate| |Parameters|1. generate (boolean, required) - `true` to enable generation, `false` to disable it
2. genproclimit (numeric, optional) - the number of processors (cores) to limit generation to or `-1` for default| |Description|Set the server to generate coins (mine) or not.| -|Notes|NOTE: Since btcd does not have the wallet integrated to provide payment addresses, btcd must be configured via the `--miningaddr` option to provide which payment addresses to pay created blocks to for this RPC to function.| +|Notes|NOTE: Since dcrd does not have the wallet integrated to provide payment addresses, dcrd must be configured via the `--miningaddr` option to provide which payment addresses to pay created blocks to for this RPC to function.| |Returns|Nothing| [Return to Overview](#MethodOverview)
@@ -487,7 +487,7 @@ the method name for further details such as parameter and return information. |Method|sendrawtransaction| |Parameters|1. signedhex (string, required) serialized, hex-encoded signed transaction
2. allowhighfees (boolean, optional, default=false) whether or not to allow insanely high fees| |Description|Submits the serialized, hex-encoded transaction to the local peer and relays it to the network.| -|Notes|btcd does not yet implement the `allowhighfees` parameter, so it has no effect| +|Notes|dcrd does not yet implement the `allowhighfees` parameter, so it has no effect| |Returns|`"hash" (string) the hash of the transaction`| |Example Return|`"1697a19cede08694278f19584e8dcc87945f40c6b59a942dd8906f133ad3f9cc"`| [Return to Overview](#MethodOverview)
@@ -510,8 +510,8 @@ the method name for further details such as parameter and return information. |---|---| |Method|stop| |Parameters|None| -|Description|Shutdown btcd.| -|Returns|`"btcd stopping."` (string)| +|Description|Shutdown dcrd.| +|Returns|`"dcrd stopping."` (string)| [Return to Overview](#MethodOverview)
*** @@ -520,9 +520,9 @@ the method name for further details such as parameter and return information. | | | |---|---| |Method|validateaddress| -|Parameters|1. address (string, required) - bitcoin address| +|Parameters|1. address (string, required) - decred address| |Description|Verify an address is valid.| -|Returns|`{ (json object)`
  `"isvalid": true or false, (bool) whether or not the address is valid.`
  `"address": "bitcoinaddress", (string) the bitcoin address validated.`
}| +|Returns|`{ (json object)`
  `"isvalid": true or false, (bool) whether or not the address is valid.`
  `"address": "decredaddress", (string) the decred address validated.`
}| [Return to Overview](#MethodOverview)
*** @@ -532,8 +532,8 @@ the method name for further details such as parameter and return information. |---|---| |Method|verifychain| |Parameters|1. checklevel (numeric, optional, default=3) - how in-depth the verification is (0=least amount of checks, higher levels are clamped to the highest supported level)
2. numblocks (numeric, optional, default=288) - the number of blocks starting from the end of the chain to verify| -|Description|Verifies the block chain database.
The actual checks performed by the `checklevel` parameter is implementation specific. For btcd this is:
`checklevel=0` - Look up each block and ensure it can be loaded from the database.
`checklevel=1` - Perform basic context-free sanity checks on each block.| -|Notes|Btcd currently only supports `checklevel` 0 and 1, but the default is still 3 for compatibility. Per the information in the Parameters section above, higher levels are automatically clamped to the highest supported level, so this means the default is effectively 1 for btcd.| +|Description|Verifies the block chain database.
The actual checks performed by the `checklevel` parameter are implementation specific. For dcrd they are:&#13;
`checklevel=0` - Look up each block and ensure it can be loaded from the database.
`checklevel=1` - Perform basic context-free sanity checks on each block.| +|Notes|Dcrd currently only supports `checklevel` 0 and 1, but the default is still 3 for compatibility. Per the information in the Parameters section above, higher levels are automatically clamped to the highest supported level, so this means the default is effectively 1 for dcrd.| |Returns|`true` or `false` (boolean)| |Example Return|`true`| [Return to Overview](#MethodOverview)
@@ -545,13 +545,13 @@ the method name for further details such as parameter and return information.
**6.1 Method Overview**
-The following is an overview of the RPC methods which are implemented by btcd, but not the original bitcoind client. Click the method name for further details such as parameter and return information. +The following is an overview of the RPC methods which are implemented by dcrd, but not the original decredd client. Click the method name for further details such as parameter and return information. |#|Method|Safe for limited user?|Description| |---|------|----------|-----------| |1|[debuglevel](#debuglevel)|N|Dynamically changes the debug logging level.| |2|[getbestblock](#getbestblock)|Y|Get block height and hash of best block in the main chain.|None| -|3|[getcurrentnet](#getcurrentnet)|Y|Get bitcoin network btcd is running on.|None| +|3|[getcurrentnet](#getcurrentnet)|Y|Get decred network dcrd is running on.|None| |4|[searchrawtransactions](#searchrawtransactions)|Y|Query for transactions related to a particular address.|None| |5|[node](#node)|N|Attempts to add or remove a peer. |None| |6|[generate](#generate)|N|When in simnet or regtest mode, generate a set number of blocks. |None| @@ -566,10 +566,10 @@ The following is an overview of the RPC methods which are implemented by btcd, b |---|---| |Method|debuglevel| |Parameters|1. _levelspec_ (string)| -|Description|Dynamically changes the debug logging level.
The levelspec can either a debug level or of the form `=,=,...`
The valid debug levels are `trace`, `debug`, `info`, `warn`, `error`, and `critical`.
The valid subsystems are `AMGR`, `ADXR`, `BCDB`, `BMGR`, `BTCD`, `CHAN`, `DISC`, `PEER`, `RPCS`, `SCRP`, `SRVR`, and `TXMP`.
Additionally, the special keyword `show` can be used to get a list of the available subsystems.| +|Description|Dynamically changes the debug logging level.
The levelspec can either be a debug level or of the form `<subsystem>=<level>,<subsystem2>=<level2>,...`&#13;
The valid debug levels are `trace`, `debug`, `info`, `warn`, `error`, and `critical`.
The valid subsystems are `AMGR`, `ADXR`, `BCDB`, `BMGR`, `DCRD`, `CHAN`, `DISC`, `PEER`, `RPCS`, `SCRP`, `SRVR`, and `TXMP`.
Additionally, the special keyword `show` can be used to get a list of the available subsystems.| |Returns|string| |Example Return|`Done.`| -|Example `show` Return|`Supported subsystems [AMGR ADXR BCDB BMGR BTCD CHAN DISC PEER RPCS SCRP SRVR TXMP]`| +|Example `show` Return|`Supported subsystems [AMGR ADXR BCDB BMGR DCRD CHAN DISC PEER RPCS SCRP SRVR TXMP]`| [Return to Overview](#ExtMethodOverview)
*** @@ -592,9 +592,9 @@ The following is an overview of the RPC methods which are implemented by btcd, b |---|---| |Method|getcurrentnet| |Parameters|None| -|Description|Get bitcoin network btcd is running on.| +|Description|Get decred network dcrd is running on.| |Returns|numeric| -|Example Return|`3652501241` (mainnet)
`118034699` (testnet3)| +|Example Return|`3652501241` (mainnet)
`118034699` (testnet)| [Return to Overview](#ExtMethodOverview)
*** @@ -604,10 +604,10 @@ The following is an overview of the RPC methods which are implemented by btcd, b | | | |---|---| |Method|searchrawtransactions| -|Parameters|1. address (string, required) - bitcoin address
2. verbose (int, optional, default=true) - specifies the transaction is returned as a JSON object instead of hex-encoded string
3. skip (int, optional, default=0) - the number of leading transactions to leave out of the final response
4. count (int, optional, default=100) - the maximum number of transactions to return| +|Parameters|1. address (string, required) - decred address
2. verbose (int, optional, default=true) - specifies the transaction is returned as a JSON object instead of hex-encoded string
3. skip (int, optional, default=0) - the number of leading transactions to leave out of the final response
4. count (int, optional, default=100) - the maximum number of transactions to return| |Description|Returns raw data for transactions involving the passed address. Returned transactions are pulled from both the database, and transactions currently in the mempool. Transactions pulled from the mempool will have the `"confirmations"` field set to 0. Usage of this RPC requires the optional `--addrindex` flag to be activated, otherwise all responses will simply return with an error stating the address index has not yet been built up. Similarly, until the address index has caught up with the current best height, all requests will return an error response in order to avoid serving stale data.| |Returns (verbose=0)|`[ (json array of strings)`
   `"serializedtx", ... hex-encoded bytes of the serialized transaction`
`]` | -|Returns (verbose=1)|`[ (array of json objects)`
   `{ (json object)`
  `"hex": "data", (string) hex-encoded transaction`
  `"txid": "hash", (string) the hash of the transaction`
  `"version": n, (numeric) the transaction version`
  `"locktime": n, (numeric) the transaction lock time`
  `"vin": [ (array of json objects) the transaction inputs as json objects`
  For coinbase transactions:
    `{ (json object)`
      `"coinbase": "data", (string) the hex-dencoded bytes of the signature script`
      `"sequence": n, (numeric) the script sequence number`
    `}`
  For non-coinbase transactions:
    `{ (json object)`
      `"txid": "hash", (string) the hash of the origin transaction`
      `"vout": n, (numeric) the index of the output being redeemed from the origin transaction`
      `"scriptSig": { (json object) the signature script used to redeem the origin transaction`
        `"asm": "asm", (string) disassembly of the script`
        `"hex": "data", (string) hex-encoded bytes of the script`
      `}`
      `"sequence": n, (numeric) the script sequence number`
    `}, ...`
  `]`
  `"vout": [ (array of json objects) the transaction outputs as json objects`
    `{ (json object)`
      `"value": n, (numeric) the value in BTC`
      `"n": n, (numeric) the index of this transaction output`
      `"scriptPubKey": { (json object) the public key script used to pay coins`
        `"asm": "asm", (string) disassembly of the script`
        `"hex": "data", (string) hex-encoded bytes of the script`
        `"reqSigs": n, (numeric) the number of required signatures`
        `"type": "scripttype" (string) the type of the script (e.g. 'pubkeyhash')`
        `"addresses": [ (json array of string) the bitcoin addresses associated with this output`
          `"address", (string) the bitcoin address`
          `...`
        `]`
      `}`
    `}, ...`
   `]`
   `"blockhash":"hash" Hash of the block the transaction is part of.`
   `"confirmations":n, Number of numeric confirmations of block.`
   `"time":t, Transaction time in seconds since the epoch.`
   `"blocktime":t, Block time in seconds since the epoch.`
`},...`
`]`| +|Returns (verbose=1)|`[ (array of json objects)`
   `{ (json object)`
  `"hex": "data", (string) hex-encoded transaction`
  `"txid": "hash", (string) the hash of the transaction`
  `"version": n, (numeric) the transaction version`
  `"locktime": n, (numeric) the transaction lock time`
  `"vin": [ (array of json objects) the transaction inputs as json objects`
  For coinbase transactions:
    `{ (json object)`
      `"coinbase": "data", (string) the hex-dencoded bytes of the signature script`
      `"sequence": n, (numeric) the script sequence number`
    `}`
  For non-coinbase transactions:
    `{ (json object)`
      `"txid": "hash", (string) the hash of the origin transaction`
      `"vout": n, (numeric) the index of the output being redeemed from the origin transaction`
      `"scriptSig": { (json object) the signature script used to redeem the origin transaction`
        `"asm": "asm", (string) disassembly of the script`
        `"hex": "data", (string) hex-encoded bytes of the script`
      `}`
      `"sequence": n, (numeric) the script sequence number`
    `}, ...`
  `]`
  `"vout": [ (array of json objects) the transaction outputs as json objects`
    `{ (json object)`
      `"value": n, (numeric) the value in DCR`
      `"n": n, (numeric) the index of this transaction output`
      `"scriptPubKey": { (json object) the public key script used to pay coins`
        `"asm": "asm", (string) disassembly of the script`
        `"hex": "data", (string) hex-encoded bytes of the script`
        `"reqSigs": n, (numeric) the number of required signatures`
        `"type": "scripttype" (string) the type of the script (e.g. 'pubkeyhash')`
        `"addresses": [ (json array of string) the decred addresses associated with this output`
          `"address", (string) the decred address`
          `...`
        `]`
      `}`
    `}, ...`
   `]`
   `"blockhash":"hash" Hash of the block the transaction is part of.`
   `"confirmations":n, Number of numeric confirmations of block.`
   `"time":t, Transaction time in seconds since the epoch.`
   `"blocktime":t, Block time in seconds since the epoch.`
`},...`
`]`| [Return to Overview](#ExtMethodOverview)
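The `skip` and `count` parameters allow paging through an address's history. The sketch below only builds the JSON-RPC request bodies for successive pages (the address and page size are placeholders); each body would be POSTed to dcrd the same way as the earlier decodescript sketch, and the server must be running with `--addrindex`.

```Go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

func main() {
	// Placeholder address and page size; a real call also requires dcrd to
	// be running with --addrindex.
	const address = "DsPlaceholderAddressxxxxxxxxxxxxxx"
	const pageSize = 25

	// Build one JSON-RPC request body per page by advancing skip.
	for page := 0; page < 3; page++ {
		body, err := json.Marshal(map[string]interface{}{
			"jsonrpc": "1.0",
			"id":      page + 1,
			"method":  "searchrawtransactions",
			// Parameters: address, verbose, skip, count.
			"params": []interface{}{address, 1, page * pageSize, pageSize},
		})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(body))
	}
}
```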
*** @@ -657,6 +657,7 @@ user. Click the method name for further details such as parameter and return in |8|[rescan](#rescan)|Rescan block chain for transactions to addresses and spent transaction outpoints.|[recvtx](#recvtx), [redeemingtx](#redeemingtx), [rescanprogress](#rescanprogress), and [rescanfinished](#rescanfinished) | |9|[notifynewtransactions](#notifynewtransactions)|Send notifications for all new transactions as they are accepted into the mempool.|[txaccepted](#txaccepted) or [txacceptedverbose](#txacceptedverbose)| |10|[stopnotifynewtransactions](#stopnotifynewtransactions)|Stop sending either a txaccepted or a txacceptedverbose notification when a new transaction is accepted into the mempool.|None| +|11|[session](#session)|Return details regarding a websocket client's current connection.|None|
**7.2 Method Details**
@@ -704,7 +705,7 @@ user. Click the method name for further details such as parameter and return in |---|---| |Method|notifyreceived| |Notifications|[recvtx](#recvtx) and [redeemingtx](#redeemingtx)| -|Parameters|1. Addresses (JSON array, required)
 `[ (json array of strings)`
  `"bitcoinaddress", (string) the bitcoin address`
  `...`
 `]`| +|Parameters|1. Addresses (JSON array, required)
 `[ (json array of strings)`
  `"decredaddress", (string) the decred address`
  `...`
 `]`| |Description|Send a recvtx notification when a transaction added to mempool or appears in a newly-attached block contains a txout pkScript sending to any of the passed addresses. Matching outpoints are automatically registered for redeemingtx notifications.| |Returns|Nothing| [Return to Overview](#ExtensionRequestOverview)
@@ -717,7 +718,7 @@ user. Click the method name for further details such as parameter and return in |---|---| |Method|stopnotifyreceived| |Notifications|None| -|Parameters|1. Addresses (JSON array, required)
 `[ (json array of strings)`
  `"bitcoinaddress", (string) the bitcoin address`
  `...`
 `]`| +|Parameters|1. Addresses (JSON array, required)
 `[ (json array of strings)`
  `"decredaddress", (string) the decred address`
  `...`
 `]`| |Description|Cancel registered receive notifications for each passed address.| |Returns|Nothing| [Return to Overview](#ExtensionRequestOverview)
@@ -731,7 +732,7 @@ user. Click the method name for further details such as parameter and return in |Method|notifyspent| |Notifications|[redeemingtx](#redeemingtx)| |Parameters|1. Outpoints (JSON array, required)
 `[ (JSON array)`
  `{ (JSON object)`
   `"hash":"data", (string) the hex-encoded bytes of the outpoint hash`
   `"index":n (numeric) the txout index of the outpoint`
  `},`
  `...`
 `]`| -|Description|Send a redeemingtx notification when a transaction spending an outpoint appears in mempool (if relayed to this btcd instance) and when such a transaction first appears in a newly-attached block.| +|Description|Send a redeemingtx notification when a transaction spending an outpoint appears in mempool (if relayed to this dcrd instance) and when such a transaction first appears in a newly-attached block.| |Returns|Nothing| [Return to Overview](#ExtensionRequestOverview)
@@ -756,7 +757,7 @@ user. Click the method name for further details such as parameter and return in |---|---| |Method|rescan| |Notifications|[recvtx](#recvtx), [redeemingtx](#redeemingtx), [rescanprogress](#rescanprogress), and [rescanfinished](#rescanfinished)| -|Parameters|1. BeginBlock (string, required) block hash to begin rescanning from
2. Addresses (JSON array, required)
 `[ (json array of strings)`
  `"bitcoinaddress", (string) the bitcoin address`
  `...`
 `]`
3. Outpoints (JSON array, required)
 `[ (JSON array)`
  `{ (JSON object)`
   `"hash":"data", (string) the hex-encoded bytes of the outpoint hash`
   `"index":n (numeric) the txout index of the outpoint`
  `},`
  `...`
 `]`
4. EndBlock (string, optional) hash of final block to rescan| +|Parameters|1. BeginBlock (string, required) block hash to begin rescanning from
2. Addresses (JSON array, required)
 `[ (json array of strings)`
  `"decredaddress", (string) the decred address`
  `...`
 `]`
3. Outpoints (JSON array, required)
 `[ (JSON array)`
  `{ (JSON object)`
   `"hash":"data", (string) the hex-encoded bytes of the outpoint hash`
   `"index":n (numeric) the txout index of the outpoint`
  `},`
  `...`
 `]`
4. EndBlock (string, optional) hash of final block to rescan| |Description|Rescan block chain for transactions to addresses, starting at block BeginBlock and ending at EndBlock. The current known UTXO set for all passed addresses at height BeginBlock should included in the Outpoints argument. If EndBlock is omitted, the rescan continues through the best block in the main chain. Additionally, if no EndBlock is provided, the client is automatically registered for transaction notifications for all rescanned addresses and the final UTXO set. Rescan results are sent as recvtx and redeemingtx notifications. This call returns once the rescan completes.| |Returns|Nothing| [Return to Overview](#ExtensionRequestOverview)
@@ -785,13 +786,26 @@ user. Click the method name for further details such as parameter and return in |Parameters|None| |Description|Stop sending either a [txaccepted](#txaccepted) or a [txacceptedverbose](#txacceptedverbose) notification when a new transaction is accepted into the mempool.| |Returns|Nothing| -[Return to Overview](#ExtensionRequestOverview)
+[Return to Overview](#WSExtMethodOverview)
+ +*** + +
+ +| | | +|---|---| +|Method|session| +|Notifications|None| +|Parameters|None| +|Description|Return a JSON object with details regarding a websocket client's current connection to the RPC server. This currently only includes the session ID, a random unsigned 64-bit integer that is created for each newly connected client. Session IDs may be used to verify that the current connection was not lost and subsequently reestablished.| +|Returns|`{ (json object)`
  `"sessionid": n (numeric) the session ID`
`}`| +|Example Return|`{`
  `"sessionid": 67089679842`
`}`|
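As described above, the session ID is chiefly useful for detecting that a websocket connection was dropped and transparently re-established, in which case any notification registrations are gone. A minimal sketch of that check, using only the standard library and two illustrative response bodies:

```Go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// sessionResult models the documented session return value.
type sessionResult struct {
	SessionID uint64 `json:"sessionid"`
}

// decodeSession parses a session response body.
func decodeSession(raw []byte) (uint64, error) {
	var res sessionResult
	if err := json.Unmarshal(raw, &res); err != nil {
		return 0, err
	}
	return res.SessionID, nil
}

func main() {
	// Illustrative responses captured right after connecting and later on.
	atConnect := []byte(`{"sessionid": 67089679842}`)
	later := []byte(`{"sessionid": 67089679842}`)

	first, err := decodeSession(atConnect)
	if err != nil {
		log.Fatal(err)
	}
	second, err := decodeSession(later)
	if err != nil {
		log.Fatal(err)
	}

	// A changed ID means the original connection was lost and a new one,
	// with no notification registrations, was established.
	if first != second {
		fmt.Println("connection was re-established; re-register notifications")
	} else {
		fmt.Println("same session; connection never dropped")
	}
}
```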
### 8. Notifications (Websocket-specific) -btcd uses standard JSON-RPC notifications to notify clients of changes, rather than requiring clients to poll btcd for updates. JSON-RPC notifications are a subset of requests, but do not contain an ID. The notification type is categorized by the `method` field and additional details are sent as a JSON array in the `params` field. +dcrd uses standard JSON-RPC notifications to notify clients of changes, rather than requiring clients to poll dcrd for updates. JSON-RPC notifications are a subset of requests, but do not contain an ID. The notification type is categorized by the `method` field and additional details are sent as a JSON array in the `params` field. **8.1 Notification Overview**
@@ -928,7 +942,7 @@ various languages. **9.1 Go** This section provides examples of using the RPC interface using Go and the -[btcrpcclient](https://github.com/btcsuite/btcrpcclient) package. +[dcrrpcclient](https://github.com/decred/dcrrpcclient) package. * [Using getblockcount to Retrieve the Current Block Height](#ExampleGetBlockCount) * [Using getblock to Retrieve the Genesis Block](#ExampleGetBlock) @@ -939,16 +953,16 @@ This section provides examples of using the RPC interface using Go and the **9.1.1 Using getblockcount to Retrieve the Current Block Height**
The following is an example Go application which uses the -[btcrpcclient](https://github.com/btcsuite/btcrpcclient) package to connect with -a btcd instance via Websockets, issues [getblockcount](#getblockcount) to +[dcrrpcclient](https://github.com/decred/dcrrpcclient) package to connect with +a dcrd instance via Websockets, issues [getblockcount](#getblockcount) to retrieve the current block height, and displays it. ```Go package main import ( - "github.com/btcsuite/btcrpcclient" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrrpcclient" + "github.com/decred/dcrutil" "io/ioutil" "log" "path/filepath" @@ -956,10 +970,10 @@ import ( func main() { // Load the certificate for the TLS connection which is automatically - // generated by btcd when it starts the RPC server and doesn't already + // generated by dcrd when it starts the RPC server and doesn't already // have one. - btcdHomeDir := btcutil.AppDataDir("btcd", false) - certs, err := ioutil.ReadFile(filepath.Join(btcdHomeDir, "rpc.cert")) + dcrdHomeDir := dcrutil.AppDataDir("dcrd", false) + certs, err := ioutil.ReadFile(filepath.Join(dcrdHomeDir, "rpc.cert")) if err != nil { log.Fatal(err) } @@ -967,14 +981,14 @@ func main() { // Create a new RPC client using websockets. Since this example is // not long-lived, the connection will be closed as soon as the program // exits. - connCfg := &btcrpcclient.ConnConfig{ + connCfg := &dcrrpcclient.ConnConfig{ Host: "localhost:8334", Endpoint: "ws", User: "yourrpcuser", Pass: "yourrpcpass", Certificates: certs, } - client, err := btcrpcclient.New(connCfg, nil) + client, err := dcrrpcclient.New(connCfg, nil) if err != nil { log.Fatal(err) } @@ -999,17 +1013,17 @@ Block count: 276978 **9.1.2 Using getblock to Retrieve the Genesis Block**
The following is an example Go application which uses the -[btcrpcclient](https://github.com/btcsuite/btcrpcclient) package to connect with -a btcd instance via Websockets, issues [getblock](#getblock) to retrieve +[dcrrpcclient](https://github.com/decred/dcrrpcclient) package to connect with +a dcrd instance via Websockets, issues [getblock](#getblock) to retrieve information about the Genesis block, and display a few details about it. ```Go package main import ( - "github.com/btcsuite/btcrpcclient" - "github.com/btcsuite/btcutil" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrrpcclient" + "github.com/decred/dcrutil" + "github.com/decred/dcrd/wire" "io/ioutil" "log" "path/filepath" @@ -1018,10 +1032,10 @@ import ( func main() { // Load the certificate for the TLS connection which is automatically - // generated by btcd when it starts the RPC server and doesn't already + // generated by dcrd when it starts the RPC server and doesn't already // have one. - btcdHomeDir := btcutil.AppDataDir("btcd", false) - certs, err := ioutil.ReadFile(filepath.Join(btcdHomeDir, "rpc.cert")) + dcrdHomeDir := dcrutil.AppDataDir("dcrd", false) + certs, err := ioutil.ReadFile(filepath.Join(dcrdHomeDir, "rpc.cert")) if err != nil { log.Fatal(err) } @@ -1029,14 +1043,14 @@ func main() { // Create a new RPC client using websockets. Since this example is // not long-lived, the connection will be closed as soon as the program // exits. - connCfg := &btcrpcclient.ConnConfig{ + connCfg := &dcrrpcclient.ConnConfig{ Host: "localhost:18334", Endpoint: "ws", User: "yourrpcuser", Pass: "yourrpcpass", Certificates: certs, } - client, err := btcrpcclient.New(connCfg, nil) + client, err := dcrrpcclient.New(connCfg, nil) if err != nil { log.Fatal(err) } @@ -1087,8 +1101,8 @@ Num transactions: 1 Notifications (Websocket-specific)**
The following is an example Go application which uses the -[btcrpcclient](https://github.com/btcsuite/btcrpcclient) package to connect with -a btcd instance via Websockets and registers for +[dcrrpcclient](https://github.com/decred/dcrrpcclient) package to connect with +a dcrd instance via Websockets and registers for [blockconnected](#blockconnected) and [blockdisconnected](#blockdisconnected) notifications with [notifyblocks](#notifyblocks). It also sets up handlers for the notifications. @@ -1097,9 +1111,9 @@ the notifications. package main import ( - "github.com/btcsuite/btcrpcclient" - "github.com/btcsuite/btcutil" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrrpcclient" + "github.com/decred/dcrutil" + "github.com/decred/dcrd/wire" "io/ioutil" "log" "path/filepath" @@ -1109,7 +1123,7 @@ import ( func main() { // Setup handlers for blockconnected and blockdisconnected // notifications. - ntfnHandlers := btcrpcclient.NotificationHandlers{ + ntfnHandlers := dcrrpcclient.NotificationHandlers{ OnBlockConnected: func(hash *wire.ShaHash, height int32) { log.Printf("Block connected: %v (%d)", hash, height) }, @@ -1119,23 +1133,23 @@ func main() { } // Load the certificate for the TLS connection which is automatically - // generated by btcd when it starts the RPC server and doesn't already + // generated by dcrd when it starts the RPC server and doesn't already // have one. - btcdHomeDir := btcutil.AppDataDir("btcd", false) - certs, err := ioutil.ReadFile(filepath.Join(btcdHomeDir, "rpc.cert")) + dcrdHomeDir := dcrutil.AppDataDir("dcrd", false) + certs, err := ioutil.ReadFile(filepath.Join(dcrdHomeDir, "rpc.cert")) if err != nil { log.Fatal(err) } // Create a new RPC client using websockets. - connCfg := &btcrpcclient.ConnConfig{ + connCfg := &dcrrpcclient.ConnConfig{ Host: "localhost:8334", Endpoint: "ws", User: "yourrpcuser", Pass: "yourrpcpass", Certificates: certs, } - client, err := btcrpcclient.New(connCfg, &ntfnHandlers) + client, err := dcrrpcclient.New(connCfg, &ntfnHandlers) if err != nil { log.Fatal(err) } @@ -1178,7 +1192,7 @@ Example output: **9.2.1 Using notifyblocks to be Notified of Block Connects and Disconnects**
The following is example node.js code which uses [ws](https://github.com/einaros/ws) -(can be installed with `npm install ws`) to connect with a btcd instance, +(can be installed with `npm install ws`) to connect with a dcrd instance, issues [notifyblocks](#notifyblocks) to register for [blockconnected](#blockconnected) and [blockdisconnected](#blockdisconnected) notifications, and displays all incoming messages. @@ -1188,14 +1202,14 @@ var fs = require('fs'); var WebSocket = require('ws'); // Load the certificate for the TLS connection which is automatically -// generated by btcd when it starts the RPC server and doesn't already +// generated by dcrd when it starts the RPC server and doesn't already // have one. -var cert = fs.readFileSync('/path/to/btcd/appdata/rpc.cert'); +var cert = fs.readFileSync('/path/to/dcrd/appdata/rpc.cert'); var user = "yourusername"; var password = "yourpassword"; -// Initiate the websocket connection. The btcd generated certificate acts as +// Initiate the websocket connection. The dcrd generated certificate acts as // its own certificate authority, so it needs to be specified in the 'ca' array // for the certificate to properly validate. var ws = new WebSocket('wss://127.0.0.1:8334/ws', { diff --git a/docs/using_bootstrap_dat.md b/docs/using_bootstrap_dat.md deleted file mode 100644 index d508831d..00000000 --- a/docs/using_bootstrap_dat.md +++ /dev/null @@ -1,74 +0,0 @@ -### Table of Contents -1. [What is bootstrap.dat?](#What)
-2. [What are the pros and cons of using bootstrap.dat?](#ProsCons) -3. [Where do I get bootstrap.dat?](#Obtaining) -4. [How do I know I can trust the bootstrap.dat I downloaded?](#Trust) -5. [How do I use bootstrap.dat with btcd?](#Importing) - -
-### 1. What is bootstrap.dat? - -It is a flat, binary file containing bitcoin blockchain data starting from the -genesis block and continuing through a relatively recent block height depending -on the last time it was updated. - -See [this](https://bitcointalk.org/index.php?topic=145386.0) thread on -bitcointalk for more details. - -**NOTE:** Using bootstrap.dat is entirely optional. Btcd will download the -block chain from other peers through the Bitcoin protocol with no extra -configuration needed. - - -### 2. What are the pros and cons of using bootstrap.dat? - -Pros: -- Typically accelerates the initial process of bringing up a new node as it - downloads from public P2P nodes and generally is able to achieve faster - download speeds -- It is particularly beneficial when bringing up multiple nodes as you only need - to download the data once - -Cons: -- Requires you to setup and configure a torrent client if you don't already have - one available -- Requires roughly twice as much disk space since you'll need the flat file as - well as the imported database - - -### 3. Where do I get bootstrap.dat? - -The bootstrap.dat file is made available via a torrent. See -[this](https://bitcointalk.org/index.php?topic=145386.0) thread on bitcointalk -for the torrent download details. - - -### 4. How do I know I can trust the bootstrap.dat I downloaded? - -You don't need to trust the file as the `addblock` utility verifies every block -using the same rules that are used when downloading the block chain normally -through the Bitcoin protocol. Additionally, the chain rules contain hard-coded -checkpoints for the known-good block chain at periodic intervals. This ensures -that not only is it a valid chain, but it is the same chain that everyone else -is using. - - -### 5. How do I use bootstrap.dat with btcd? - -btcd comes with a separate utility named `addblock` which can be used to import -`bootstrap.dat`. This approach is used since the import is a one-time operation -and we prefer to keep the daemon itself as lightweight as possible. - -1. Stop btcd if it is already running. This is required since addblock needs to - access the database used by btcd and it will be locked if btcd is using it. -2. Note the path to the downloaded bootstrap.dat file. -3. Run the addblock utility with the `-i` argument pointing to the location of - boostrap.dat:

-**Windows:** -```bat -C:\> "%PROGRAMFILES%\Btcd Suite\Btcd\addblock" -i C:\Path\To\bootstrap.dat -``` -**Linux/Unix/BSD/POSIX:** -```bash -$ $GOPATH/bin/addblock -i /path/to/bootstrap.dat -``` diff --git a/goclean.sh b/goclean.sh old mode 100755 new mode 100644 diff --git a/limits/limits_plan9.go b/limits/limits_plan9.go index 9c4699d6..6f31af07 100644 --- a/limits/limits_plan9.go +++ b/limits/limits_plan9.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. diff --git a/limits/limits_unix.go b/limits/limits_unix.go index 7ebf8667..a7345142 100644 --- a/limits/limits_unix.go +++ b/limits/limits_unix.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -16,7 +17,7 @@ const ( fileLimitMin = 1024 ) -// SetLimits raises some process limits to values which allow btcd and +// SetLimits raises some process limits to values which allow dcrd and // associated utilities to run. func SetLimits() error { var rLimit syscall.Rlimit diff --git a/limits/limits_windows.go b/limits/limits_windows.go index 62655d73..52270772 100644 --- a/limits/limits_windows.go +++ b/limits/limits_windows.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. diff --git a/log.go b/log.go index f73d73f6..77487fd1 100644 --- a/log.go +++ b/log.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -10,24 +11,19 @@ import ( "strings" "time" - "github.com/btcsuite/btcd/addrmgr" - - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btclog" "github.com/btcsuite/seelog" + + "github.com/decred/dcrd/addrmgr" + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" ) const ( - // lockTimeThreshold is the number below which a lock time is - // interpreted to be a block number. Since an average of one block - // is generated per 10 minutes, this allows blocks for about 9,512 - // years. However, if the field is interpreted as a timestamp, given - // the lock time is a uint32, the max is sometime around 2106. - lockTimeThreshold uint32 = 5e8 // Tue Nov 5 00:53:20 1985 UTC - // maxRejectReasonLen is the maximum length of a sanitized reject reason // that will be logged. 
maxRejectReasonLen = 250 @@ -43,7 +39,7 @@ var ( amgrLog = btclog.Disabled bcdbLog = btclog.Disabled bmgrLog = btclog.Disabled - btcdLog = btclog.Disabled + dcrdLog = btclog.Disabled chanLog = btclog.Disabled discLog = btclog.Disabled minrLog = btclog.Disabled @@ -51,6 +47,7 @@ var ( rpcsLog = btclog.Disabled scrpLog = btclog.Disabled srvrLog = btclog.Disabled + stkeLog = btclog.Disabled txmpLog = btclog.Disabled ) @@ -60,7 +57,7 @@ var subsystemLoggers = map[string]btclog.Logger{ "AMGR": amgrLog, "BCDB": bcdbLog, "BMGR": bmgrLog, - "BTCD": btcdLog, + "DCRD": dcrdLog, "CHAN": chanLog, "DISC": discLog, "MINR": minrLog, @@ -68,6 +65,7 @@ var subsystemLoggers = map[string]btclog.Logger{ "RPCS": rpcsLog, "SCRP": scrpLog, "SRVR": srvrLog, + "STKE": stkeLog, "TXMP": txmpLog, } @@ -110,8 +108,8 @@ func useLogger(subsystemID string, logger btclog.Logger) { case "BMGR": bmgrLog = logger - case "BTCD": - btcdLog = logger + case "DCRD": + dcrdLog = logger case "CHAN": chanLog = logger @@ -136,6 +134,10 @@ func useLogger(subsystemID string, logger btclog.Logger) { case "SRVR": srvrLog = logger + case "STKE": + stkeLog = logger + stake.UseLogger(logger) + case "TXMP": txmpLog = logger } @@ -214,9 +216,9 @@ func directionString(inbound bool) string { func formatLockTime(lockTime uint32) string { // The lock time field of a transaction is either a block height at // which the transaction is finalized or a timestamp depending on if the - // value is before the lockTimeThreshold. When it is under the + // value is before the txscript.LockTimeThreshold. When it is under the // threshold it is a block height. - if lockTime < lockTimeThreshold { + if lockTime < txscript.LockTimeThreshold { return fmt.Sprintf("height %d", lockTime) } @@ -251,7 +253,7 @@ func invSummary(invList []*wire.InvVect) string { } // locatorSummary returns a block locator as a human-readable string. -func locatorSummary(locator []*wire.ShaHash, stopHash *wire.ShaHash) string { +func locatorSummary(locator []*chainhash.Hash, stopHash *chainhash.Hash) string { if len(locator) > 0 { return fmt.Sprintf("locator %s, stop %s", locator[0], stopHash) } @@ -359,3 +361,10 @@ func messageSummary(msg wire.Message) string { // No summary for other messages. return "" } + +// fatalf logs a string, then cleanly exits. +func fatalf(str string) { + dcrdLog.Errorf("Unable to create profiler: %v", str) + backendLog.Flush() + os.Exit(1) +} diff --git a/mempool.go b/mempool.go index ebcaf5f7..a2913f66 100644 --- a/mempool.go +++ b/mempool.go @@ -1,23 +1,29 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package main import ( + "bytes" "container/list" "crypto/rand" "fmt" "math" "math/big" + "sort" "sync" "time" - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) const ( @@ -68,22 +74,56 @@ const ( // considered standard. 
maxStandardMultiSigKeys = 3 - // minTxRelayFee is the minimum fee in satoshi that is required for a + // minTxRelayFeeMainNet is the minimum fee in atoms that is required for a // transaction to be treated as free for relay and mining purposes. It // is also used to help determine if a transaction is considered dust // and as a base for calculating minimum required fees for larger - // transactions. This value is in Satoshi/1000 bytes. - minTxRelayFee = 1000 + // transactions. This value is in Atom/1000 bytes. + minTxRelayFeeMainNet = 5e6 + + // minTxRelayFeeTestNet is the minimum relay fee for the Test and Simulation + // networks. + minTxRelayFeeTestNet = 1e3 + + // maxSSGensDoubleSpends is the maximum number of SSGen double spends + // allowed in the pool. + maxSSGensDoubleSpends = 64 + + // heightDiffToPruneTicket is the number of blocks to pass by in terms + // of height before old tickets are pruned. + // TODO Set this based up the stake difficulty retargeting interval? + heightDiffToPruneTicket = 288 + + // heightDiffToPruneVotes is the number of blocks to pass by in terms + // of height before SSGen relating to that block are pruned. + heightDiffToPruneVotes = 10 + + // maxNullDataOutputs is the maximum number of OP_RETURN null data + // pushes in a transaction, after which it is considered non-standard. + maxNullDataOutputs = 4 ) // TxDesc is a descriptor containing a transaction in the mempool and the // metadata we store about it. type TxDesc struct { - Tx *btcutil.Tx // Transaction. - Added time.Time // Time when added to pool. - Height int64 // Blockheight when added to pool. - Fee int64 // Transaction fees. - startingPriority float64 // Priority when added to the pool. + Tx *dcrutil.Tx // Transaction. + Type stake.TxType // Transcation type. + Added time.Time // Time when added to pool. + Height int64 // Blockheight when added to pool. + Fee int64 // Transaction fees. + startingPriority float64 // Priority when added to the pool. +} + +// GetType returns what TxType a given TxDesc is. +func (td *TxDesc) GetType() stake.TxType { + return td.Type +} + +// VoteTx is a struct describing a block vote (SSGen). +type VoteTx struct { + SsgenHash chainhash.Hash // Vote + SstxHash chainhash.Hash // Ticket + Vote bool } // txMemPool is used as a source of transactions that need to be mined into @@ -92,21 +132,264 @@ type TxDesc struct { type txMemPool struct { sync.RWMutex server *server - pool map[wire.ShaHash]*TxDesc - orphans map[wire.ShaHash]*btcutil.Tx - orphansByPrev map[wire.ShaHash]*list.List - addrindex map[string]map[wire.ShaHash]struct{} // maps address to txs - outpoints map[wire.OutPoint]*btcutil.Tx - lastUpdated time.Time // last time pool was updated + pool map[chainhash.Hash]*TxDesc + orphans map[chainhash.Hash]*dcrutil.Tx + orphansByPrev map[chainhash.Hash]*list.List + addrindex map[string]map[chainhash.Hash]struct{} // maps address to txs + outpoints map[wire.OutPoint]*dcrutil.Tx + + // Votes on blocks. + votes map[chainhash.Hash][]*VoteTx + votesMtx sync.Mutex + + lastUpdated time.Time // last time pool was updated. pennyTotal float64 // exponentially decaying total for penny spends. lastPennyUnix int64 // unix time of last ``penny spend'' } +// insertVote inserts a vote into the map of block votes. +// This function is safe for concurrent access. 
+func (mp *txMemPool) insertVote(ssgen *dcrutil.Tx) error { + voteHash := ssgen.Sha() + msgTx := ssgen.MsgTx() + ticketHash := &msgTx.TxIn[1].PreviousOutPoint.Hash + + // Get the block it is voting on; here we're agnostic of height. + blockHash, blockHeight, err := stake.GetSSGenBlockVotedOn(ssgen) + if err != nil { + return err + } + + voteBits := stake.GetSSGenVoteBits(ssgen) + vote := dcrutil.IsFlagSet16(voteBits, dcrutil.BlockValid) + + voteTx := &VoteTx{*voteHash, *ticketHash, vote} + vts, exists := mp.votes[blockHash] + + // If there are currently no votes for this block, + // start a new buffered slice and store it. + if !exists { + minrLog.Debugf("Accepted vote %v for block hash %v (height %v), "+ + "voting %v on the transaction tree", + voteHash, blockHash, blockHeight, vote) + + slice := make([]*VoteTx, int(mp.server.chainParams.TicketsPerBlock), + int(mp.server.chainParams.TicketsPerBlock)) + slice[0] = voteTx + mp.votes[blockHash] = slice + return nil + } + + // We already have a vote for this ticket; break. + for _, vt := range vts { + // At the end. + if vt == nil { + break + } + + if vt.SstxHash.IsEqual(ticketHash) { + return nil + } + } + + // Add the new vote in. Find where the first empty + // slot is and insert it. + for i, vt := range vts { + // At the end. + if vt == nil { + mp.votes[blockHash][i] = voteTx + break + } + } + + minrLog.Debugf("Accepted vote %v for block hash %v (height %v), "+ + "voting %v on the transaction tree", + voteHash, blockHash, blockHeight, vote) + + return nil +} + +// InsertVote calls insertVote, but makes it safe for concurrent access. +func (mp *txMemPool) InsertVote(ssgen *dcrutil.Tx) error { + mp.votesMtx.Lock() + defer mp.votesMtx.Unlock() + + err := mp.insertVote(ssgen) + + return err +} + +// getVoteHashesForBlock gets the transaction hashes of all the known votes for +// some block on the blockchain. +func (mp *txMemPool) getVoteHashesForBlock(block chainhash.Hash) ([]chainhash.Hash, + error) { + hashes := make([]chainhash.Hash, 0) + vts, exists := mp.votes[block] + if !exists { + return nil, fmt.Errorf("couldn't find block requested in mp.votes") + } + + if len(vts) == 0 { + return nil, fmt.Errorf("block found in mp.votes, but contains no votes") + } + + zeroHash := &chainhash.Hash{} + for _, vt := range vts { + if vt == nil { + break + } + + if vt.SsgenHash.IsEqual(zeroHash) { + return nil, fmt.Errorf("unset vote hash in vote info") + } + hashes = append(hashes, vt.SsgenHash) + } + + return hashes, nil +} + +// GetVoteHashesForBlock calls getVoteHashesForBlock, but makes it safe for +// concurrent access. +func (mp *txMemPool) GetVoteHashesForBlock(block chainhash.Hash) ([]chainhash.Hash, + error) { + mp.votesMtx.Lock() + defer mp.votesMtx.Unlock() + + hashes, err := mp.getVoteHashesForBlock(block) + + return hashes, err +} + +// TODO Pruning of the votes map DECRED + +func getNumberOfVotesOnBlock(blockVoteTxs []*VoteTx) int { + numVotes := 0 + for _, vt := range blockVoteTxs { + if vt == nil { + break + } + + numVotes++ + } + + return numVotes +} + +// blockWithLenVotes is a block with the number of votes currently present +// for that block. Just used for sorting. +type blockWithLenVotes struct { + Block chainhash.Hash + Votes uint16 +} + +// ByNumberOfVotes defines the methods needed to satisify sort.Interface to +// sort a slice of Blocks by their number of votes. 
+type ByNumberOfVotes []*blockWithLenVotes + +func (b ByNumberOfVotes) Len() int { return len(b) } +func (b ByNumberOfVotes) Less(i, j int) bool { return b[i].Votes < b[j].Votes } +func (b ByNumberOfVotes) Swap(i, j int) { b[i], b[j] = b[j], b[i] } + +// sortParentsByVotes takes a list of block header hashes and sorts them +// by the number of votes currently available for them in the votes map of +// mempool. It then returns all blocks that are eligible to be used (have +// at least a majority number of votes) sorted by number of votes, descending. +func (mp *txMemPool) sortParentsByVotes(currentTopBlock chainhash.Hash, + blocks []chainhash.Hash) ([]chainhash.Hash, error) { + lenBlocks := len(blocks) + if lenBlocks == 0 { + return nil, fmt.Errorf("no blocks to sort") + } + + bwlvs := make([]*blockWithLenVotes, lenBlocks, lenBlocks) + + for i, blockHash := range blocks { + votes, exists := mp.votes[blockHash] + if exists { + bwlv := &blockWithLenVotes{ + blockHash, + uint16(getNumberOfVotesOnBlock(votes)), + } + bwlvs[i] = bwlv + } else { + bwlv := &blockWithLenVotes{ + blockHash, + uint16(0), + } + bwlvs[i] = bwlv + } + } + + // Blocks with the most votes appear at the top of the list. + sort.Sort(sort.Reverse(ByNumberOfVotes(bwlvs))) + + var sortedUsefulBlocks []chainhash.Hash + minimumVotesRequired := uint16((mp.server.chainParams.TicketsPerBlock / 2) + 1) + for _, bwlv := range bwlvs { + if bwlv.Votes >= minimumVotesRequired { + sortedUsefulBlocks = append(sortedUsefulBlocks, bwlv.Block) + } + } + + if sortedUsefulBlocks == nil { + return nil, miningRuleError(ErrNotEnoughVoters, + "no block had enough votes to build on top of") + } + + // Make sure we don't reorganize the chain needlessly if the top block has + // the same amount of votes as the current leader after the sort. After this + // point, all blocks listed in sortedUsefulBlocks definitely also have the + // minimum number of votes required. + topBlockVotes, exists := mp.votes[currentTopBlock] + topBlockVotesLen := 0 + if exists { + topBlockVotesLen = getNumberOfVotesOnBlock(topBlockVotes) + } + if bwlvs[0].Votes == uint16(topBlockVotesLen) { + if !bwlvs[0].Block.IsEqual(¤tTopBlock) { + // Find our block in the list. + pos := 0 + for i, bwlv := range bwlvs { + if bwlv.Block.IsEqual(¤tTopBlock) { + pos = i + break + } + } + + if pos == 0 { // Should never happen... + return nil, fmt.Errorf("couldn't find top block in list") + } + + // Swap the top block into the first position. We directly access + // sortedUsefulBlocks useful blocks here with the assumption that + // since the values were accumulated from blvs, they should be + // in the same positions and we shouldn't be able to access anything + // out of bounds. + sortedUsefulBlocks[0], sortedUsefulBlocks[pos] = + sortedUsefulBlocks[pos], sortedUsefulBlocks[0] + } + } + + return sortedUsefulBlocks, nil +} + +// SortParentsByVotes is the concurrency safe exported version of +// sortParentsByVotes. +func (mp *txMemPool) SortParentsByVotes(currentTopBlock chainhash.Hash, + blocks []chainhash.Hash) ([]chainhash.Hash, error) { + mp.votesMtx.Lock() + defer mp.votesMtx.Unlock() + + sortedBlocks, err := mp.sortParentsByVotes(currentTopBlock, blocks) + + return sortedBlocks, err +} + // isDust returns whether or not the passed transaction output amount is // considered dust or not. Dust is defined in terms of the minimum transaction // relay fee. In particular, if the cost to the network to spend coins is more // than 1/3 of the minimum transaction relay fee, it is considered dust. 
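A standalone sketch of the selection rule sortParentsByVotes implements above: count the votes available for each candidate block, keep only blocks with at least a majority of TicketsPerBlock votes, and order them by vote count descending. Types and names here are illustrative (string hashes, plain maps) rather than the chainhash/mempool types.

```go
package main

import (
	"fmt"
	"sort"
)

// eligibleParents returns the candidate block hashes that have at least
// (ticketsPerBlock/2)+1 votes, sorted by vote count, most votes first.
func eligibleParents(voteCount map[string]int, candidates []string, ticketsPerBlock int) []string {
	required := ticketsPerBlock/2 + 1

	type blockVotes struct {
		hash  string
		votes int
	}
	bv := make([]blockVotes, 0, len(candidates))
	for _, h := range candidates {
		bv = append(bv, blockVotes{hash: h, votes: voteCount[h]})
	}

	// Blocks with the most votes come first.
	sort.Slice(bv, func(i, j int) bool { return bv[i].votes > bv[j].votes })

	var eligible []string
	for _, b := range bv {
		if b.votes >= required {
			eligible = append(eligible, b.hash)
		}
	}
	return eligible
}

func main() {
	counts := map[string]int{"blockA": 5, "blockB": 2, "blockC": 3}
	// With 5 tickets per block a majority is 3 votes, so blockB is excluded.
	fmt.Println(eligibleParents(counts, []string{"blockA", "blockB", "blockC"}, 5))
	// Output: [blockA blockC]
}
```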
-func isDust(txOut *wire.TxOut) bool { +func isDust(txOut *wire.TxOut, params *chaincfg.Params) bool { // The total serialized size consists of the output and the associated // input script to redeem it. Since there is no input script // to redeem it yet, use the minimum size of a typical input script. @@ -153,17 +436,26 @@ func isDust(txOut *wire.TxOut) bool { // The output is considered dust if the cost to the network to spend the // coins is more than 1/3 of the minimum free transaction relay fee. - // minFreeTxRelayFee is in Satoshi/KB, so multiply by 1000 to + // minFreeTxRelayFee is in Atom/KB, so multiply by 1000 to // convert to bytes. // // Using the typical values for a pay-to-pubkey-hash transaction from // the breakdown above and the default minimum free transaction relay - // fee of 1000, this equates to values less than 546 satoshi being + // fee of 1000, this equates to values less than 546 atoms being // considered dust. // // The following is equivalent to (value/totalSize) * (1/3) * 1000 // without needing to do floating point math. - return txOut.Value*1000/(3*int64(totalSize)) < minTxRelayFee + var minTxRelayFee dcrutil.Amount + switch { + case params == &chaincfg.MainNetParams: + minTxRelayFee = minTxRelayFeeMainNet + case params == &chaincfg.MainNetParams: + minTxRelayFee = minTxRelayFeeTestNet + default: + minTxRelayFee = minTxRelayFeeTestNet + } + return txOut.Value*1000/(3*int64(totalSize)) < int64(minTxRelayFee) } // checkPkScriptStandard performs a series of checks on a transaction ouput @@ -171,7 +463,17 @@ func isDust(txOut *wire.TxOut) bool { // A standard public key script is one that is a recognized form, and for // multi-signature scripts, only contains from 1 to maxStandardMultiSigKeys // public keys. -func checkPkScriptStandard(pkScript []byte, scriptClass txscript.ScriptClass) error { +func checkPkScriptStandard(version uint16, pkScript []byte, + scriptClass txscript.ScriptClass) error { + // Only default Bitcoin-style script is standard except for + // null data outputs. + if version != wire.DefaultPkScriptVersion { + str := fmt.Sprintf("versions other than default pkscript version " + + "are currently non-standard except for provably unspendable " + + "outputs") + return txRuleError(wire.RejectNonstandard, str) + } + switch scriptClass { case txscript.MultiSigTy: numPubKeys, numSigs, err := txscript.CalcMultiSigStats(pkScript) @@ -223,11 +525,12 @@ func checkPkScriptStandard(pkScript []byte, scriptClass txscript.ScriptClass) er // finalized, conforming to more stringent size constraints, having scripts // of recognized forms, and not containing "dust" outputs (those that are // so small it costs more to process them than they are worth). -func (mp *txMemPool) checkTransactionStandard(tx *btcutil.Tx, height int64) error { +func (mp *txMemPool) checkTransactionStandard(tx *dcrutil.Tx, txType stake.TxType, + height int64) error { msgTx := tx.MsgTx() // The transaction must be a currently supported version. - if msgTx.Version > wire.TxVersion || msgTx.Version < 1 { + if !wire.IsSupportedMsgTxVersion(msgTx) { str := fmt.Sprintf("transaction version %d is not in the "+ "valid range of %d-%d", msgTx.Version, 1, wire.TxVersion) @@ -273,14 +576,15 @@ func (mp *txMemPool) checkTransactionStandard(tx *btcutil.Tx, height int64) erro "script is not push only", i) return txRuleError(wire.RejectNonstandard, str) } + } // None of the output public key scripts can be a non-standard script or // be "dust" (except when the script is a null data script). 
numNullDataOutputs := 0 for i, txOut := range msgTx.TxOut { - scriptClass := txscript.GetScriptClass(txOut.PkScript) - err := checkPkScriptStandard(txOut.PkScript, scriptClass) + scriptClass := txscript.GetScriptClass(txOut.Version, txOut.PkScript) + err := checkPkScriptStandard(txOut.Version, txOut.PkScript, scriptClass) if err != nil { // Attempt to extract a reject code from the error so // it can be retained. When not possible, fall back to @@ -298,7 +602,8 @@ func (mp *txMemPool) checkTransactionStandard(tx *btcutil.Tx, height int64) erro // "dust". if scriptClass == txscript.NullDataTy { numNullDataOutputs++ - } else if isDust(txOut) { + } else if isDust(txOut, mp.server.chainParams) && + txType != stake.TxTypeSStx { str := fmt.Sprintf("transaction output %d: payment "+ "of %d is dust", i, txOut.Value) return txRuleError(wire.RejectDust, str) @@ -306,9 +611,12 @@ func (mp *txMemPool) checkTransactionStandard(tx *btcutil.Tx, height int64) erro } // A standard transaction must not have more than one output script that - // only carries data. - if numNullDataOutputs > 1 { - str := "more than one transaction output in a nulldata script" + // only carries data. However, certain types of standard stake transactions + // are allowed to have multiple OP_RETURN outputs, so only throw an error here + // if the tx is TxTypeRegular. + if numNullDataOutputs > maxNullDataOutputs && txType == stake.TxTypeRegular { + str := "more than one transaction output in a nulldata script for a " + + "regular type tx" return txRuleError(wire.RejectNonstandard, str) } @@ -322,12 +630,19 @@ func (mp *txMemPool) checkTransactionStandard(tx *btcutil.Tx, height int64) erro // exhaustion attacks by "creative" use of scripts that are super expensive to // process like OP_DUP OP_CHECKSIG OP_DROP repeated a large number of times // followed by a final OP_TRUE. -func checkInputsStandard(tx *btcutil.Tx, txStore blockchain.TxStore) error { +// Decred TODO: I think this is okay, but we'll see with simnet. +func checkInputsStandard(tx *dcrutil.Tx, + txType stake.TxType, + txStore blockchain.TxStore) error { // NOTE: The reference implementation also does a coinbase check here, // but coinbases have already been rejected prior to calling this // function so no need to recheck. for i, txIn := range tx.MsgTx().TxIn { + if i == 0 && txType == stake.TxTypeSSGen { + continue + } + // It is safe to elide existence and index checks here since // they have already been checked prior to calling this // function. @@ -369,19 +684,29 @@ func checkInputsStandard(tx *btcutil.Tx, txStore blockchain.TxStore) error { // calcMinRequiredTxRelayFee returns the minimum transaction fee required for a // transaction with the passed serialized size to be accepted into the memory // pool and relayed. -func calcMinRequiredTxRelayFee(serializedSize int64) int64 { +func calcMinRequiredTxRelayFee(serializedSize int64, + params *chaincfg.Params) int64 { // Calculate the minimum fee for a transaction to be allowed into the // mempool and relayed by scaling the base fee (which is the minimum - // free transaction relay fee). minTxRelayFee is in Satoshi/KB, so + // free transaction relay fee). minTxRelayFee is in Atom/KB, so // divide the transaction size by 1000 to convert to kilobytes. Also, // integer division is used so fees only increase on full kilobyte // boundaries. 
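To make the dust test above and the relay-fee scaling described in the preceding comment concrete, here is a small self-contained sketch of both calculations in integer math. The network selection is reduced to a boolean parameter rather than the chaincfg comparison used in the patch; the 5e6 and 1e3 atoms/KB figures come from the constants at the top of this file, and the 182-byte size is the usual estimate for creating and later redeeming a pay-to-pubkey-hash output.

```go
package main

import "fmt"

// minRelayFee returns the minimum relay fee in atoms/KB, mirroring the
// mainnet (5e6) and test/simnet (1e3) constants defined earlier in the file.
func minRelayFee(mainNet bool) int64 {
	if mainNet {
		return 5e6
	}
	return 1e3
}

// isDust reports whether an output of the given value is dust: the output is
// dust when spending it would cost more than 1/3 of the minimum relay fee.
// totalSize is the serialized size of the output plus a typical redeeming input.
func isDust(value, totalSize, relayFeePerKB int64) bool {
	// Equivalent to (value/totalSize) * (1/3) * 1000 < relayFeePerKB,
	// kept entirely in integer math.
	return value*1000/(3*totalSize) < relayFeePerKB
}

// minRequiredFee scales the per-KB relay fee by serialized size; integer
// division means the fee only increases on full kilobyte boundaries.
func minRequiredFee(serializedSize, relayFeePerKB int64) int64 {
	return (1 + serializedSize/1000) * relayFeePerKB
}

func main() {
	relay := minRelayFee(false) // test network base fee: 1000 atoms/KB
	// With a ~182-byte cost to create and spend a typical P2PKH output and a
	// 1000 atoms/KB relay fee, values below 546 atoms are dust.
	fmt.Println(isDust(545, 182, relay), isDust(546, 182, relay)) // true false
	// A 250-byte transaction still pays for a full kilobyte.
	fmt.Println(minRequiredFee(250, relay)) // 1000
}
```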
- minFee := (1 + serializedSize/1000) * minTxRelayFee + var minTxRelayFee dcrutil.Amount + switch { + case params == &chaincfg.MainNetParams: + minTxRelayFee = minTxRelayFeeMainNet + case params == &chaincfg.MainNetParams: + minTxRelayFee = minTxRelayFeeTestNet + default: + minTxRelayFee = minTxRelayFeeTestNet + } + minFee := (1 + serializedSize/1000) * int64(minTxRelayFee) // Set the minimum fee to the maximum possible value if the calculated // fee is not in the valid range for monetary amounts. - if minFee < 0 || minFee > btcutil.MaxSatoshi { - minFee = btcutil.MaxSatoshi + if minFee < 0 || minFee > dcrutil.MaxAmount { + minFee = dcrutil.MaxAmount } return minFee @@ -391,7 +716,9 @@ func calcMinRequiredTxRelayFee(serializedSize int64) int64 { // RemoveOrphan. See the comment for RemoveOrphan for more details. // // This function MUST be called with the mempool lock held (for writes). -func (mp *txMemPool) removeOrphan(txHash *wire.ShaHash) { +func (mp *txMemPool) removeOrphan(txHash *chainhash.Hash) { + txmpLog.Tracef("Removing orphan transaction %v", txHash) + // Nothing to do if passed tx is not an orphan. tx, exists := mp.orphans[*txHash] if !exists { @@ -403,7 +730,7 @@ func (mp *txMemPool) removeOrphan(txHash *wire.ShaHash) { originTxHash := txIn.PreviousOutPoint.Hash if orphans, exists := mp.orphansByPrev[originTxHash]; exists { for e := orphans.Front(); e != nil; e = e.Next() { - if e.Value.(*btcutil.Tx) == tx { + if e.Value.(*dcrutil.Tx) == tx { orphans.Remove(e) break } @@ -425,7 +752,7 @@ func (mp *txMemPool) removeOrphan(txHash *wire.ShaHash) { // previous orphan index. // // This function is safe for concurrent access. -func (mp *txMemPool) RemoveOrphan(txHash *wire.ShaHash) { +func (mp *txMemPool) RemoveOrphan(txHash *chainhash.Hash) { mp.Lock() mp.removeOrphan(txHash) mp.Unlock() @@ -438,7 +765,7 @@ func (mp *txMemPool) RemoveOrphan(txHash *wire.ShaHash) { func (mp *txMemPool) limitNumOrphans() error { if len(mp.orphans)+1 > cfg.MaxOrphanTxs && cfg.MaxOrphanTxs > 0 { // Generate a cryptographically random hash. - randHashBytes := make([]byte, wire.HashSize) + randHashBytes := make([]byte, chainhash.HashSize) _, err := rand.Read(randHashBytes) if err != nil { return err @@ -450,7 +777,7 @@ func (mp *txMemPool) limitNumOrphans() error { // to Go's range statement over maps) as a fallback if none of // the hashes in the orphan pool are larger than the random // hash. - var foundHash *wire.ShaHash + var foundHash *chainhash.Hash for txHash := range mp.orphans { if foundHash == nil { foundHash = &txHash @@ -471,7 +798,7 @@ func (mp *txMemPool) limitNumOrphans() error { // addOrphan adds an orphan transaction to the orphan pool. // // This function MUST be called with the mempool lock held (for writes). -func (mp *txMemPool) addOrphan(tx *btcutil.Tx) { +func (mp *txMemPool) addOrphan(tx *dcrutil.Tx) { // Limit the number orphan transactions to prevent memory exhaustion. A // random orphan is evicted to make room if needed. mp.limitNumOrphans() @@ -492,7 +819,7 @@ func (mp *txMemPool) addOrphan(tx *btcutil.Tx) { // maybeAddOrphan potentially adds an orphan to the orphan pool. // // This function MUST be called with the mempool lock held (for writes). -func (mp *txMemPool) maybeAddOrphan(tx *btcutil.Tx) error { +func (mp *txMemPool) maybeAddOrphan(tx *dcrutil.Tx) error { // Ignore orphan transactions that are too large. This helps avoid // a memory exhaustion attack based on sending a lot of really large // orphans. 
In the case there is a valid transaction larger than this, @@ -521,7 +848,7 @@ func (mp *txMemPool) maybeAddOrphan(tx *btcutil.Tx) error { // exists in the main pool. // // This function MUST be called with the mempool lock held (for reads). -func (mp *txMemPool) isTransactionInPool(hash *wire.ShaHash) bool { +func (mp *txMemPool) isTransactionInPool(hash *chainhash.Hash) bool { if _, exists := mp.pool[*hash]; exists { return true } @@ -533,7 +860,7 @@ func (mp *txMemPool) isTransactionInPool(hash *wire.ShaHash) bool { // exists in the main pool. // // This function is safe for concurrent access. -func (mp *txMemPool) IsTransactionInPool(hash *wire.ShaHash) bool { +func (mp *txMemPool) IsTransactionInPool(hash *chainhash.Hash) bool { // Protect concurrent access. mp.RLock() defer mp.RUnlock() @@ -545,7 +872,7 @@ func (mp *txMemPool) IsTransactionInPool(hash *wire.ShaHash) bool { // in the orphan pool. // // This function MUST be called with the mempool lock held (for reads). -func (mp *txMemPool) isOrphanInPool(hash *wire.ShaHash) bool { +func (mp *txMemPool) isOrphanInPool(hash *chainhash.Hash) bool { if _, exists := mp.orphans[*hash]; exists { return true } @@ -557,7 +884,7 @@ func (mp *txMemPool) isOrphanInPool(hash *wire.ShaHash) bool { // in the orphan pool. // // This function is safe for concurrent access. -func (mp *txMemPool) IsOrphanInPool(hash *wire.ShaHash) bool { +func (mp *txMemPool) IsOrphanInPool(hash *chainhash.Hash) bool { // Protect concurrent access. mp.RLock() defer mp.RUnlock() @@ -569,7 +896,7 @@ func (mp *txMemPool) IsOrphanInPool(hash *wire.ShaHash) bool { // in the main pool or in the orphan pool. // // This function MUST be called with the mempool lock held (for reads). -func (mp *txMemPool) haveTransaction(hash *wire.ShaHash) bool { +func (mp *txMemPool) haveTransaction(hash *chainhash.Hash) bool { return mp.isTransactionInPool(hash) || mp.isOrphanInPool(hash) } @@ -577,7 +904,7 @@ func (mp *txMemPool) haveTransaction(hash *wire.ShaHash) bool { // in the main pool or in the orphan pool. // // This function is safe for concurrent access. -func (mp *txMemPool) HaveTransaction(hash *wire.ShaHash) bool { +func (mp *txMemPool) HaveTransaction(hash *chainhash.Hash) bool { // Protect concurrent access. mp.RLock() defer mp.RUnlock() @@ -589,84 +916,48 @@ func (mp *txMemPool) HaveTransaction(hash *wire.ShaHash) bool { // RemoveTransaction. See the comment for RemoveTransaction for more details. // // This function MUST be called with the mempool lock held (for writes). -func (mp *txMemPool) removeTransaction(tx *btcutil.Tx) { - // Remove any transactions which rely on this one. +func (mp *txMemPool) removeTransaction(tx *dcrutil.Tx, removeRedeemers bool) { + txmpLog.Tracef("Removing transaction %v", tx.Sha()) + txHash := tx.Sha() - for i := uint32(0); i < uint32(len(tx.MsgTx().TxOut)); i++ { - outpoint := wire.NewOutPoint(txHash, i) - if txRedeemer, exists := mp.outpoints[*outpoint]; exists { - mp.removeTransaction(txRedeemer) + if removeRedeemers { + // Remove any transactions which rely on this one. + txType := stake.DetermineTxType(tx) + tree := dcrutil.TxTreeRegular + if txType != stake.TxTypeRegular { + tree = dcrutil.TxTreeStake + } + for i := uint32(0); i < uint32(len(tx.MsgTx().TxOut)); i++ { + outpoint := wire.NewOutPoint(txHash, i, tree) + if txRedeemer, exists := mp.outpoints[*outpoint]; exists { + mp.removeTransaction(txRedeemer, true) + } } } // Remove the transaction and mark the referenced outpoints as unspent // by the pool. 
if txDesc, exists := mp.pool[*txHash]; exists { - if cfg.AddrIndex { - mp.removeTransactionFromAddrIndex(tx) - } - for _, txIn := range txDesc.Tx.MsgTx().TxIn { delete(mp.outpoints, txIn.PreviousOutPoint) } delete(mp.pool, *txHash) mp.lastUpdated = time.Now() } - } -// removeTransactionFromAddrIndex removes the passed transaction from our -// address based index. -// -// This function MUST be called with the mempool lock held (for writes). -func (mp *txMemPool) removeTransactionFromAddrIndex(tx *btcutil.Tx) error { - previousOutputScripts, err := mp.fetchReferencedOutputScripts(tx) - if err != nil { - txmpLog.Errorf("Unable to obtain referenced output scripts for "+ - "the passed tx (addrindex): %v", err) - return err - } - - for _, pkScript := range previousOutputScripts { - mp.removeScriptFromAddrIndex(pkScript, tx) - } - - for _, txOut := range tx.MsgTx().TxOut { - mp.removeScriptFromAddrIndex(txOut.PkScript, tx) - } - - return nil -} - -// removeScriptFromAddrIndex dissociates the address encoded by the -// passed pkScript from the passed tx in our address based tx index. -// -// This function MUST be called with the mempool lock held (for writes). -func (mp *txMemPool) removeScriptFromAddrIndex(pkScript []byte, tx *btcutil.Tx) error { - _, addresses, _, err := txscript.ExtractPkScriptAddrs(pkScript, - activeNetParams.Params) - if err != nil { - txmpLog.Errorf("Unable to extract encoded addresses from script "+ - "for addrindex (addrindex): %v", err) - return err - } - for _, addr := range addresses { - delete(mp.addrindex[addr.EncodeAddress()], *tx.Sha()) - } - - return nil -} - -// RemoveTransaction removes the passed transaction and any transactions which -// depend on it from the memory pool. +// RemoveTransaction removes the passed transaction from the mempool. If +// removeRedeemers flag is set, any transactions that redeem outputs from the +// removed transaction will also be removed recursively from the mempool, as +// they would otherwise become orphan. // // This function is safe for concurrent access. -func (mp *txMemPool) RemoveTransaction(tx *btcutil.Tx) { +func (mp *txMemPool) RemoveTransaction(tx *dcrutil.Tx, removeRedeemers bool) { // Protect concurrent access. mp.Lock() defer mp.Unlock() - mp.removeTransaction(tx) + mp.removeTransaction(tx, removeRedeemers) } // RemoveDoubleSpends removes all transactions which spend outputs spent by the @@ -676,7 +967,7 @@ func (mp *txMemPool) RemoveTransaction(tx *btcutil.Tx) { // contain transactions which were previously unknown to the memory pool // // This function is safe for concurrent access. -func (mp *txMemPool) RemoveDoubleSpends(tx *btcutil.Tx) { +func (mp *txMemPool) RemoveDoubleSpends(tx *dcrutil.Tx) { // Protect concurrent access. mp.Lock() defer mp.Unlock() @@ -684,7 +975,7 @@ func (mp *txMemPool) RemoveDoubleSpends(tx *btcutil.Tx) { for _, txIn := range tx.MsgTx().TxIn { if txRedeemer, ok := mp.outpoints[txIn.PreviousOutPoint]; ok { if !txRedeemer.Sha().IsEqual(tx.Sha()) { - mp.removeTransaction(txRedeemer) + mp.removeTransaction(txRedeemer, true) } } } @@ -695,11 +986,16 @@ func (mp *txMemPool) RemoveDoubleSpends(tx *btcutil.Tx) { // helper for maybeAcceptTransaction. // // This function MUST be called with the mempool lock held (for writes). -func (mp *txMemPool) addTransaction(tx *btcutil.Tx, height, fee int64) { +func (mp *txMemPool) addTransaction( + tx *dcrutil.Tx, + txType stake.TxType, + height, + fee int64) { // Add the transaction to the pool and mark the referenced outpoints // as spent by the pool. 
mp.pool[*tx.Sha()] = &TxDesc{ Tx: tx, + Type: txType, Added: time.Now(), Height: height, Fee: fee, @@ -708,42 +1004,14 @@ func (mp *txMemPool) addTransaction(tx *btcutil.Tx, height, fee int64) { mp.outpoints[txIn.PreviousOutPoint] = tx } mp.lastUpdated = time.Now() - - if cfg.AddrIndex { - mp.addTransactionToAddrIndex(tx) - } -} - -// addTransactionToAddrIndex adds all addresses related to the transaction to -// our in-memory address index. Note that this address is only populated when -// we're running with the optional address index activated. -// -// This function MUST be called with the mempool lock held (for writes). -func (mp *txMemPool) addTransactionToAddrIndex(tx *btcutil.Tx) error { - previousOutScripts, err := mp.fetchReferencedOutputScripts(tx) - if err != nil { - txmpLog.Errorf("Unable to obtain referenced output scripts for "+ - "the passed tx (addrindex): %v", err) - return err - } - // Index addresses of all referenced previous output tx's. - for _, pkScript := range previousOutScripts { - mp.indexScriptAddressToTx(pkScript, tx) - } - - // Index addresses of all created outputs. - for _, txOut := range tx.MsgTx().TxOut { - mp.indexScriptAddressToTx(txOut.PkScript, tx) - } - - return nil } // fetchReferencedOutputScripts looks up and returns all the scriptPubKeys // referenced by inputs of the passed transaction. // // This function MUST be called with the mempool lock held (for reads). -func (mp *txMemPool) fetchReferencedOutputScripts(tx *btcutil.Tx) ([][]byte, error) { +func (mp *txMemPool) fetchReferencedOutputScripts(tx *dcrutil.Tx) ([][]byte, + error) { txStore, err := mp.fetchInputTransactions(tx) if err != nil || len(txStore) == 0 { return nil, err @@ -753,8 +1021,10 @@ func (mp *txMemPool) fetchReferencedOutputScripts(tx *btcutil.Tx) ([][]byte, err for _, txIn := range tx.MsgTx().TxIn { outPoint := txIn.PreviousOutPoint if txStore[outPoint.Hash].Err == nil { - referencedOutPoint := txStore[outPoint.Hash].Tx.MsgTx().TxOut[outPoint.Index] - previousOutScripts = append(previousOutScripts, referencedOutPoint.PkScript) + referencedOutPoint := + txStore[outPoint.Hash].Tx.MsgTx().TxOut[outPoint.Index] + previousOutScripts = + append(previousOutScripts, referencedOutPoint.PkScript) } } return previousOutScripts, nil @@ -764,8 +1034,9 @@ func (mp *txMemPool) fetchReferencedOutputScripts(tx *btcutil.Tx) ([][]byte, err // encoded by the passed scriptPubKey to the passed transaction. // // This function MUST be called with the mempool lock held (for writes). -func (mp *txMemPool) indexScriptAddressToTx(pkScript []byte, tx *btcutil.Tx) error { - _, addresses, _, err := txscript.ExtractPkScriptAddrs(pkScript, +func (mp *txMemPool) indexScriptAddressToTx(pkVersion uint16, pkScript []byte, + tx *dcrutil.Tx) error { + _, addresses, _, err := txscript.ExtractPkScriptAddrs(pkVersion, pkScript, activeNetParams.Params) if err != nil { txmpLog.Errorf("Unable to extract encoded addresses from script "+ @@ -775,7 +1046,7 @@ func (mp *txMemPool) indexScriptAddressToTx(pkScript []byte, tx *btcutil.Tx) err for _, addr := range addresses { if mp.addrindex[addr.EncodeAddress()] == nil { - mp.addrindex[addr.EncodeAddress()] = make(map[wire.ShaHash]struct{}) + mp.addrindex[addr.EncodeAddress()] = make(map[chainhash.Hash]struct{}) } mp.addrindex[addr.EncodeAddress()][*tx.Sha()] = struct{}{} } @@ -789,7 +1060,8 @@ func (mp *txMemPool) indexScriptAddressToTx(pkScript []byte, tx *btcutil.Tx) err // age is the sum of this value for each txin. 
Any inputs to the transaction // which are currently in the mempool and hence not mined into a block yet, // contribute no additional input age to the transaction. -func calcInputValueAge(txDesc *TxDesc, txStore blockchain.TxStore, nextBlockHeight int64) float64 { +func calcInputValueAge(txDesc *TxDesc, txStore blockchain.TxStore, + nextBlockHeight int64) float64 { var totalInputAge float64 for _, txIn := range txDesc.Tx.MsgTx().TxIn { originHash := &txIn.PreviousOutPoint.Hash @@ -832,7 +1104,7 @@ func minInt(a, b int) int { // of each of its input values multiplied by their age (# of confirmations). // Thus, the final formula for the priority is: // sum(inputValue * inputAge) / adjustedTxSize -func calcPriority(tx *btcutil.Tx, inputValueAge float64) float64 { +func calcPriority(tx *dcrutil.Tx, inputValueAge float64) float64 { // In order to encourage spending multiple old unspent transaction // outputs thereby reducing the total set, don't count the constant // overhead for each input as well as enough bytes of the signature @@ -885,7 +1157,8 @@ func (txD *TxDesc) StartingPriority(txStore blockchain.TxStore) float64 { // CurrentPriority calculates the current priority of this tx descriptor's // underlying transaction relative to the next block height. -func (txD *TxDesc) CurrentPriority(txStore blockchain.TxStore, nextBlockHeight int64) float64 { +func (txD *TxDesc) CurrentPriority(txStore blockchain.TxStore, + nextBlockHeight int64) float64 { inputAge := calcInputValueAge(txD, txStore, nextBlockHeight) return calcPriority(txD.Tx, inputAge) } @@ -896,12 +1169,19 @@ func (txD *TxDesc) CurrentPriority(txStore blockchain.TxStore, nextBlockHeight i // main chain. // // This function MUST be called with the mempool lock held (for reads). -func (mp *txMemPool) checkPoolDoubleSpend(tx *btcutil.Tx) error { - for _, txIn := range tx.MsgTx().TxIn { +func (mp *txMemPool) checkPoolDoubleSpend(tx *dcrutil.Tx, + txType stake.TxType) error { + + for i, txIn := range tx.MsgTx().TxIn { + // We don't care about double spends of stake bases. + if (txType == stake.TxTypeSSGen || txType == stake.TxTypeSSRtx) && + (i == 0) { + continue + } + if txR, exists := mp.outpoints[txIn.PreviousOutPoint]; exists { - str := fmt.Sprintf("output %v already spent by "+ - "transaction %v in the memory pool", - txIn.PreviousOutPoint, txR.Sha()) + str := fmt.Sprintf("transaction %v in the pool "+ + "already spends the same coins", txR.Sha()) return txRuleError(wire.RejectDuplicate, str) } } @@ -909,13 +1189,63 @@ func (mp *txMemPool) checkPoolDoubleSpend(tx *btcutil.Tx) error { return nil } +// isTxTreeValid checks the map of votes for a block to see if the tx +// tree regular for the block at HEAD is valid. +func (mp *txMemPool) isTxTreeValid(newestHash *chainhash.Hash) bool { + // There are no votes on the block currently; assume it's valid. + if mp.votes[*newestHash] == nil { + return true + } + + // There are not possibly enough votes to tell if the txTree is valid; + // assume it's valid. + if len(mp.votes[*newestHash]) <= + int(mp.server.chainParams.TicketsPerBlock/2) { + return true + } + + // Otherwise, tally the votes and determine if it's valid or not. + yea := 0 + nay := 0 + + for _, vote := range mp.votes[*newestHash] { + // End of list, break. + if vote == nil { + break + } + + if vote.Vote == true { + yea++ + } else { + nay++ + } + } + + if yea > nay { + return true + } + return false +} + +// IsTxTreeValid calls isTxTreeValid, but makes it safe for concurrent access. 
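A compact sketch of the priority formula implemented by calcInputValueAge and calcPriority above: each input contributes value × confirmations (inputs still in the mempool contribute zero), and the sum is divided by the transaction size after discounting a fixed per-input overhead. The 148-byte overhead used here is just a typical input size for illustration, not the exact constant in the real code.

```go
package main

import "fmt"

// input is a simplified view of a transaction input for priority purposes.
type input struct {
	value         int64 // value of the referenced output, in atoms
	confirmations int64 // 0 if the referenced output is still in the mempool
}

// priority computes sum(inputValue * inputAge) / adjustedSize, where
// adjustedSize discounts a rough per-input overhead so that consolidating
// many old outputs is not penalized.
func priority(inputs []input, serializedSize int64) float64 {
	var inputValueAge float64
	for _, in := range inputs {
		inputValueAge += float64(in.value * in.confirmations)
	}

	overhead := int64(len(inputs)) * 148 // illustrative per-input discount
	adjusted := serializedSize - overhead
	if adjusted <= 0 {
		adjusted = 1
	}
	return inputValueAge / float64(adjusted)
}

func main() {
	ins := []input{
		{value: 5_000_000, confirmations: 144}, // well-aged input
		{value: 1_000_000, confirmations: 0},   // unconfirmed, contributes nothing
	}
	fmt.Printf("priority: %.0f\n", priority(ins, 400))
}
```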
+func (mp *txMemPool) IsTxTreeValid(best *chainhash.Hash) bool { + mp.votesMtx.Lock() + defer mp.votesMtx.Unlock() + isValid := mp.isTxTreeValid(best) + + return isValid +} + // fetchInputTransactions fetches the input transactions referenced by the // passed transaction. First, it fetches from the main chain, then it tries to // fetch any missing inputs from the transaction pool. // // This function MUST be called with the mempool lock held (for reads). -func (mp *txMemPool) fetchInputTransactions(tx *btcutil.Tx) (blockchain.TxStore, error) { - txStore, err := mp.server.blockManager.blockChain.FetchTransactionStore(tx) +func (mp *txMemPool) fetchInputTransactions(tx *dcrutil.Tx) (blockchain.TxStore, + error) { + tv := mp.IsTxTreeValid(mp.server.blockManager.chainState.newestHash) + txStore, err := mp.server.blockManager.blockChain.FetchTransactionStore(tx, + tv) if err != nil { return nil, err } @@ -927,6 +1257,7 @@ func (mp *txMemPool) fetchInputTransactions(tx *btcutil.Tx) (blockchain.TxStore, poolTx := poolTxDesc.Tx txD.Tx = poolTx txD.BlockHeight = mempoolHeight + txD.BlockIndex = wire.NullBlockIndex txD.Spent = make([]bool, len(poolTx.MsgTx().TxOut)) txD.Err = nil } @@ -941,7 +1272,8 @@ func (mp *txMemPool) fetchInputTransactions(tx *btcutil.Tx) (blockchain.TxStore, // orphans. // // This function is safe for concurrent access. -func (mp *txMemPool) FetchTransaction(txHash *wire.ShaHash) (*btcutil.Tx, error) { +func (mp *txMemPool) FetchTransaction(txHash *chainhash.Hash) (*dcrutil.Tx, + error) { // Protect concurrent access. mp.RLock() defer mp.RUnlock() @@ -956,13 +1288,14 @@ func (mp *txMemPool) FetchTransaction(txHash *wire.ShaHash) (*btcutil.Tx, error) // FilterTransactionsByAddress returns all transactions currently in the // mempool that either create an output to the passed address or spend a // previously created ouput to the address. -func (mp *txMemPool) FilterTransactionsByAddress(addr btcutil.Address) ([]*btcutil.Tx, error) { +func (mp *txMemPool) FilterTransactionsByAddress( + addr dcrutil.Address) ([]*dcrutil.Tx, error) { // Protect concurrent access. mp.RLock() defer mp.RUnlock() if txs, exists := mp.addrindex[addr.EncodeAddress()]; exists { - addressTxs := make([]*btcutil.Tx, 0, len(txs)) + addressTxs := make([]*dcrutil.Tx, 0, len(txs)) for txHash := range txs { if tx, exists := mp.pool[txHash]; exists { addressTxs = append(addressTxs, tx.Tx) @@ -974,12 +1307,41 @@ func (mp *txMemPool) FilterTransactionsByAddress(addr btcutil.Address) ([]*btcut return nil, fmt.Errorf("address does not have any transactions in the pool") } +// This function detects whether or not a transaction is a stake transaction and, +// if it is, also returns the type of stake transaction. +func detectTxType(tx *dcrutil.Tx) stake.TxType { + // Check to see if it's an SStx + if pass, _ := stake.IsSStx(tx); pass { + return stake.TxTypeSStx + } + + // Check to see if it's an SSGen + if pass, _ := stake.IsSSGen(tx); pass { + return stake.TxTypeSSGen + } + + // Check to see if it's an SSGen + if pass, _ := stake.IsSSRtx(tx); pass { + return stake.TxTypeSSRtx + } + + // If it's none of these things, it's a malformed or non-standard stake tx + // which will be rejected during other checks or a regular tx. + return stake.TxTypeRegular +} + // maybeAcceptTransaction is the internal function which implements the public // MaybeAcceptTransaction. See the comment for MaybeAcceptTransaction for // more details. // // This function MUST be called with the mempool lock held (for writes). 
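The classification in detectTxType above is order dependent: a transaction is tried as a ticket purchase (SStx), then a vote (SSGen), then a revocation (SSRtx), and anything else falls through to regular, which in turn decides which transaction tree it belongs to. A trivial sketch of that dispatch with placeholder predicates standing in for the stake package checks:

```go
package main

import "fmt"

// txType mirrors the four classifications used by the mempool.
type txType int

const (
	txRegular txType = iota
	txSStx           // ticket purchase
	txSSGen          // vote
	txSSRtx          // revocation
)

// classify applies the checks in the same order as detectTxType. The
// predicate functions stand in for stake.IsSStx/IsSSGen/IsSSRtx.
func classify(isSStx, isSSGen, isSSRtx func() bool) txType {
	switch {
	case isSStx():
		return txSStx
	case isSSGen():
		return txSSGen
	case isSSRtx():
		return txSSRtx
	default:
		return txRegular
	}
}

func main() {
	no := func() bool { return false }
	yes := func() bool { return true }

	fmt.Println(classify(no, yes, no) == txSSGen)  // true: classified as a vote
	fmt.Println(classify(no, no, no) == txRegular) // true: falls through
	// Regular transactions go in the regular tree; everything else goes in
	// the stake tree, as maybeAcceptTransaction does via tx.SetTree.
}
```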
-func (mp *txMemPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit bool) ([]*wire.ShaHash, error) { +// DECRED - TODO +// We need to make sure thing also assigns the TxType after it evaluates the tx, +// so that we can easily pick different stake tx types from the mempool later. +// This should probably be done at the bottom using "IsSStx" etc functions. +// It should also set the dcrutil tree type for the tx as well. +func (mp *txMemPool) maybeAcceptTransaction(tx *dcrutil.Tx, isNew, + rateLimit bool) ([]*chainhash.Hash, error) { txHash := tx.Sha() // Don't accept the transaction if it already exists in the pool. This @@ -991,9 +1353,9 @@ func (mp *txMemPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit boo } // Perform preliminary sanity checks on the transaction. This makes - // use of btcchain which contains the invariant rules for what + // use of chain which contains the invariant rules for what // transactions are allowed into blocks. - err := blockchain.CheckTransactionSanity(tx) + err := blockchain.CheckTransactionSanity(tx, mp.server.chainParams) if err != nil { if cerr, ok := err.(blockchain.RuleError); ok { return nil, chainRuleError(cerr) @@ -1029,10 +1391,20 @@ func (mp *txMemPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit boo } nextBlockHeight := curHeight + 1 + // Determine what type of transaction we're dealing with (regular or stake). + // Then, be sure to set the tx tree correctly as it's possible a use submitted + // it to the network with TxTreeUnknown. + txType := detectTxType(tx) + if txType == stake.TxTypeRegular { + tx.SetTree(dcrutil.TxTreeRegular) + } else { + tx.SetTree(dcrutil.TxTreeStake) + } + // Don't allow non-standard transactions if the network parameters // forbid their relaying. if !activeNetParams.RelayNonStdTxs { - err := mp.checkTransactionStandard(tx, nextBlockHeight) + err := mp.checkTransactionStandard(tx, txType, nextBlockHeight) if err != nil { // Attempt to extract a reject code from the error so // it can be retained. When not possible, fall back to @@ -1047,19 +1419,55 @@ func (mp *txMemPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit boo } } - // The transaction may not use any of the same outputs as other - // transactions already in the pool as that would ultimately result in a - // double spend. This check is intended to be quick and therefore only - // detects double spends within the transaction pool itself. The - // transaction could still be double spending coins from the main chain - // at this point. There is a more in-depth check that happens later - // after fetching the referenced transaction inputs from the main chain - // which examines the actual spend data and prevents double spends. 
- err = mp.checkPoolDoubleSpend(tx) - if err != nil { - return nil, err - } + isSSGen, _ := stake.IsSSGen(tx) + isSSRtx, _ := stake.IsSSRtx(tx) + if isSSGen || isSSRtx { + if isSSGen { + ssGenAlreadyFound := 0 + for _, mpTx := range mp.pool { + if mpTx.GetType() == stake.TxTypeSSGen { + if mpTx.Tx.MsgTx().TxIn[1].PreviousOutPoint == + tx.MsgTx().TxIn[1].PreviousOutPoint { + ssGenAlreadyFound++ + } + } + if ssGenAlreadyFound > maxSSGensDoubleSpends { + str := fmt.Sprintf("transaction %v in the pool "+ + "with more than %v ssgens", + tx.MsgTx().TxIn[1].PreviousOutPoint, + maxSSGensDoubleSpends) + return nil, txRuleError(wire.RejectDuplicate, str) + } + } + } + if isSSRtx { + for _, mpTx := range mp.pool { + if mpTx.GetType() == stake.TxTypeSSRtx { + if mpTx.Tx.MsgTx().TxIn[0].PreviousOutPoint == + tx.MsgTx().TxIn[0].PreviousOutPoint { + str := fmt.Sprintf("transaction %v in the pool "+ + " as a ssrtx. Only one ssrtx allowed.", + tx.MsgTx().TxIn[0].PreviousOutPoint) + return nil, txRuleError(wire.RejectDuplicate, str) + } + } + } + } + } else { + // The transaction may not use any of the same outputs as other + // transactions already in the pool as that would ultimately result in a + // double spend. This check is intended to be quick and therefore only + // detects double spends within the transaction pool itself. The + // transaction could still be double spending coins from the main chain + // at this point. There is a more in-depth check that happens later + // after fetching the referenced transaction inputs from the main chain + // which examines the actual spend data and prevents double spends. + err = mp.checkPoolDoubleSpend(tx, txType) + if err != nil { + return nil, err + } + } // Fetch all of the transactions referenced by the inputs to this // transaction. This function also attempts to fetch the transaction // itself to be used for detecting a duplicate transaction without @@ -1084,25 +1492,27 @@ func (mp *txMemPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit boo } delete(txStore, *txHash) - // Transaction is an orphan if any of the referenced input transactions - // don't exist. Adding orphans to the orphan pool is not handled by - // this function, and the caller should use maybeAddOrphan if this - // behavior is desired. - var missingParents []*wire.ShaHash + // Transaction is an orphan if any of the inputs don't exist. + var missingParents []*chainhash.Hash for _, txD := range txStore { if txD.Err == database.ErrTxShaMissing { missingParents = append(missingParents, txD.Hash) } } - if len(missingParents) != 0 { + + if len(missingParents) > 0 { return missingParents, nil } // Perform several checks on the transaction inputs using the invariant - // rules in btcchain for what transactions are allowed into blocks. + // rules in chain for what transactions are allowed into blocks. // Also returns the fees associated with the transaction which will be // used later. - txFee, err := blockchain.CheckTransactionInputs(tx, nextBlockHeight, txStore) + txFee, err := blockchain.CheckTransactionInputs(tx, + nextBlockHeight, + txStore, + false, // Don't check fraud proof; filled in by miner + mp.server.chainParams) if err != nil { if cerr, ok := err.(blockchain.RuleError); ok { return nil, chainRuleError(cerr) @@ -1113,7 +1523,7 @@ func (mp *txMemPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit boo // Don't allow transactions with non-standard inputs if the network // parameters forbid their relaying. 
if !activeNetParams.RelayNonStdTxs { - err := checkInputsStandard(tx, txStore) + err := checkInputsStandard(tx, txType, txStore) if err != nil { // Attempt to extract a reject code from the error so // it can be retained. When not possible, fall back to @@ -1137,20 +1547,20 @@ func (mp *txMemPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit boo // the coinbase address itself can contain signature operations, the // maximum allowed signature operations per transaction is less than // the maximum allowed signature operations per block. - numSigOps, err := blockchain.CountP2SHSigOps(tx, false, txStore) + numSigOps, err := blockchain.CountP2SHSigOps(tx, false, isSSGen, txStore) if err != nil { if cerr, ok := err.(blockchain.RuleError); ok { return nil, chainRuleError(cerr) } return nil, err } - numSigOps += blockchain.CountSigOps(tx) + + numSigOps += blockchain.CountSigOps(tx, false, isSSGen) if numSigOps > maxSigOpsPerTx { str := fmt.Sprintf("transaction %v has too many sigops: %d > %d", txHash, numSigOps, maxSigOpsPerTx) return nil, txRuleError(wire.RejectNonstandard, str) } - // Don't allow transactions with fees too low to get into a mined block. // // Most miners allow a free transaction area in blocks they mine to go @@ -1163,19 +1573,22 @@ func (mp *txMemPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit boo // transaction does not exceeed 1000 less than the reserved space for // high-priority transactions, don't require a fee for it. serializedSize := int64(tx.MsgTx().SerializeSize()) - minFee := calcMinRequiredTxRelayFee(serializedSize) - if serializedSize >= (defaultBlockPrioritySize-1000) && txFee < minFee { - str := fmt.Sprintf("transaction %v has %d fees which is under "+ - "the required amount of %d", txHash, txFee, - minFee) - return nil, txRuleError(wire.RejectInsufficientFee, str) + minFee := calcMinRequiredTxRelayFee(serializedSize, mp.server.chainParams) + if txType == stake.TxTypeRegular { // Non-stake only + if serializedSize >= (defaultBlockPrioritySize-1000) && txFee < minFee { + str := fmt.Sprintf("transaction %v has %d fees which is under "+ + "the required amount of %d", txHash, txFee, + minFee) + return nil, txRuleError(wire.RejectInsufficientFee, str) + } } // Require that free transactions have sufficient priority to be mined // in the next block. Transactions which are being added back to the // memory pool from blocks that have been disconnected during a reorg // are exempted. - if isNew && !cfg.NoRelayPriority && txFee < minFee { + if isNew && !cfg.NoRelayPriority && txFee < minFee && + txType == stake.TxTypeRegular { txD := &TxDesc{ Tx: tx, Added: time.Now(), @@ -1193,7 +1606,7 @@ func (mp *txMemPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit boo // Free-to-relay transactions are rate limited here to prevent // penny-flooding with tiny transactions as a form of attack. - if rateLimit && txFee < minFee { + if rateLimit && txFee < minFee && txType == stake.TxTypeRegular { nowUnix := time.Now().Unix() // we decay passed data with an exponentially decaying ~10 // minutes window - matches bitcoind handling. @@ -1227,7 +1640,16 @@ func (mp *txMemPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit boo } // Add to transaction pool. - mp.addTransaction(tx, curHeight, txFee) + mp.addTransaction(tx, txType, curHeight, txFee) + + // If it's an SSGen (vote), insert it into the list of + // votes. 
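The free-transaction rate limiting mentioned above tracks the running total of free-to-relay bytes with an exponential decay over roughly a ten-minute window. The sketch below shows one plausible form of that scheme under stated assumptions; the decay constant, the limit, and the names are illustrative rather than the exact dcrd implementation.

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// pennyLimiter tracks the running total of free-to-relay bytes with an
// exponential decay, in the spirit of the ~10 minute window described above.
type pennyLimiter struct {
	total    float64
	lastUnix int64
	limitKB  float64 // allowed free kilobytes per window (illustrative)
}

// allow decays the running total for the elapsed time, rejects the
// transaction if the total already exceeds the limit, and otherwise adds the
// transaction's size to the total.
func (p *pennyLimiter) allow(now time.Time, txSize int64) bool {
	nowUnix := now.Unix()
	// Each elapsed second keeps (1 - 1/600) of the accumulated total, giving
	// a decay window on the order of ten minutes.
	p.total *= math.Pow(1.0-1.0/600.0, float64(nowUnix-p.lastUnix))
	p.lastUnix = nowUnix

	if p.total >= p.limitKB*1000 {
		return false
	}
	p.total += float64(txSize)
	return true
}

func main() {
	lim := &pennyLimiter{limitKB: 15} // 15 KB of free transactions per window
	now := time.Now()
	accepted := 0
	for i := 0; i < 100; i++ {
		if lim.allow(now, 300) { // burst of 100 free 300-byte transactions
			accepted++
		}
	}
	fmt.Println("accepted:", accepted) // 50: the 51st pushes the total past 15000
}
```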
+ if txType == stake.TxTypeSSGen { + err := mp.InsertVote(tx) + if err != nil { + return nil, err + } + } txmpLog.Debugf("Accepted transaction %v (pool size: %v)", txHash, len(mp.pool)) @@ -1247,15 +1669,13 @@ func (mp *txMemPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit boo // MaybeAcceptTransaction is the main workhorse for handling insertion of new // free-standing transactions into a memory pool. It includes functionality // such as rejecting duplicate transactions, ensuring transactions follow all -// rules, detecting orphan transactions, and insertion into the memory pool. -// -// If the transaction is an orphan (missing parent transactions), the -// transaction is NOT added to the orphan pool, but each unknown referenced -// parent is returned. Use ProcessTransaction instead if new orphans should -// be added to the orphan pool. +// rules, orphan transaction handling, and insertion into the memory pool. The +// isOrphan parameter can be nil if the caller does not need to know whether +// or not the transaction is an orphan. // // This function is safe for concurrent access. -func (mp *txMemPool) MaybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit bool) ([]*wire.ShaHash, error) { +func (mp *txMemPool) MaybeAcceptTransaction(tx *dcrutil.Tx, isNew, + rateLimit bool) ([]*chainhash.Hash, error) { // Protect concurrent access. mp.Lock() defer mp.Unlock() @@ -1267,14 +1687,14 @@ func (mp *txMemPool) MaybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit boo // ProcessOrphans. See the comment for ProcessOrphans for more details. // // This function MUST be called with the mempool lock held (for writes). -func (mp *txMemPool) processOrphans(hash *wire.ShaHash) error { +func (mp *txMemPool) processOrphans(hash *chainhash.Hash) { // Start with processing at least the passed hash. processHashes := list.New() processHashes.PushBack(hash) for processHashes.Len() > 0 { // Pop the first hash to process. firstElement := processHashes.Remove(processHashes.Front()) - processHash := firstElement.(*wire.ShaHash) + processHash := firstElement.(*chainhash.Hash) // Look up all orphans that are referenced by the transaction we // just accepted. This will typically only be one, but it could @@ -1289,7 +1709,7 @@ func (mp *txMemPool) processOrphans(hash *wire.ShaHash) error { var enext *list.Element for e := orphans.Front(); e != nil; e = enext { enext = e.Next() - tx := e.Value.(*btcutil.Tx) + tx := e.Value.(*dcrutil.Tx) // Remove the orphan from the orphan pool. Current // behavior requires that all saved orphans with @@ -1309,26 +1729,28 @@ func (mp *txMemPool) processOrphans(hash *wire.ShaHash) error { // Potentially accept the transaction into the // transaction pool. - missingParents, err := mp.maybeAcceptTransaction(tx, - true, true) + missingParents, err := mp.maybeAcceptTransaction(tx, true, true) if err != nil { - return err + // TODO: Remove orphans that depend on this + // failed transaction. + txmpLog.Debugf("Unable to move "+ + "orphan transaction %v to mempool: %v", + tx.Sha(), err) + continue } - if len(missingParents) == 0 { - // Generate and relay the inventory vector for the - // newly accepted transaction. - iv := wire.NewInvVect(wire.InvTypeTx, tx.Sha()) - mp.server.RelayInventory(iv, tx) - } else { - // Transaction is still an orphan. - // TODO(jrick): This removeOrphan call is - // likely unnecessary as it was unconditionally - // removed above and maybeAcceptTransaction won't - // add it back. 
- mp.removeOrphan(orphanHash) + if len(missingParents) > 0 { + // Transaction is still an orphan, so add it + // back. + mp.addOrphan(tx) + continue } + // Generate and relay the inventory vector for the + // newly accepted transaction. + iv := wire.NewInvVect(wire.InvTypeTx, tx.Sha()) + mp.server.RelayInventory(iv, tx) + // Add this transaction to the list of transactions to // process so any orphans that depend on this one are // handled too. @@ -1345,8 +1767,60 @@ func (mp *txMemPool) processOrphans(hash *wire.ShaHash) error { processHashes.PushBack(orphanHash) } } +} - return nil +// PruneStakeTx is the function which is called everytime a new block is +// processed. The idea is any outstanding SStx that hasn't been mined in a +// certain period of time (CoinbaseMaturity) and the submitted SStx's +// stake difficulty is below the current required stake difficulty should be +// pruned from mempool since they will never be mined. The same idea stands +// for SSGen and SSRtx +func (mp *txMemPool) PruneStakeTx(requiredStakeDifficulty, height int64) { + // Protect concurrent access. + mp.Lock() + defer mp.Unlock() + + mp.pruneStakeTx(requiredStakeDifficulty, height) +} + +func (mp *txMemPool) pruneStakeTx(requiredStakeDifficulty, height int64) { + for _, tx := range mp.pool { + txType := detectTxType(tx.Tx) + if txType == stake.TxTypeSStx && + tx.Height+int64(heightDiffToPruneTicket) < height { + mp.removeTransaction(tx.Tx, true) + } + if txType == stake.TxTypeSStx && + tx.Tx.MsgTx().TxOut[0].Value < requiredStakeDifficulty { + mp.removeTransaction(tx.Tx, true) + } + if (txType == stake.TxTypeSSRtx || txType == stake.TxTypeSSGen) && + tx.Height+int64(heightDiffToPruneVotes) < height { + mp.removeTransaction(tx.Tx, true) + } + } +} + +// PruneExpiredTx prunes expired transactions from the mempool that may no longer +// be able to be included into a block. +func (mp *txMemPool) PruneExpiredTx(height int64) { + // Protect concurrent access. + mp.Lock() + defer mp.Unlock() + + mp.pruneExpiredTx(height) +} + +func (mp *txMemPool) pruneExpiredTx(height int64) { + for _, tx := range mp.pool { + if tx.Tx.MsgTx().Expiry != 0 { + if height >= int64(tx.Tx.MsgTx().Expiry) { + txmpLog.Debugf("Pruning expired transaction %v from the "+ + "mempool", tx.Tx.Sha()) + mp.removeTransaction(tx.Tx, true) + } + } + } } // ProcessOrphans determines if there are any orphans which depend on the passed @@ -1356,11 +1830,10 @@ func (mp *txMemPool) processOrphans(hash *wire.ShaHash) error { // orphans) until there are no more. // // This function is safe for concurrent access. -func (mp *txMemPool) ProcessOrphans(hash *wire.ShaHash) error { +func (mp *txMemPool) ProcessOrphans(hash *chainhash.Hash) { mp.Lock() - defer mp.Unlock() - - return mp.processOrphans(hash) + mp.processOrphans(hash) + mp.Unlock() } // ProcessTransaction is the main workhorse for handling insertion of new @@ -1369,7 +1842,8 @@ func (mp *txMemPool) ProcessOrphans(hash *wire.ShaHash) error { // rules, orphan transaction handling, and insertion into the memory pool. // // This function is safe for concurrent access. -func (mp *txMemPool) ProcessTransaction(tx *btcutil.Tx, allowOrphan, rateLimit bool) error { +func (mp *txMemPool) ProcessTransaction(tx *dcrutil.Tx, allowOrphan, + rateLimit bool) error { // Protect concurrent access. 
mp.Lock() defer mp.Unlock() @@ -1377,41 +1851,50 @@ func (mp *txMemPool) ProcessTransaction(tx *btcutil.Tx, allowOrphan, rateLimit b txmpLog.Tracef("Processing transaction %v", tx.Sha()) // Potentially accept the transaction to the memory pool. - missingParents, err := mp.maybeAcceptTransaction(tx, true, rateLimit) + var isOrphan bool + _, err := mp.maybeAcceptTransaction(tx, true, rateLimit) if err != nil { return err } - if len(missingParents) == 0 { + if !isOrphan { // Generate the inventory vector and relay it. iv := wire.NewInvVect(wire.InvTypeTx, tx.Sha()) mp.server.RelayInventory(iv, tx) // Accept any orphan transactions that depend on this - // transaction (they may no longer be orphans if all inputs - // are now available) and repeat for those accepted - // transactions until there are no more. - err := mp.processOrphans(tx.Sha()) - if err != nil { - return err - } + // transaction (they are no longer orphans) and repeat for those + // accepted transactions until there are no more. + mp.processOrphans(tx.Sha()) } else { // The transaction is an orphan (has inputs missing). Reject // it if the flag to allow orphans is not set. if !allowOrphan { - // Only use the first missing parent transaction in - // the error message. - // // NOTE: RejectDuplicate is really not an accurate // reject code here, but it matches the reference // implementation and there isn't a better choice due // to the limited number of reject codes. Missing // inputs is assumed to mean they are already spent // which is not really always the case. - str := fmt.Sprintf("orphan transaction %v references "+ - "outputs of unknown or fully-spent "+ - "transaction %v", tx.Sha(), missingParents[0]) - return txRuleError(wire.RejectDuplicate, str) + var buf bytes.Buffer + buf.WriteString("transaction spends unknown inputs; includes " + + "inputs: \n") + lenIn := len(tx.MsgTx().TxIn) + for i, txIn := range tx.MsgTx().TxIn { + str := fmt.Sprintf("[%v]: %v, %v, %v", + i, + txIn.PreviousOutPoint.Hash, + txIn.PreviousOutPoint.Index, + txIn.PreviousOutPoint.Tree) + buf.WriteString(str) + if i != lenIn-1 { + buf.WriteString("\n") + } + } + txmpLog.Debugf("%v", buf.String()) + + return txRuleError(wire.RejectDuplicate, + "transaction spends unknown inputs") } // Potentially add the orphan transaction to the orphan pool. @@ -1439,11 +1922,11 @@ func (mp *txMemPool) Count() int { // pool. // // This function is safe for concurrent access. -func (mp *txMemPool) TxShas() []*wire.ShaHash { +func (mp *txMemPool) TxShas() []*chainhash.Hash { mp.RLock() defer mp.RUnlock() - hashes := make([]*wire.ShaHash, len(mp.pool)) + hashes := make([]*chainhash.Hash, len(mp.pool)) i := 0 for hash := range mp.pool { hashCopy := hash @@ -1483,18 +1966,39 @@ func (mp *txMemPool) LastUpdated() time.Time { return mp.lastUpdated } +// CheckIfTxsExist checks a list of transaction hashes against the mempool +// and returns true if they all exist in the mempool, otherwise false. +// +// This function is safe for concurrent access. +func (mp *txMemPool) CheckIfTxsExist(hashes []chainhash.Hash) bool { + mp.RLock() + defer mp.RUnlock() + + inPool := true + for _, h := range hashes { + if _, exists := mp.pool[h]; !exists { + inPool = false + break + } + } + + return inPool +} + // newTxMemPool returns a new memory pool for validating and storing standalone // transactions until they are mined into a block. 
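PruneExpiredTx above walks the pool once per new block and evicts any transaction whose Expiry field is non-zero and at or below the new height. A minimal sketch of that scan over a simplified pool (the poolEntry type and hashes are hypothetical stand-ins for TxDesc):

```go
package main

import "fmt"

// poolEntry is a pared-down stand-in for TxDesc for this example.
type poolEntry struct {
	hash   string
	expiry int64 // 0 means the transaction never expires
}

// pruneExpired removes entries whose expiry height has been reached,
// mirroring the rule in pruneExpiredTx: expiry != 0 && height >= expiry.
// Deleting from a map while ranging over it is safe in Go.
func pruneExpired(pool map[string]*poolEntry, height int64) {
	for hash, e := range pool {
		if e.expiry != 0 && height >= e.expiry {
			delete(pool, hash)
		}
	}
}

func main() {
	pool := map[string]*poolEntry{
		"a": {hash: "a", expiry: 0},   // never expires
		"b": {hash: "b", expiry: 100}, // expires at height 100
		"c": {hash: "c", expiry: 200},
	}
	pruneExpired(pool, 150)
	fmt.Println(len(pool)) // 2: "b" has been evicted
}
```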
func newTxMemPool(server *server) *txMemPool { memPool := &txMemPool{ server: server, - pool: make(map[wire.ShaHash]*TxDesc), - orphans: make(map[wire.ShaHash]*btcutil.Tx), - orphansByPrev: make(map[wire.ShaHash]*list.List), - outpoints: make(map[wire.OutPoint]*btcutil.Tx), + pool: make(map[chainhash.Hash]*TxDesc), + orphans: make(map[chainhash.Hash]*dcrutil.Tx), + orphansByPrev: make(map[chainhash.Hash]*list.List), + outpoints: make(map[wire.OutPoint]*dcrutil.Tx), + votes: make(map[chainhash.Hash][]*VoteTx), } - if cfg.AddrIndex { - memPool.addrindex = make(map[string]map[wire.ShaHash]struct{}) + + if !cfg.NoAddrIndex { + memPool.addrindex = make(map[string]map[chainhash.Hash]struct{}) } return memPool } diff --git a/mempoolerror.go b/mempoolerror.go index a291c9c2..c2a00921 100644 --- a/mempoolerror.go +++ b/mempoolerror.go @@ -1,12 +1,13 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package main import ( - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/wire" ) // RuleError identifies a rule violation. It is used to indicate that diff --git a/mining.go b/mining.go index 4af91138..015bab10 100644 --- a/mining.go +++ b/mining.go @@ -1,4 +1,5 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -7,44 +8,40 @@ package main import ( "container/heap" "container/list" + "encoding/binary" "fmt" + "math" "time" - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) const ( - // generatedBlockVersion is the version of the block being generated. - // It is defined as a constant here rather than using the - // wire.BlockVersion constant since a change in the block version - // will require changes to the generated block. Using the wire constant - // for generated block version could allow creation of invalid blocks - // for the updated version. - generatedBlockVersion = 3 - // minHighPriority is the minimum priority value that allows a // transaction to be considered high priority. - minHighPriority = btcutil.SatoshiPerBitcoin * 144.0 / 250 + minHighPriority = dcrutil.AtomsPerCoin * 144.0 / 250 // blockHeaderOverhead is the max number of bytes it takes to serialize // a block header and max possible transaction count. blockHeaderOverhead = wire.MaxBlockHeaderPayload + wire.MaxVarIntPayload - // coinbaseFlags is added to the coinbase script of a generated block - // and is used to monitor BIP16 support as well as blocks that are - // generated via btcd. - coinbaseFlags = "/P2SH/btcd/" + // coinbaseFlags is some extra data appended to the coinbase script + // sig. + coinbaseFlags = "/dcrd/" ) // txPrioItem houses a transaction along with extra information that allows the // transaction to be prioritized and track dependencies on other transactions // which have not been mined into a block yet. 
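The minHighPriority constant above encodes "one coin with 144 confirmations spent in a 250-byte transaction": AtomsPerCoin × 144 / 250, with the 144-block figure inherited from the Bitcoin one-day window. A quick check of that arithmetic, assuming the usual 1e8 atoms-per-coin denomination:

```go
package main

import "fmt"

func main() {
	const atomsPerCoin = 1e8 // assumed 8-decimal denomination

	// Priority is sum(inputValue * inputAge) / txSize, so one full coin with
	// 144 confirmations spent in a 250-byte transaction sits exactly at the
	// high-priority threshold.
	minHighPriority := atomsPerCoin * 144.0 / 250

	oneCoinAged := (1 * atomsPerCoin) * 144.0
	fmt.Println(oneCoinAged/250 >= minHighPriority) // true, exactly at the threshold
}
```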
type txPrioItem struct { - tx *btcutil.Tx + tx *dcrutil.Tx fee int64 priority float64 feePerKB float64 @@ -53,11 +50,11 @@ type txPrioItem struct { // on. It will only be set when the transaction references other // transactions in the memory pool and hence must come after them in // a block. - dependsOn map[wire.ShaHash]struct{} + dependsOn map[chainhash.Hash]struct{} } // txPriorityQueueLessFunc describes a function that can be used as a compare -// function for a transaction priority queue (txPriorityQueue). +// function for a transation priority queue (txPriorityQueue). type txPriorityQueueLessFunc func(*txPriorityQueue, int, int) bool // txPriorityQueue implements a priority queue of txPrioItem elements that @@ -151,6 +148,20 @@ func newTxPriorityQueue(reserve int, sortByFee bool) *txPriorityQueue { return pq } +// containsTx is a helper function that checks to see if a list of transactions +// contains any of the TxIns of some transaction. +func containsTxIns(txs []*dcrutil.Tx, tx *dcrutil.Tx) bool { + for _, txToCheck := range txs { + for _, txIn := range tx.MsgTx().TxIn { + if txIn.PreviousOutPoint.Hash.IsEqual(txToCheck.Sha()) { + return true + } + } + } + + return false +} + // BlockTemplate houses a block that has yet to be solved along with additional // details about the fees and the number of signature operations for each // transaction in the block. @@ -162,28 +173,144 @@ type BlockTemplate struct { validPayAddress bool } +// hashExistsInList checks if a hash exists in a list of hashes. +func hashExistsInList(hash *chainhash.Hash, list []chainhash.Hash) bool { + for _, h := range list { + if hash.IsEqual(&h) { + return true + } + } + + return false +} + +// txIndexFromTxList returns a transaction's index in a list, or -1 if it +// can not be found. +func txIndexFromTxList(hash chainhash.Hash, list []*dcrutil.Tx) int { + for i, tx := range list { + h := tx.Sha() + if hash == *h { + return i + } + } + + return -1 +} + // mergeTxStore adds all of the transactions in txStoreB to txStoreA. The // result is that txStoreA will contain all of its original transactions plus // all of the transactions in txStoreB. func mergeTxStore(txStoreA blockchain.TxStore, txStoreB blockchain.TxStore) { for hash, txDataB := range txStoreB { if txDataA, exists := txStoreA[hash]; !exists || - (txDataA.Err == database.ErrTxShaMissing && - txDataB.Err != database.ErrTxShaMissing) { + (txDataA.Err == database.ErrTxShaMissing && txDataB.Err != + database.ErrTxShaMissing) { txStoreA[hash] = txDataB } } } -// standardCoinbaseScript returns a standard script suitable for use as the -// signature script of the coinbase transaction of a new block. In particular, -// it starts with the block height that is required by version 2 blocks and adds -// the extra nonce as well as additional coinbase flags. -func standardCoinbaseScript(nextBlockHeight int64, extraNonce uint64) ([]byte, error) { - return txscript.NewScriptBuilder().AddInt64(nextBlockHeight). - AddInt64(int64(extraNonce)).AddData([]byte(coinbaseFlags)). - Script() +// standardCoinbaseOpReturn creates a standard OP_RETURN output to insert into +// coinbase to use as extranonces. The OP_RETURN pushes 32 bytes. 
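The coinbase OP_RETURN builder that follows packs a 36-byte payload: a 4-byte little-endian block height followed by four 8-byte extranonces. A self-contained sketch of building and reading back that layout, without the txscript wrapping (function names here are illustrative):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// packExtraNonce lays out height || en0 || en1 || en2 || en3, all little
// endian, exactly 36 bytes, which is the data later pushed in the coinbase
// OP_RETURN output.
func packExtraNonce(height uint32, extraNonces [4]uint64) []byte {
	buf := make([]byte, 36)
	binary.LittleEndian.PutUint32(buf[0:4], height)
	for i, en := range extraNonces {
		binary.LittleEndian.PutUint64(buf[4+8*i:12+8*i], en)
	}
	return buf
}

// unpackExtraNonce reverses packExtraNonce.
func unpackExtraNonce(buf []byte) (uint32, [4]uint64) {
	var ens [4]uint64
	height := binary.LittleEndian.Uint32(buf[0:4])
	for i := range ens {
		ens[i] = binary.LittleEndian.Uint64(buf[4+8*i : 12+8*i])
	}
	return height, ens
}

func main() {
	data := packExtraNonce(12345, [4]uint64{1, 2, 3, 4})
	fmt.Println(len(data)) // 36
	h, ens := unpackExtraNonce(data)
	fmt.Println(h, ens) // 12345 [1 2 3 4]
}
```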
+func standardCoinbaseOpReturn(height uint32, extraNonces []uint64) ([]byte, + error) { + if len(extraNonces) != 4 { + return nil, fmt.Errorf("extranonces has wrong num uint64s") + } + + enData := make([]byte, 36, 36) + binary.LittleEndian.PutUint32(enData[0:4], height) + binary.LittleEndian.PutUint64(enData[4:12], extraNonces[0]) + binary.LittleEndian.PutUint64(enData[12:20], extraNonces[1]) + binary.LittleEndian.PutUint64(enData[20:28], extraNonces[2]) + binary.LittleEndian.PutUint64(enData[28:36], extraNonces[3]) + extraNonceScript, err := txscript.GenerateProvablyPruneableOut(enData) + if err != nil { + return nil, err + } + + return extraNonceScript, nil +} + +// getCoinbaseExtranonce extracts the extranonce from a block template's +// coinbase transaction. +func (bt *BlockTemplate) getCoinbaseExtranonces() []uint64 { + if len(bt.block.Transactions[0].TxOut) < 2 { + return []uint64{0, 0, 0, 0} + } + + if len(bt.block.Transactions[0].TxOut[1].PkScript) < 38 { + return []uint64{0, 0, 0, 0} + } + + ens := make([]uint64, 4, 4) // 32-bytes + ens[0] = binary.LittleEndian.Uint64( + bt.block.Transactions[0].TxOut[1].PkScript[6:14]) + ens[1] = binary.LittleEndian.Uint64( + bt.block.Transactions[0].TxOut[1].PkScript[14:22]) + ens[2] = binary.LittleEndian.Uint64( + bt.block.Transactions[0].TxOut[1].PkScript[22:30]) + ens[3] = binary.LittleEndian.Uint64( + bt.block.Transactions[0].TxOut[1].PkScript[30:38]) + + return ens +} + +// getCoinbaseExtranonce extracts the extranonce from a block template's +// coinbase transaction. +func getCoinbaseExtranonces(msgBlock *wire.MsgBlock) []uint64 { + if len(msgBlock.Transactions[0].TxOut) < 2 { + return []uint64{0, 0, 0, 0} + } + + if len(msgBlock.Transactions[0].TxOut[1].PkScript) < 38 { + return []uint64{0, 0, 0, 0} + } + + ens := make([]uint64, 4, 4) // 32-bytes + ens[0] = binary.LittleEndian.Uint64( + msgBlock.Transactions[0].TxOut[1].PkScript[6:14]) + ens[1] = binary.LittleEndian.Uint64( + msgBlock.Transactions[0].TxOut[1].PkScript[14:22]) + ens[2] = binary.LittleEndian.Uint64( + msgBlock.Transactions[0].TxOut[1].PkScript[22:30]) + ens[3] = binary.LittleEndian.Uint64( + msgBlock.Transactions[0].TxOut[1].PkScript[30:38]) + + return ens +} + +// UpdateExtraNonce updates the extra nonce in the coinbase script of the passed +// block by regenerating the coinbase script with the passed value and block +// height. It also recalculates and updates the new merkle root that results +// from changing the coinbase script. +func UpdateExtraNonce(msgBlock *wire.MsgBlock, blockHeight int64, + extraNonces []uint64) error { + // First block has no extranonce. + if blockHeight == 1 { + return nil + } + if len(extraNonces) != 4 { + return fmt.Errorf("not enough nonce information passed") + } + + coinbaseOpReturn, err := standardCoinbaseOpReturn(uint32(blockHeight), + extraNonces) + if err != nil { + return err + } + msgBlock.Transactions[0].TxOut[1].PkScript = coinbaseOpReturn + + // TODO(davec): A dcrutil.Block should use saved in the state to avoid + // recalculating all of the other transaction hashes. + // block.Transactions[0].InvalidateCache() + + // Recalculate the merkle root with the updated extra nonce. 
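standardCoinbaseOpReturn above packs the block height and four extra nonces into a fixed 36-byte payload, and getCoinbaseExtranonces reads the nonces back out of the resulting pkScript. A standalone sketch of that byte layout, assuming the two-byte script header (OP_RETURN plus a 36-byte data push) implied by the [6:14], [14:22], ... slicing above; the real code builds the script via txscript.GenerateProvablyPruneableOut:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeExtraNonceData mirrors the 36-byte payload built above: a
// little-endian block height followed by four little-endian uint64 nonces.
func encodeExtraNonceData(height uint32, extraNonces [4]uint64) []byte {
	enData := make([]byte, 36)
	binary.LittleEndian.PutUint32(enData[0:4], height)
	for i, en := range extraNonces {
		binary.LittleEndian.PutUint64(enData[4+i*8:12+i*8], en)
	}
	return enData
}

// decodeExtraNonces reads the nonces back out of a pkScript, assuming a
// 2-byte script header in front of the payload.
func decodeExtraNonces(pkScript []byte) (uint32, [4]uint64) {
	height := binary.LittleEndian.Uint32(pkScript[2:6])
	var ens [4]uint64
	for i := range ens {
		ens[i] = binary.LittleEndian.Uint64(pkScript[6+i*8 : 14+i*8])
	}
	return height, ens
}

func main() {
	payload := encodeExtraNonceData(1024, [4]uint64{0, 0, 0, 0xdeadbeef})
	// Fake the two script bytes (OP_RETURN, push 36) for the round trip.
	pkScript := append([]byte{0x6a, 0x24}, payload...)
	height, ens := decodeExtraNonces(pkScript)
	fmt.Println(height, ens) // 1024 [0 0 0 3735928559]
}
```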
+ block := dcrutil.NewBlockDeepCopyCoinbase(msgBlock) + merkles := blockchain.BuildMerkleTreeStore(block.Transactions()) + msgBlock.Header.MerkleRoot = *merkles[len(merkles)-1] + return nil } // createCoinbaseTx returns a coinbase transaction paying an appropriate subsidy @@ -192,47 +319,130 @@ func standardCoinbaseScript(nextBlockHeight int64, extraNonce uint64) ([]byte, e // // See the comment for NewBlockTemplate for more information about why the nil // address handling is useful. -func createCoinbaseTx(coinbaseScript []byte, nextBlockHeight int64, addr btcutil.Address) (*btcutil.Tx, error) { +func createCoinbaseTx(coinbaseScript []byte, + opReturnPkScript []byte, + nextBlockHeight int64, + addr dcrutil.Address, + voters uint16, + params *chaincfg.Params) (*dcrutil.Tx, error) { + + tx := wire.NewMsgTx() + tx.AddTxIn(&wire.TxIn{ + // Coinbase transactions have no inputs, so previous outpoint is + // zero hash and max index. + PreviousOutPoint: *wire.NewOutPoint(&chainhash.Hash{}, + wire.MaxPrevOutIndex, dcrutil.TxTreeRegular), + Sequence: wire.MaxTxInSequenceNum, + BlockHeight: wire.NullBlockHeight, + BlockIndex: wire.NullBlockIndex, + SignatureScript: coinbaseScript, + }) + + // Block one is a special block that might pay out tokens to a ledger. + if nextBlockHeight == 1 && len(params.BlockOneLedger) != 0 { + // Convert the addresses in the ledger into useable format. + addrs := make([]dcrutil.Address, + len(params.BlockOneLedger), + len(params.BlockOneLedger)) + for i, payout := range params.BlockOneLedger { + addr, err := dcrutil.DecodeAddress(payout.Address, params) + if err != nil { + return nil, err + } + addrs[i] = addr + } + + for i, payout := range params.BlockOneLedger { + // Make payout to this address. + pks, err := txscript.PayToAddrScript(addrs[i]) + if err != nil { + return nil, err + } + tx.AddTxOut(&wire.TxOut{ + Value: payout.Amount, + PkScript: pks, + }) + } + + tx.TxIn[0].ValueIn = params.BlockOneSubsidy() + + return dcrutil.NewTx(tx), nil + } + + // Create a coinbase with correct block subsidy and extranonce. + subsidy := blockchain.CalcBlockWorkSubsidy(nextBlockHeight, + voters, + activeNetParams.Params) + tax := blockchain.CalcBlockTaxSubsidy(nextBlockHeight, + voters, + activeNetParams.Params) + addrOrg, err := dcrutil.DecodeAddress(params.OrganizationAddress, params) + if err != nil { + return nil, err + } + pksOrg, err := txscript.PayToAddrScript(addrOrg) + if err != nil { + return nil, err + } + + // Tax output. + if params.BlockTaxProportion > 0 { + tx.AddTxOut(&wire.TxOut{ + Value: tax, + PkScript: pksOrg, + }) + } else { + // Tax disabled. + scriptBuilder := txscript.NewScriptBuilder() + trueScript, err := scriptBuilder.AddOp(txscript.OP_TRUE).Script() + if err != nil { + return nil, err + } + tx.AddTxOut(&wire.TxOut{ + Value: tax, + PkScript: trueScript, + }) + } + // Extranonce. + tx.AddTxOut(&wire.TxOut{ + Value: 0, + PkScript: opReturnPkScript, + }) + // ValueIn. + tx.TxIn[0].ValueIn = subsidy + tax + // Create the script to pay to the provided payment address if one was // specified. Otherwise create a script that allows the coinbase to be // redeemable by anyone. 
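For blocks past height one, createCoinbaseTx lays out the coinbase outputs in a fixed order: the tax output, the OP_RETURN extra nonce carrier, and finally the miner subsidy (appended at the end of the function below), with the input's ValueIn set to the work subsidy plus tax. A toy illustration of that layout with made-up amounts; the real values come from blockchain.CalcBlockWorkSubsidy and blockchain.CalcBlockTaxSubsidy, and fees are only added to the miner output later in NewBlockTemplate:

```go
package main

import "fmt"

type txOut struct {
	value   int64
	purpose string
}

func main() {
	workSubsidy := int64(600000000) // hypothetical PoW share of the subsidy, in atoms
	taxSubsidy := int64(300000000)  // hypothetical tax share, in atoms
	fees := int64(1500000)          // summed later from the selected transactions

	outs := []txOut{
		{taxSubsidy, "TxOut[0]: tax, paid to the organization address"},
		{0, "TxOut[1]: OP_RETURN extra nonce carrier"},
		{workSubsidy + fees, "TxOut[2]: miner subsidy plus collected fees"},
	}
	valueIn := workSubsidy + taxSubsidy // TxIn[0].ValueIn, as set in createCoinbaseTx

	for _, o := range outs {
		fmt.Printf("%-50s %d\n", o.purpose, o.value)
	}
	fmt.Println("TxIn[0].ValueIn:", valueIn)
}
```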
- var pkScript []byte + var pksSubsidy []byte if addr != nil { var err error - pkScript, err = txscript.PayToAddrScript(addr) + pksSubsidy, err = txscript.PayToAddrScript(addr) if err != nil { return nil, err } } else { var err error scriptBuilder := txscript.NewScriptBuilder() - pkScript, err = scriptBuilder.AddOp(txscript.OP_TRUE).Script() + pksSubsidy, err = scriptBuilder.AddOp(txscript.OP_TRUE).Script() if err != nil { return nil, err } } - - tx := wire.NewMsgTx() - tx.AddTxIn(&wire.TxIn{ - // Coinbase transactions have no inputs, so previous outpoint is - // zero hash and max index. - PreviousOutPoint: *wire.NewOutPoint(&wire.ShaHash{}, - wire.MaxPrevOutIndex), - SignatureScript: coinbaseScript, - Sequence: wire.MaxTxInSequenceNum, - }) + // Subsidy paid to miner. tx.AddTxOut(&wire.TxOut{ - Value: blockchain.CalcBlockSubsidy(nextBlockHeight, - activeNetParams.Params), - PkScript: pkScript, + Value: subsidy, + PkScript: pksSubsidy, }) - return btcutil.NewTx(tx), nil + + return dcrutil.NewTx(tx), nil } // spendTransaction updates the passed transaction store by marking the inputs // to the passed transaction as spent. It also adds the passed transaction to // the store at the provided height. -func spendTransaction(txStore blockchain.TxStore, tx *btcutil.Tx, height int64) error { +func spendTransaction(txStore blockchain.TxStore, tx *dcrutil.Tx, + height int64) error { for _, txIn := range tx.MsgTx().TxIn { originHash := &txIn.PreviousOutPoint.Hash originIndex := txIn.PreviousOutPoint.Index @@ -245,6 +455,7 @@ func spendTransaction(txStore blockchain.TxStore, tx *btcutil.Tx, height int64) Tx: tx, Hash: tx.Sha(), BlockHeight: height, + BlockIndex: wire.NullBlockIndex, Spent: make([]bool, len(tx.MsgTx().TxOut)), Err: nil, } @@ -254,7 +465,7 @@ func spendTransaction(txStore blockchain.TxStore, tx *btcutil.Tx, height int64) // logSkippedDeps logs any dependencies which are also skipped as a result of // skipping a transaction while generating a block template at the trace level. -func logSkippedDeps(tx *btcutil.Tx, deps *list.List) { +func logSkippedDeps(tx *dcrutil.Tx, deps *list.List) { if deps == nil { return } @@ -283,7 +494,8 @@ func minimumMedianTime(chainState *chainState) (time.Time, error) { // medianAdjustedTime returns the current time adjusted to ensure it is at least // one second after the median timestamp of the last several blocks per the // chain consensus rules. -func medianAdjustedTime(chainState *chainState, timeSource blockchain.MedianTimeSource) (time.Time, error) { +func medianAdjustedTime(chainState *chainState, + timeSource blockchain.MedianTimeSource) (time.Time, error) { chainState.Lock() defer chainState.Unlock() if chainState.pastMedianTimeErr != nil { @@ -302,9 +514,399 @@ func medianAdjustedTime(chainState *chainState, timeSource blockchain.MedianTime newTimestamp = minTimestamp } + // Adjust by the amount requested from the command line argument. + newTimestamp = newTimestamp.Add( + time.Duration(-cfg.MiningTimeOffset) * time.Second) + return newTimestamp, nil } +// maybeInsertStakeTx checks to make sure that a stake tx is +// valid from the perspective of the mainchain (not necessarily +// the mempool or block) before inserting into a tx tree. +// If it fails the check, it returns false; otherwise true. 
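medianAdjustedTime now also subtracts cfg.MiningTimeOffset after clamping the timestamp to one second past the median time of recent blocks. A small sketch of that adjustment in isolation, with hypothetical inputs:

```go
package main

import (
	"fmt"
	"time"
)

// medianAdjusted mimics the timestamp selection above: take the (network
// adjusted) current time, clamp it to one second after the past median time,
// then shift it back by the configured mining time offset in seconds.
func medianAdjusted(now, pastMedianTime time.Time, miningTimeOffset int) time.Time {
	newTimestamp := now
	minTimestamp := pastMedianTime.Add(time.Second)
	if newTimestamp.Before(minTimestamp) {
		newTimestamp = minTimestamp
	}
	// Equivalent to the cfg.MiningTimeOffset adjustment in the diff.
	return newTimestamp.Add(time.Duration(-miningTimeOffset) * time.Second)
}

func main() {
	median := time.Date(2016, 2, 8, 12, 0, 0, 0, time.UTC)
	now := median.Add(30 * time.Second)
	fmt.Println(medianAdjusted(now, median, 10)) // 2016-02-08 12:00:20 +0000 UTC
}
```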
+func maybeInsertStakeTx(mp *txMemPool, stx *dcrutil.Tx, treeValid bool) bool { + bm := mp.server.blockManager + + missingInput := false + + txStore, err := bm.FetchTransactionStore(stx, treeValid) + if err != nil { + minrLog.Warnf("Unable to fetch transaction store for "+ + "stx %s: %v", stx.Sha(), err) + return false + } + isSSGen, _ := stake.IsSSGen(stx) + for i, txIn := range stx.MsgTx().TxIn { + // Evaluate if this is a stakebase input or not. If it + // is, continue without evaluation of the input. + // if isStakeBase + if isSSGen && (i == 0) { + txIn.BlockHeight = wire.NullBlockHeight + txIn.BlockIndex = wire.NullBlockIndex + + continue + } + + originHash := &txIn.PreviousOutPoint.Hash + txData, exists := txStore[*originHash] + if !exists || txData.Err != nil || txData.Tx == nil { + missingInput = true + break + } else { + originIdx := txIn.PreviousOutPoint.Index + txIn.ValueIn = txData.Tx.MsgTx().TxOut[originIdx].Value + txIn.BlockHeight = uint32(txData.BlockHeight) + txIn.BlockIndex = txData.BlockIndex + } + } + return !missingInput +} + +// deepCopyBlockTemplate returns a deeply copied block template that copies all +// data except a block's references to transactions, which are kept as pointers +// in the block. This is considered safe because transaction data is generally +// immutable, with the exception of coinbases which we alternatively also +// deep copy. +func deepCopyBlockTemplate(blockTemplate *BlockTemplate) *BlockTemplate { + if blockTemplate == nil { + return nil + } + + // Deep copy the header, which we hash on. + headerCopy := wire.BlockHeader{ + Version: blockTemplate.block.Header.Version, + PrevBlock: blockTemplate.block.Header.PrevBlock, + MerkleRoot: blockTemplate.block.Header.MerkleRoot, + StakeRoot: blockTemplate.block.Header.StakeRoot, + VoteBits: blockTemplate.block.Header.VoteBits, + FinalState: blockTemplate.block.Header.FinalState, + Voters: blockTemplate.block.Header.Voters, + FreshStake: blockTemplate.block.Header.FreshStake, + Revocations: blockTemplate.block.Header.Revocations, + PoolSize: blockTemplate.block.Header.PoolSize, + Timestamp: blockTemplate.block.Header.Timestamp, + Bits: blockTemplate.block.Header.Bits, + SBits: blockTemplate.block.Header.SBits, + Nonce: blockTemplate.block.Header.Nonce, + Height: blockTemplate.block.Header.Height, + Size: blockTemplate.block.Header.Size, + } + + // Copy transactions pointers. Duplicate the coinbase + // transaction, because it might update it by modifying + // the extra nonce. 
+ transactionsCopy := make([]*wire.MsgTx, len(blockTemplate.block.Transactions), + len(blockTemplate.block.Transactions)) + coinbaseCopy := + dcrutil.NewTxDeep(blockTemplate.block.Transactions[0]) + for i, mtx := range blockTemplate.block.Transactions { + if i == 0 { + transactionsCopy[i] = coinbaseCopy.MsgTx() + } else { + transactionsCopy[i] = mtx + } + } + + sTransactionsCopy := make([]*wire.MsgTx, + len(blockTemplate.block.STransactions), + len(blockTemplate.block.STransactions)) + for i, mtx := range blockTemplate.block.STransactions { + sTransactionsCopy[i] = mtx + } + + msgBlockCopy := &wire.MsgBlock{ + headerCopy, + transactionsCopy, + sTransactionsCopy, + } + + fees := make([]int64, len(blockTemplate.fees), len(blockTemplate.fees)) + for i, f := range blockTemplate.fees { + fees[i] = f + } + + sigOps := make([]int64, len(blockTemplate.sigOpCounts), + len(blockTemplate.sigOpCounts)) + for i, s := range blockTemplate.sigOpCounts { + sigOps[i] = s + } + + return &BlockTemplate{ + block: msgBlockCopy, + fees: fees, + sigOpCounts: sigOps, + height: blockTemplate.height, + validPayAddress: blockTemplate.validPayAddress, + } +} + +// handleTooFewVoters handles the situation in which there are too few voters on +// of the blockchain. If there are too few voters and a cached parent template to +// work off of is present, it will return a copy of that template to pass to the +// miner. +// Safe for concurrent access. +func handleTooFewVoters(nextHeight int64, + miningAddress dcrutil.Address, + bm *blockManager) (*BlockTemplate, error) { + timeSource := bm.server.timeSource + chainState := &bm.chainState + stakeValidationHeight := bm.server.chainParams.StakeValidationHeight + curTemplate := bm.GetCurrentTemplate() + + // Check to see if we've fallen off the chain, for example if a + // reorganization had recently occurred. If this is the case, + // nuke the templates. + bestHeader := chainState.GetTopBlockHeader() + if curTemplate != nil { + if !bestHeader.PrevBlock.IsEqual( + &curTemplate.block.Header.PrevBlock) { + minrLog.Debugf("Cached mining templates are no longer current, " + + "resetting") + bm.SetCurrentTemplate(nil) + bm.SetParentTemplate(nil) + } + } + + // Handle not enough voters being present if we're set to mine aggressively + // (default behaviour). + if nextHeight >= stakeValidationHeight { + if bm.AggressiveMining { + if curTemplate != nil { + cptCopy := deepCopyBlockTemplate(curTemplate) + + // Update the timestamp of the old template. + ts, err := medianAdjustedTime(chainState, timeSource) + if err != nil { + return nil, err + } + cptCopy.block.Header.Timestamp = ts + + // If we're on testnet, the time since this last block + // listed as the parent must be taken into consideration. + if bm.server.chainParams.ResetMinDifficulty { + parentHash := cptCopy.block.Header.PrevBlock + + requiredDifficulty, err := + bm.CalcNextRequiredDiffNode(&parentHash, ts) + if err != nil { + return nil, miningRuleError(ErrGettingDifficulty, + err.Error()) + } + + cptCopy.block.Header.Bits = requiredDifficulty + } + + // Choose a new extranonce value that is one greater + // than the previous extranonce, so we don't remine the + // same block and choose the same winners as before. + ens := cptCopy.getCoinbaseExtranonces() + ens[0] += 1 + UpdateExtraNonce(cptCopy.block, cptCopy.height, ens) + + // Update extranonce of the original template too, so + // we keep getting unique numbers. + UpdateExtraNonce(curTemplate.block, curTemplate.height, ens) + + // Make sure the block validates. 
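deepCopyBlockTemplate copies the header by value, deep-copies only the coinbase (since updating the extra nonce mutates it), and shares pointers to every other transaction. A toy example of why that split is safe, using stand-in types rather than the wire structures:

```go
package main

import "fmt"

// toyTx and toyTemplate stand in for the real transaction and template types.
type toyTx struct{ extraNonce uint64 }

type toyTemplate struct {
	header struct{ nonce uint32 } // copied by value
	txs    []*toyTx               // txs[0] plays the role of the coinbase
}

// copyTemplate deep-copies the coinbase but shares the remaining transaction
// pointers, mirroring the strategy used by deepCopyBlockTemplate above.
func copyTemplate(t *toyTemplate) *toyTemplate {
	c := &toyTemplate{header: t.header}
	c.txs = make([]*toyTx, len(t.txs))
	coinbaseCopy := *t.txs[0]
	c.txs[0] = &coinbaseCopy
	copy(c.txs[1:], t.txs[1:])
	return c
}

func main() {
	orig := &toyTemplate{txs: []*toyTx{{extraNonce: 1}, {}}}
	cp := copyTemplate(orig)
	cp.txs[0].extraNonce = 2 // mutating the copy's coinbase...
	// ...does not leak back into the original template.
	fmt.Println(orig.txs[0].extraNonce, cp.txs[0].extraNonce) // 1 2
}
```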
+ block := dcrutil.NewBlockDeepCopyCoinbase(cptCopy.block) + block.SetHeight(cptCopy.height) + if err := blockchain.CheckWorklessBlockSanity(block, + bm.server.timeSource, + bm.server.chainParams); err != nil { + return nil, miningRuleError(ErrCheckConnectBlock, + err.Error()) + } + + if err := bm.CheckConnectBlock(block); err != nil { + minrLog.Errorf("failed to check template: %v", err.Error()) + return nil, miningRuleError(ErrCheckConnectBlock, + err.Error()) + } + + return cptCopy, nil + } + + // We may have just started mining and stored the current block + // template, so we don't have a parent. + if curTemplate == nil { + // Fetch the latest block and head and begin working + // off of it with an empty transaction tree regular + // and the contents of that stake tree. In the future + // we should have the option of readding some + // transactions from this block, too. + topBlock, err := + bm.GetTopBlockFromChain() + if err != nil { + return nil, fmt.Errorf("failed to get top block from " + + "chain") + } + btMsgBlock := new(wire.MsgBlock) + rand, err := wire.RandomUint64() + if err != nil { + return nil, err + } + opReturnPkScript, err := + standardCoinbaseOpReturn(topBlock.MsgBlock().Header.Height, + []uint64{0, 0, 0, rand}) + if err != nil { + return nil, err + } + coinbaseTx, err := createCoinbaseTx([]byte{0x01, 0x02}, + opReturnPkScript, + topBlock.Height(), + miningAddress, + topBlock.MsgBlock().Header.Voters, + bm.server.chainParams) + if err != nil { + return nil, err + } + btMsgBlock.AddTransaction(coinbaseTx.MsgTx()) + + for _, stx := range topBlock.STransactions() { + btMsgBlock.AddSTransaction(stx.MsgTx()) + } + + // Copy the rest of the header. + btMsgBlock.Header.Version = + topBlock.MsgBlock().Header.Version + btMsgBlock.Header.PrevBlock = + topBlock.MsgBlock().Header.PrevBlock + btMsgBlock.Header.VoteBits = + topBlock.MsgBlock().Header.VoteBits + btMsgBlock.Header.FinalState = + topBlock.MsgBlock().Header.FinalState + btMsgBlock.Header.Voters = + topBlock.MsgBlock().Header.Voters + btMsgBlock.Header.FreshStake = + topBlock.MsgBlock().Header.FreshStake + btMsgBlock.Header.Revocations = + topBlock.MsgBlock().Header.Revocations + btMsgBlock.Header.PoolSize = + topBlock.MsgBlock().Header.PoolSize + btMsgBlock.Header.Bits = + topBlock.MsgBlock().Header.Bits + btMsgBlock.Header.SBits = + topBlock.MsgBlock().Header.SBits + btMsgBlock.Header.Height = + topBlock.MsgBlock().Header.Height + + // Set a fresh timestamp. + ts, err := medianAdjustedTime(chainState, timeSource) + if err != nil { + return nil, err + } + btMsgBlock.Header.Timestamp = ts + + // If we're on testnet, the time since this last block + // listed as the parent must be taken into consideration. + if bm.server.chainParams.ResetMinDifficulty { + parentHash := topBlock.MsgBlock().Header.PrevBlock + + requiredDifficulty, err := + bm.CalcNextRequiredDiffNode(&parentHash, ts) + if err != nil { + return nil, miningRuleError(ErrGettingDifficulty, + err.Error()) + } + + btMsgBlock.Header.Bits = requiredDifficulty + } + + // Recalculate the size. + btMsgBlock.Header.Size = uint32(btMsgBlock.SerializeSize()) + + bt := &BlockTemplate{ + block: btMsgBlock, + fees: []int64{0}, + sigOpCounts: []int64{0}, + height: int64(topBlock.MsgBlock().Header.Height), + validPayAddress: miningAddress != nil, + } + + // Recalculate the merkle roots. Use a temporary 'immutable' + // block object as we're changing the header contents. 
+ btBlockTemp := dcrutil.NewBlockDeepCopyCoinbase(btMsgBlock) + merkles := + blockchain.BuildMerkleTreeStore(btBlockTemp.Transactions()) + merklesStake := + blockchain.BuildMerkleTreeStore(btBlockTemp.STransactions()) + btMsgBlock.Header.MerkleRoot = *merkles[len(merkles)-1] + btMsgBlock.Header.StakeRoot = *merklesStake[len(merklesStake)-1] + + // Make sure the block validates. + btBlock := dcrutil.NewBlockDeepCopyCoinbase(btMsgBlock) + btBlock.SetHeight(bt.height) + if err := blockchain.CheckWorklessBlockSanity(btBlock, + bm.server.timeSource, + bm.server.chainParams); err != nil { + str := fmt.Sprintf("failed to check sanity of template: %v", + err.Error()) + return nil, miningRuleError(ErrCheckConnectBlock, + str) + } + + if err := bm.CheckConnectBlock(btBlock); err != nil { + str := fmt.Sprintf("failed to check template: %v", + err.Error()) + return nil, miningRuleError(ErrCheckConnectBlock, + str) + } + + // Make a copy to return. + cptCopy := deepCopyBlockTemplate(bt) + + return cptCopy, nil + } + } + } + + bmgrLog.Debugf("Not enough voters on top block to generate " + + "new block template") + + return nil, nil +} + +// handleCreatedBlockTemplate stores a successfully created block template to +// the appropriate cache if needed, then returns the template to the miner to +// work on. The stored template is a copy of the template, to prevent races +// from occuring in case the template is mined on by the CPUminer. +func handleCreatedBlockTemplate(blockTemplate *BlockTemplate, + bm *blockManager) (*BlockTemplate, error) { + curTemplate := bm.GetCurrentTemplate() + + nextBlockHeight := blockTemplate.height + stakeValidationHeight := bm.server.chainParams.StakeValidationHeight + // This is where we begin storing block templates, when either the + // program is freshly started or the chain is matured to stake + // validation height. + if curTemplate == nil && + nextBlockHeight >= stakeValidationHeight-2 { + bm.SetCurrentTemplate(blockTemplate) + } + + // We're at the height where the next block needs to include SSGens, + // so we check to if CachedCurrentTemplate is out of date. If it is, + // we store it as the cached parent template, and store the new block + // template as the currenct template. + if curTemplate != nil && + nextBlockHeight >= stakeValidationHeight-1 { + if curTemplate.height < nextBlockHeight { + bm.SetParentTemplate(curTemplate) + bm.SetCurrentTemplate(blockTemplate) + } + } + + // Overwrite the old cached block if it's out of date. + if curTemplate != nil { + if curTemplate.height == nextBlockHeight { + bm.SetCurrentTemplate(blockTemplate) + } + } + + return blockTemplate, nil +} + // NewBlockTemplate returns a new block template that is ready to be solved // using the transactions from the passed transaction memory pool and a coinbase // that either pays to the passed address if it is not nil, or a coinbase that @@ -340,7 +942,7 @@ func medianAdjustedTime(chainState *chainState, timeSource blockchain.MedianTime // case the block will be filled with the low-fee/free transactions until the // block size reaches that minimum size. // -// Any transactions which would cause the block to exceed the BlockMaxSize +// Any transactions whome mulch to go put down which is more fun than replacing hashes // configuration option, exceed the maximum allowed signature operations per // block, or otherwise cause the block to be invalid are skipped. 
+ btBlockTemp := dcrutil.NewBlockDeepCopyCoinbase(btMsgBlock)
+ merkles :=
+ blockchain.BuildMerkleTreeStore(btBlockTemp.Transactions())
+ merklesStake :=
+ blockchain.BuildMerkleTreeStore(btBlockTemp.STransactions())
+ btMsgBlock.Header.MerkleRoot = *merkles[len(merkles)-1]
+ btMsgBlock.Header.StakeRoot = *merklesStake[len(merklesStake)-1]
+
+ // Make sure the block validates.
+ btBlock := dcrutil.NewBlockDeepCopyCoinbase(btMsgBlock)
+ btBlock.SetHeight(bt.height)
+ if err := blockchain.CheckWorklessBlockSanity(btBlock,
+ bm.server.timeSource,
+ bm.server.chainParams); err != nil {
+ str := fmt.Sprintf("failed to check sanity of template: %v",
+ err.Error())
+ return nil, miningRuleError(ErrCheckConnectBlock,
+ str)
+ }
+
+ if err := bm.CheckConnectBlock(btBlock); err != nil {
+ str := fmt.Sprintf("failed to check template: %v",
+ err.Error())
+ return nil, miningRuleError(ErrCheckConnectBlock,
+ str)
+ }
+
+ // Make a copy to return.
+ cptCopy := deepCopyBlockTemplate(bt)
+
+ return cptCopy, nil
+ }
+ }
+ }
+
+ bmgrLog.Debugf("Not enough voters on top block to generate " +
+ "new block template")
+
+ return nil, nil
+}
+
+// handleCreatedBlockTemplate stores a successfully created block template to
+// the appropriate cache if needed, then returns the template to the miner to
+// work on. The stored template is a copy of the template, to prevent races
+// from occurring in case the template is mined on by the CPU miner.
+func handleCreatedBlockTemplate(blockTemplate *BlockTemplate,
+ bm *blockManager) (*BlockTemplate, error) {
+ curTemplate := bm.GetCurrentTemplate()
+
+ nextBlockHeight := blockTemplate.height
+ stakeValidationHeight := bm.server.chainParams.StakeValidationHeight
+ // This is where we begin storing block templates, when either the
+ // program has freshly started or the chain has matured to stake
+ // validation height.
+ if curTemplate == nil &&
+ nextBlockHeight >= stakeValidationHeight-2 {
+ bm.SetCurrentTemplate(blockTemplate)
+ }
+
+ // We're at the height where the next block needs to include SSGens,
+ // so we check to see if CachedCurrentTemplate is out of date. If it is,
+ // we store it as the cached parent template, and store the new block
+ // template as the current template.
+ if curTemplate != nil &&
+ nextBlockHeight >= stakeValidationHeight-1 {
+ if curTemplate.height < nextBlockHeight {
+ bm.SetParentTemplate(curTemplate)
+ bm.SetCurrentTemplate(blockTemplate)
+ }
+ }
+
+ // Overwrite the old cached block if it's out of date.
+ if curTemplate != nil {
+ if curTemplate.height == nextBlockHeight {
+ bm.SetCurrentTemplate(blockTemplate)
+ }
+ }
+
+ return blockTemplate, nil
+}
+
 // NewBlockTemplate returns a new block template that is ready to be solved
 // using the transactions from the passed transaction memory pool and a coinbase
 // that either pays to the passed address if it is not nil, or a coinbase that
@@ -340,7 +942,7 @@ func medianAdjustedTime(chainState *chainState, timeSource blockchain.MedianTime
 // case the block will be filled with the low-fee/free transactions until the
 // block size reaches that minimum size.
 //
-// Any transactions which would cause the block to exceed the BlockMaxSize
+// Any transactions which would cause the block to exceed the maximum block size
 // configuration option, exceed the maximum allowed signature operations per
 // block, or otherwise cause the block to be invalid are skipped.
// @@ -355,7 +957,7 @@ func medianAdjustedTime(chainState *chainState, timeSource blockchain.MedianTime // |-----------------------------------| | -- // | | | // | | | -// | | |--- cfg.BlockMaxSize +// | | |--- (cfg.BlockMaxSize) / 2 // | Transactions prioritized by fee | | // | until <= cfg.TxMinFreeFee | | // | | | @@ -366,36 +968,134 @@ func medianAdjustedTime(chainState *chainState, timeSource blockchain.MedianTime // | transactions (while block size | | // | <= cfg.BlockMinSize) | | // ----------------------------------- -- -func NewBlockTemplate(mempool *txMemPool, payToAddress btcutil.Address) (*BlockTemplate, error) { +// +// TODO - DECRED +// We also need to include a stake tx tree that looks like the following: +// +// ----------------------------------- -- -- +// | | | | +// | SSGen tx | | | ----- cfg.SSGenAllocatedSize ? +// | | | | +// |-----------------------------------| | -- +// | | | +// | SStx tx | |--- (cfg.BlockMaxSize) / 2 +// | | | +// |-----------------------------------| | +// | | | +// | SSRtx tx | | +// | | | +// ----------------------------------- -- +// +// This function returns nil, nil if there are not enough voters on any of +// the current top blocks to create a new block template. +func NewBlockTemplate(mempool *txMemPool, + payToAddress dcrutil.Address) (*BlockTemplate, error) { blockManager := mempool.server.blockManager timeSource := mempool.server.timeSource chainState := &blockManager.chainState // Extend the most recently known best block. + // The most recently known best block is the top block that has the most + // ssgen votes for it. We only need this after the height in which stake voting + // has kicked in. + // To figure out which block has the most ssgen votes, we need to run the + // following algorithm: + // 1. Acquire the HEAD block and all of its orphans. Record their block header + // hashes. + // 2. Create a map of [blockHeaderHash] --> [mempoolTxnList]. + // 3. for blockHeaderHash in candidateBlocks: + // if mempoolTx.StakeDesc == SSGen && + // mempoolTx.SSGenParseBlockHeader() == blockHeaderHash: + // map[blockHeaderHash].append(mempoolTx) + // 4. Check len of each map entry and store. + // 5. Query the ticketdb and check how many eligible ticket holders there are + // for the given block you are voting on. + // 6. Divide #ofvotes (len(map entry)) / totalPossibleVotes --> penalty ratio + // 7. Store penalty ratios for all block candidates. + // 8. Select the one with the largest penalty ratio (highest block reward). + // This block is then selected to build upon instead of the others, because + // it yields the greater amount of rewards. chainState.Lock() prevHash := chainState.newestHash nextBlockHeight := chainState.newestHeight + 1 + poolSize := chainState.nextPoolSize + finalState := chainState.nextFinalState + winningTickets := make([]chainhash.Hash, len(chainState.winningTickets), + len(chainState.winningTickets)) + for i, h := range chainState.winningTickets { + winningTickets[i] = h + } + missedTickets := make([]chainhash.Hash, len(chainState.missedTickets), + len(chainState.missedTickets)) + for i, h := range chainState.missedTickets { + missedTickets[i] = h + } chainState.Unlock() - // Create a standard coinbase transaction paying to the provided - // address. NOTE: The coinbase value will be updated to include the - // fees from the selected transactions later after they have actually - // been selected. It is created here to detect any errors early - // before potentially doing a lot of work below. 
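The numbered algorithm above describes how the best parent is picked: tally the mempool SSGen votes per candidate tip, divide by the number of eligible tickets for that tip, and build on the candidate with the highest ratio. A compact sketch of that selection, using a string stand-in for chainhash.Hash and toy counts:

```go
package main

import "fmt"

// Hash stands in for chainhash.Hash in this illustration.
type Hash string

// bestParent returns the candidate tip with the highest ratio of collected
// votes to eligible tickets, mirroring the selection described above.
func bestParent(votesByBlock, eligibleByBlock map[Hash]int) (Hash, float64) {
	var best Hash
	bestRatio := -1.0
	for block, votes := range votesByBlock {
		eligible := eligibleByBlock[block]
		if eligible == 0 {
			continue
		}
		ratio := float64(votes) / float64(eligible)
		if ratio > bestRatio {
			best, bestRatio = block, ratio
		}
	}
	return best, bestRatio
}

func main() {
	votes := map[Hash]int{"tip A": 3, "tip B": 5}
	eligible := map[Hash]int{"tip A": 5, "tip B": 5}
	fmt.Println(bestParent(votes, eligible)) // tip B 1
}
```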
The extra nonce helps - // ensure the transaction is not a duplicate transaction (paying the - // same value to the same public key address would otherwise be an - // identical transaction for block version 1). - extraNonce := uint64(0) - coinbaseScript, err := standardCoinbaseScript(nextBlockHeight, extraNonce) - if err != nil { - return nil, err + // Calculate the stake enabled height. + stakeValidationHeight := mempool.server.chainParams.StakeValidationHeight + + if nextBlockHeight >= stakeValidationHeight { + // Obtain the entire generation of blocks stemming from this parent. + children, err := blockManager.GetGeneration(*prevHash) + if err != nil { + return nil, miningRuleError(ErrFailedToGetGeneration, err.Error()) + } + + // Get the list of blocks that we can actually build on top of. If we're + // not currently on the block that has the most votes, switch to that + // block. + eligibleParents, err := mempool.SortParentsByVotes(*prevHash, children) + if err != nil { + if err.(MiningRuleError).GetCode() == ErrNotEnoughVoters { + minrLog.Debugf("Too few voters found on any HEAD block, " + + "recycling a parent block to mine on") + return handleTooFewVoters(nextBlockHeight, payToAddress, + mempool.server.blockManager) + } else { + minrLog.Errorf("unexpected error while sorting eligible "+ + "parents: %v", err.Error()) + } + return nil, err + } + + minrLog.Debugf("Found eligible parent %v with enough votes to build "+ + "block on, proceeding to create a new block template", + eligibleParents[0]) + + // Force a reorganization to the parent with the most votes if we need + // to. + if !eligibleParents[0].IsEqual(prevHash) { + for _, newHead := range eligibleParents { + err := blockManager.ForceReorganization(*prevHash, newHead) + if err != nil { + minrLog.Errorf("failed to reorganize to new parent: %v", err) + } + + // Check to make sure we actually have the transactions + // (votes) we need in the mempool. + voteHashes, err := mempool.GetVoteHashesForBlock(newHead) + if err != nil { + return nil, err + } + + if exist := mempool.CheckIfTxsExist(voteHashes); !exist { + continue + } else { + prevHash = &newHead + break + } + } + } } - coinbaseTx, err := createCoinbaseTx(coinbaseScript, nextBlockHeight, - payToAddress) + + // Get the next required stake difficulty so we can determine SStx + // eligibility. + requiredStakeDifficulty, err := blockManager.CalcNextRequiredStakeDifficulty() if err != nil { - return nil, err + return nil, miningRuleError(ErrGetStakeDifficulty, "couldn't get "+ + "stake difficulty") } - numCoinbaseSigOps := int64(blockchain.CountSigOps(coinbaseTx)) // Get the current memory pool transactions and create a priority queue // to hold the transactions which are ready for inclusion into a block @@ -405,6 +1105,9 @@ func NewBlockTemplate(mempool *txMemPool, payToAddress btcutil.Address) (*BlockT // whether or not there is an area allocated for high-priority // transactions. mempoolTxns := mempool.TxDescs() + // Disable sort by fee because we sort according to tx priority first, + // which is for stake. Decred TODO: Clean this up so we pile on stake tx + // first, then sort remaining tx by fee. sortedByFee := cfg.BlockPrioritySize == 0 priorityQueue := newTxPriorityQueue(len(mempoolTxns), sortedByFee) @@ -412,8 +1115,7 @@ func NewBlockTemplate(mempool *txMemPool, payToAddress btcutil.Address) (*BlockT // generated block with reserved space. Also create a transaction // store to house all of the input transactions so multiple lookups // can be avoided. 
- blockTxns := make([]*btcutil.Tx, 0, len(mempoolTxns)) - blockTxns = append(blockTxns, coinbaseTx) + blockTxns := make([]*dcrutil.Tx, 0, len(mempoolTxns)) blockTxStore := make(blockchain.TxStore) // dependers is used to track transactions which depend on another @@ -421,7 +1123,7 @@ func NewBlockTemplate(mempool *txMemPool, payToAddress btcutil.Address) (*BlockT // dependsOn map kept with each dependent transaction helps quickly // determine which dependent transactions are now eligible for inclusion // in the block once each transaction has been included. - dependers := make(map[wire.ShaHash]*list.List) + dependers := make(map[chainhash.Hash]*list.List) // Create slices to hold the fees and number of signature operations // for each of the selected transactions and add an entry for the @@ -430,12 +1132,14 @@ func NewBlockTemplate(mempool *txMemPool, payToAddress btcutil.Address) (*BlockT // However, since the total fees aren't known yet, use a dummy value for // the coinbase fee which will be updated later. txFees := make([]int64, 0, len(mempoolTxns)) + txFeesMap := make(map[chainhash.Hash]int64) txSigOpCounts := make([]int64, 0, len(mempoolTxns)) + txSigOpCountsMap := make(map[chainhash.Hash]int64) txFees = append(txFees, -1) // Updated once known - txSigOpCounts = append(txSigOpCounts, numCoinbaseSigOps) minrLog.Debugf("Considering %d mempool transactions for inclusion to "+ "new block", len(mempoolTxns)) + treeValid := mempool.IsTxTreeValid(prevHash) mempoolLoop: for _, txDesc := range mempoolTxns { @@ -458,18 +1162,50 @@ mempoolLoop: // inputs from the mempool since a transaction which depends on // other transactions in the mempool must come after those // dependencies in the final generated block. - txStore, err := blockManager.FetchTransactionStore(tx) + txStore, err := blockManager.FetchTransactionStore(tx, treeValid) if err != nil { minrLog.Warnf("Unable to fetch transaction store for "+ "tx %s: %v", tx.Sha(), err) continue } - // Setup dependencies for any transactions which reference - // other transactions in the mempool so they can be properly - // ordered below. + // Need this for a check below for stake base input, and to check + // the ticket number. + isSSGen, _ := stake.IsSSGen(tx) + + if isSSGen, _ := stake.IsSSGen(tx); isSSGen { + blockHash, blockHeight, err := stake.GetSSGenBlockVotedOn(tx) + if err != nil { // Should theoretically never fail. + minrLog.Tracef("Skipping ssgen tx %s because of failure "+ + "to extract block voting data", tx.Sha()) + continue + } + + if !((blockHash == *prevHash) && + (int64(blockHeight) == nextBlockHeight-1)) { + minrLog.Tracef("Skipping ssgen tx %s because it does "+ + "not vote on the correct block", tx.Sha()) + continue + } + } + + // Calculate the input value age sum for the transaction. This + // is comprised of the sum all of input amounts multiplied by + // their respective age (number of confirmations since the + // referenced input transaction). While doing the above, also + // setup dependencies for any transactions which reference other + // transactions in the mempool so they can be properly ordered + // below. prioItem := &txPrioItem{tx: txDesc.Tx} - for _, txIn := range tx.MsgTx().TxIn { + inputValueAge := float64(0.0) + for i, txIn := range tx.MsgTx().TxIn { + // Evaluate if this is a stakebase input or not. If it is, continue + // without evaluation of the input. 
+ // if isStakeBase + if isSSGen && (i == 0) { + continue + } + originHash := &txIn.PreviousOutPoint.Hash originIndex := txIn.PreviousOutPoint.Index txData, exists := txStore[*originHash] @@ -493,12 +1229,13 @@ mempoolLoop: depList.PushBack(prioItem) if prioItem.dependsOn == nil { prioItem.dependsOn = make( - map[wire.ShaHash]struct{}) + map[chainhash.Hash]struct{}) } prioItem.dependsOn[*originHash] = struct{}{} - // Skip the check below. We already know the - // referenced transaction is available. + // No need to calculate or sum input value age + // for this input since it's zero due to + // the input age multiplier of 0. continue } @@ -512,22 +1249,47 @@ mempoolLoop: originIndex, originHash) continue mempoolLoop } + + // Sum the input value times age. + originTxOut := txData.Tx.MsgTx().TxOut[originIndex] + inputValue := originTxOut.Value + inputAge := nextBlockHeight - txData.BlockHeight + inputValueAge += float64(inputValue * inputAge) } // Calculate the final transaction priority using the input // value age sum as well as the adjusted transaction size. The // formula is: sum(inputValue * inputAge) / adjustedTxSize - prioItem.priority = txDesc.CurrentPriority(txStore, nextBlockHeight) + txSize := tx.MsgTx().SerializeSize() + prioItem.priority = calcPriority(tx, inputValueAge) - // Calculate the fee in Satoshi/KB. + // Votes are extremely high priority. Hackish fix, this should be + // better fixed later. Other stake transactions should next be + // added. TODO Make appropriate heaps for the other transaction + // types, or a new priority class. + if isSSGen { + prioItem.priority += (math.MaxFloat64 / 1.5) + } else if tx.Tree() == dcrutil.TxTreeStake { + // Prioritize other stake tx below this. + prioItem.priority += 1.0e9 + } + + // Calculate the fee in Atoms/KB. // NOTE: This is a more precise value than the one calculated // during calcMinRelayFee which rounds up to the nearest full // kilobyte boundary. This is beneficial since it provides an // incentive to create smaller transactions. - txSize := tx.MsgTx().SerializeSize() prioItem.feePerKB = float64(txDesc.Fee) / (float64(txSize) / 1000) prioItem.fee = txDesc.Fee + // Votes are extremely high priority. Hackish fix by Decred. + if isSSGen { + prioItem.feePerKB += (math.MaxFloat64 / 1.5) + } else if tx.Tree() == dcrutil.TxTreeStake { + // Prioritize other stake tx below this. + prioItem.feePerKB += 1.0e9 + } + // Add the transaction to the priority queue to mark it ready // for inclusion in the block unless it has dependencies. if prioItem.dependsOn == nil { @@ -546,9 +1308,32 @@ mempoolLoop: // The starting block size is the size of the block header plus the max // possible transaction count size, plus the size of the coinbase // transaction. - blockSize := blockHeaderOverhead + uint32(coinbaseTx.MsgTx().SerializeSize()) - blockSigOps := numCoinbaseSigOps + blockSize := uint32(blockHeaderOverhead) + + // Guesstimate for sigops based on valid txs in loop below. This number + // tends to overestimate sigops because of the way the loop below is + // coded and the fact that tx can sometimes be removed from the tx + // trees if they fail one of the stake checks below the priorityQueue + // pop loop. This is buggy, but not catastrophic behaviour. A future + // release should fix it. 
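The code above computes a base priority of sum(inputValue * inputAge) over the transaction size (via calcPriority, which is not shown in this diff and may use an adjusted size) and then pushes stake transactions to the front of the queue with large offsets: votes get math.MaxFloat64/1.5 and other stake transactions get 1e9. A sketch of the resulting ordering:

```go
package main

import (
	"fmt"
	"math"
)

// effectivePriority is a rough stand-in for the prioritization applied above.
func effectivePriority(inputValueAge float64, txSize int, isVote, isStake bool) float64 {
	priority := inputValueAge / float64(txSize)
	switch {
	case isVote:
		// Votes are pushed ahead of everything else.
		priority += math.MaxFloat64 / 1.5
	case isStake:
		// Other stake transactions come next.
		priority += 1.0e9
	}
	return priority
}

func main() {
	regular := effectivePriority(5.0e10, 250, false, false)
	ticket := effectivePriority(5.0e10, 250, false, true)
	vote := effectivePriority(5.0e10, 250, true, false)
	fmt.Println(regular < ticket, ticket < vote) // true true
}
```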
TODO + blockSigOps := int64(0) totalFees := int64(0) + var minTxRelayFee dcrutil.Amount + switch { + case mempool.server.chainParams == &chaincfg.MainNetParams: + minTxRelayFee = minTxRelayFeeMainNet + case mempool.server.chainParams == &chaincfg.MainNetParams: + minTxRelayFee = minTxRelayFeeTestNet + default: + minTxRelayFee = minTxRelayFeeTestNet + } + + numSStx := 0 + + foundWinningTickets := make(map[chainhash.Hash]bool, len(winningTickets)) + for _, ticketHash := range winningTickets { + foundWinningTickets[ticketHash] = false + } // Choose which transactions make it into the block. for priorityQueue.Len() > 0 { @@ -557,6 +1342,15 @@ mempoolLoop: prioItem := heap.Pop(priorityQueue).(*txPrioItem) tx := prioItem.tx + // Store if this is an SStx or not. + isSStx, err := stake.IsSStx(tx) + + // Store if this is an SSGen or not. + isSSGen, err := stake.IsSSGen(tx) + + // Store if this is an SSRtx or not. + isSSRtx, err := stake.IsSSRtx(tx) + // Grab the list of transactions which depend on this one (if // any) and remove the entry for this transaction as it will // either be included or skipped, but in either case the deps @@ -564,19 +1358,44 @@ mempoolLoop: deps := dependers[*tx.Sha()] delete(dependers, *tx.Sha()) + // Skip if we already have too many SStx. + if isSStx && (numSStx > int(math.MaxUint8)) { + minrLog.Tracef("Skipping sstx %s because it would exceed "+ + "the max number of sstx allowed in a block", tx.Sha()) + logSkippedDeps(tx, deps) + continue + } + + // Skip if the SStx commit value is below the value required by the + // stake diff. + if isSStx && (tx.MsgTx().TxOut[0].Value < requiredStakeDifficulty) { + continue + } + + // Skip all missed tickets that we've never heard of. + if isSSRtx { + ticketHash := &tx.MsgTx().TxIn[0].PreviousOutPoint.Hash + + if !hashExistsInList(ticketHash, missedTickets) { + continue + } + } + // Enforce maximum block size. Also check for overflow. txSize := uint32(tx.MsgTx().SerializeSize()) blockPlusTxSize := blockSize + txSize - if blockPlusTxSize < blockSize || blockPlusTxSize >= cfg.BlockMaxSize { - minrLog.Tracef("Skipping tx %s because it would exceed "+ - "the max block size", tx.Sha()) + if blockPlusTxSize < blockSize || blockPlusTxSize >= + uint32(mempool.server.chainParams.MaximumBlockSize) { + minrLog.Tracef("Skipping tx %s (size %v) because it would exceed "+ + "the max block size; cur block size %v, cur num tx %v", tx.Sha(), + txSize, blockSize, len(blockTxns)) logSkippedDeps(tx, deps) continue } // Enforce maximum signature operations per block. Also check // for overflow. - numSigOps := int64(blockchain.CountSigOps(tx)) + numSigOps := int64(blockchain.CountSigOps(tx, false, isSSGen)) if blockSigOps+numSigOps < blockSigOps || blockSigOps+numSigOps > blockchain.MaxSigOpsPerBlock { minrLog.Tracef("Skipping tx %s because it would "+ @@ -584,8 +1403,11 @@ mempoolLoop: logSkippedDeps(tx, deps) continue } + + // This isn't very expensive, but we do this check a number of times. + // Consider caching this in the mempool in the future. - Decred numP2SHSigOps, err := blockchain.CountP2SHSigOps(tx, false, - blockTxStore) + isSSGen, blockTxStore) if err != nil { minrLog.Tracef("Skipping tx %s due to error in "+ "CountP2SHSigOps: %v", tx.Sha(), err) @@ -602,10 +1424,30 @@ mempoolLoop: continue } + // Check to see if the SSGen tx actually uses a ticket that is + // valid for the next block. 
+ if isSSGen { + if foundWinningTickets[tx.MsgTx().TxIn[1].PreviousOutPoint.Hash] { + continue + } + msgTx := tx.MsgTx() + isEligible := false + for _, sstxHash := range winningTickets { + if sstxHash.IsEqual(&msgTx.TxIn[1].PreviousOutPoint.Hash) { + isEligible = true + } + } + + if !isEligible { + continue + } + } + // Skip free transactions once the block is larger than the - // minimum block size. - if sortedByFee && prioItem.feePerKB < minTxRelayFee && - blockPlusTxSize >= cfg.BlockMinSize { + // minimum block size, except for stake transactions. + if sortedByFee && (prioItem.feePerKB < float64(minTxRelayFee)) && + (tx.Tree() != dcrutil.TxTreeStake) && + (blockPlusTxSize >= cfg.BlockMinSize) { minrLog.Tracef("Skipping tx %s with feePerKB %.2f "+ "< minTxRelayFee %d and block size %d >= "+ @@ -647,8 +1489,11 @@ mempoolLoop: // Ensure the transaction inputs pass all of the necessary // preconditions before allowing it to be added to the block. - _, err = blockchain.CheckTransactionInputs(tx, nextBlockHeight, - blockTxStore) + _, err = blockchain.CheckTransactionInputs(tx, + nextBlockHeight, + blockTxStore, + false, // Don't check fraud proofs; missing ones are filled out below + mempool.server.chainParams) if err != nil { minrLog.Tracef("Skipping tx %s due to error in "+ "CheckTransactionInputs: %v", tx.Sha(), err) @@ -676,9 +1521,17 @@ mempoolLoop: blockTxns = append(blockTxns, tx) blockSize += txSize blockSigOps += numSigOps - totalFees += prioItem.fee - txFees = append(txFees, prioItem.fee) - txSigOpCounts = append(txSigOpCounts, numSigOps) + + // Accumulate the SStxs in the block, because only 255 are allowed. + if isSStx { + numSStx++ + } + if isSSGen { + foundWinningTickets[tx.MsgTx().TxIn[1].PreviousOutPoint.Hash] = true + } + + txFeesMap[*tx.Sha()] = prioItem.fee + txSigOpCountsMap[*tx.Sha()] = numSigOps minrLog.Tracef("Adding tx %s (priority %.2f, feePerKB %.2f)", prioItem.tx.Sha(), prioItem.priority, prioItem.feePerKB) @@ -700,63 +1553,450 @@ mempoolLoop: } } + // Build tx list for stake tx. + blockTxnsStake := make([]*dcrutil.Tx, 0, len(blockTxns)) + + // Stake tx ordering in stake tree: + // 1. SSGen (votes). + // 2. SStx (fresh stake tickets). + // 3. SSRtx (revocations for missed tickets). + + // Get the block votes (SSGen tx) and store them and their number. + voters := 0 + voteBitsVoters := make([]uint16, 0) + + for _, tx := range blockTxns { + if nextBlockHeight < stakeValidationHeight { + break // No SSGen should be present before this height. + } + + if isSSGen, _ := stake.IsSSGen(tx); isSSGen { + txCopy := dcrutil.NewTxDeepTxIns(tx.MsgTx()) + if maybeInsertStakeTx(mempool, txCopy, treeValid) { + vb := stake.GetSSGenVoteBits(txCopy) + voteBitsVoters = append(voteBitsVoters, vb) + blockTxnsStake = append(blockTxnsStake, txCopy) + voters++ + } + } + + // Don't let this overflow, although probably it's impossible. + if voters >= math.MaxUint16 { + break + } + } + + // Set votebits, which determines whether the TxTreeRegular of the previous + // block is valid or not. + var votebits uint16 + if nextBlockHeight < stakeValidationHeight { + votebits = uint16(0x0001) // TxTreeRegular enabled pre-staking + } else { + // Otherwise, we need to check the votes to determine if the tx tree was + // validated or not. + voteYea := 0 + totalVotes := 0 + + for _, vb := range voteBitsVoters { + if dcrutil.IsFlagSet16(vb, dcrutil.BlockValid) { + voteYea++ + } + totalVotes++ + } + + if voteYea == 0 { // Handle zero case for div by zero error prevention. 
+ votebits = uint16(0x0000) // TxTreeRegular disabled + } else if (totalVotes / voteYea) <= 1 { + votebits = uint16(0x0001) // TxTreeRegular enabled + } else { + votebits = uint16(0x0000) // TxTreeRegular disabled + } + + if votebits == uint16(0x0000) { + // In the event TxTreeRegular is disabled, we need to remove all tx + // in the current block that depend on tx from the TxTreeRegular of + // the previous block. + // DECRED WARNING: The ideal behaviour should also be that we re-add + // all tx that we just removed from the previous block into our + // current block template. Right now this code fails to do that; + // these tx will then be included in the next block, which isn't + // catastrophic but is kind of buggy. + + // Retrieve the current top block, whose TxTreeRegular was voted + // out. + // Decred TODO: This is super inefficient, this block should be + // cached and stored somewhere. + topBlock, err := blockManager.GetTopBlockFromChain() + if err != nil { + return nil, miningRuleError(ErrGetTopBlock, "couldn't get "+ + "top block") + } + topBlockRegTx := topBlock.Transactions() + + tempBlockTxns := make([]*dcrutil.Tx, 0, len(mempoolTxns)) + for _, tx := range blockTxns { + if tx.Tree() == dcrutil.TxTreeRegular { + // Go through all the inputs and check to see if this mempool + // tx uses outputs from the parent block. This loop is + // probably very expensive. + isValid := true + for _, txIn := range tx.MsgTx().TxIn { + for _, parentTx := range topBlockRegTx { + if txIn.PreviousOutPoint.Hash.IsEqual( + parentTx.Sha()) { + isValid = false + } + } + } + + if isValid { + txCopy := dcrutil.NewTxDeepTxIns(tx.MsgTx()) + tempBlockTxns = append(tempBlockTxns, txCopy) + } + } else { + txCopy := dcrutil.NewTxDeepTxIns(tx.MsgTx()) + tempBlockTxns = append(tempBlockTxns, txCopy) + } + } + + // Replace blockTxns with the pruned list of valid mempool tx. + blockTxns = tempBlockTxns + } + } + + // Get the newly purchased tickets (SStx tx) and store them and their number. + freshStake := 0 + for _, tx := range blockTxns { + isSStx, _ := stake.IsSStx(tx) + if tx.Tree() == dcrutil.TxTreeStake && isSStx { + // A ticket can not spend an input from TxTreeRegular, since it + // has not yet been validated. + if containsTxIns(blockTxns, tx) { + continue + } + + // Quick check for difficulty here. + if tx.MsgTx().TxOut[0].Value >= requiredStakeDifficulty { + txCopy := dcrutil.NewTxDeepTxIns(tx.MsgTx()) + if maybeInsertStakeTx(mempool, txCopy, treeValid) { + blockTxnsStake = append(blockTxnsStake, txCopy) + freshStake++ + } + } + } + + // Don't let this overflow. + if freshStake >= int(mempool.server.chainParams.MaxFreshStakePerBlock) { + break + } + } + + // Get the ticket revocations (SSRtx tx) and store them and their number. + revocations := 0 + for _, tx := range blockTxns { + if nextBlockHeight < stakeValidationHeight { + break // No SSRtx should be present before this height. + } + + isSSRtx, _ := stake.IsSSRtx(tx) + if tx.Tree() == dcrutil.TxTreeStake && isSSRtx { + txCopy := dcrutil.NewTxDeepTxIns(tx.MsgTx()) + if maybeInsertStakeTx(mempool, txCopy, treeValid) { + blockTxnsStake = append(blockTxnsStake, txCopy) + revocations++ + } + } + + // Don't let this overflow. + if revocations >= math.MaxUint8 { + break + } + } + + // Create a standard coinbase transaction paying to the provided + // address. NOTE: The coinbase value will be updated to include the + // fees from the selected transactions later after they have actually + // been selected. 
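The vote tally above only marks the parent's regular transaction tree valid when a strict majority of the collected vote bits have the BlockValid flag set; the (totalVotes / voteYea) <= 1 integer division is what encodes "strict majority". A sketch, assuming BlockValid is the low bit, consistent with the 0x0001/0x0000 values used for the header VoteBits:

```go
package main

import "fmt"

// blockValid stands in for the dcrutil.BlockValid flag bit, assumed here to
// be the low bit of the vote bits.
const blockValid = 0x0001

// regularTreeValid reports whether a strict majority of votes approve the
// parent block's regular transaction tree.
func regularTreeValid(voteBits []uint16) bool {
	voteYea := 0
	for _, vb := range voteBits {
		if vb&blockValid != 0 {
			voteYea++
		}
	}
	if voteYea == 0 {
		return false
	}
	// Integer division: true exactly when voteYea > len(voteBits)/2.
	return len(voteBits)/voteYea <= 1
}

func main() {
	fmt.Println(regularTreeValid([]uint16{1, 1, 1, 0, 0})) // true  (3 of 5)
	fmt.Println(regularTreeValid([]uint16{1, 1, 0, 0, 0})) // false (2 of 5)
}
```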
It is created here to detect any errors early + // before potentially doing a lot of work below. The extra nonce helps + // ensure the transaction is not a duplicate transaction (paying the + // same value to the same public key address would otherwise be an + // identical transaction for block version 1). + // Decred: We need to move this downwards because of the requirements + // to incorporate voters and potential voters. + coinbaseScript := []byte{0x00, 0x00} + coinbaseScript = append(coinbaseScript, []byte(coinbaseFlags)...) + + // Add a random coinbase nonce to ensure that tx prefix hash + // so that our merkle root is unique for lookups needed for + // getwork, etc. + rand, err := wire.RandomUint64() + if err != nil { + return nil, err + } + opReturnPkScript, err := standardCoinbaseOpReturn(uint32(nextBlockHeight), + []uint64{0, 0, 0, rand}) + if err != nil { + return nil, err + } + coinbaseTx, err := createCoinbaseTx(coinbaseScript, + opReturnPkScript, + nextBlockHeight, + payToAddress, + uint16(voters), + mempool.server.chainParams) + if err != nil { + return nil, err + } + + coinbaseTx.SetTree(dcrutil.TxTreeRegular) // Coinbase only in regular tx tree + if err != nil { + return nil, err + } + numCoinbaseSigOps := int64(blockchain.CountSigOps(coinbaseTx, true, false)) + blockSize += uint32(coinbaseTx.MsgTx().SerializeSize()) + blockSigOps += numCoinbaseSigOps + txFeesMap[*coinbaseTx.Sha()] = 0 + txSigOpCountsMap[*coinbaseTx.Sha()] = numCoinbaseSigOps + + // Build tx lists for regular tx. + blockTxnsRegular := make([]*dcrutil.Tx, 0, len(blockTxns)+1) + + // Append coinbase. + blockTxnsRegular = append(blockTxnsRegular, coinbaseTx) + + // Assemble the two transaction trees. + for _, tx := range blockTxns { + if tx.Tree() == dcrutil.TxTreeRegular { + blockTxnsRegular = append(blockTxnsRegular, tx) + } else if tx.Tree() == dcrutil.TxTreeStake { + continue + } else { + minrLog.Tracef("Error adding tx %s to block; invalid tree", tx.Sha()) + continue + } + } + + for _, tx := range blockTxnsRegular { + fee, ok := txFeesMap[*tx.Sha()] + if !ok { + return nil, fmt.Errorf("couldn't find fee for tx %v", + *tx.Sha()) + } + totalFees += fee + txFees = append(txFees, fee) + + tsos, ok := txSigOpCountsMap[*tx.Sha()] + if !ok { + return nil, fmt.Errorf("couldn't find sig ops count for tx %v", + *tx.Sha()) + } + txSigOpCounts = append(txSigOpCounts, tsos) + } + + for _, tx := range blockTxnsStake { + fee, ok := txFeesMap[*tx.Sha()] + if !ok { + return nil, fmt.Errorf("couldn't find fee for stx %v", + *tx.Sha()) + } + totalFees += fee + txFees = append(txFees, fee) + + tsos, ok := txSigOpCountsMap[*tx.Sha()] + if !ok { + return nil, fmt.Errorf("couldn't find sig ops count for stx %v", + *tx.Sha()) + } + txSigOpCounts = append(txSigOpCounts, tsos) + } + + txSigOpCounts = append(txSigOpCounts, numCoinbaseSigOps) + + // If we're greater than or equal to stake validation height, scale the + // fees according to the number of voters. + totalFees *= int64(voters) + totalFees /= int64(mempool.server.chainParams.TicketsPerBlock) + // Now that the actual transactions have been selected, update the // block size for the real transaction count and coinbase value with // the total fees accordingly. 
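Once stake validation is active, the fees kept by the miner are scaled by how many of the possible votes actually made it into the block, as done just above. The same arithmetic in isolation, assuming mainnet's five tickets per block:

```go
package main

import "fmt"

// scaleFees applies the voter-proportional fee reduction used above.
func scaleFees(totalFees int64, voters, ticketsPerBlock uint16) int64 {
	totalFees *= int64(voters)
	return totalFees / int64(ticketsPerBlock)
}

func main() {
	fmt.Println(scaleFees(1000000, 5, 5)) // 1000000: all votes present
	fmt.Println(scaleFees(1000000, 3, 5)) // 600000:  only 3 of 5 votes present
}
```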
- blockSize -= wire.MaxVarIntPayload - - uint32(wire.VarIntSerializeSize(uint64(len(blockTxns)))) - coinbaseTx.MsgTx().TxOut[0].Value += totalFees - txFees[0] = -totalFees + if nextBlockHeight > 1 { + blockSize -= wire.MaxVarIntPayload - + uint32(wire.VarIntSerializeSize(uint64(len(blockTxnsRegular))+ + uint64(len(blockTxnsStake)))) + coinbaseTx.MsgTx().TxOut[2].Value += totalFees + txFees[0] = -totalFees + } // Calculate the required difficulty for the block. The timestamp // is potentially adjusted to ensure it comes after the median time of // the last several blocks per the chain consensus rules. ts, err := medianAdjustedTime(chainState, timeSource) if err != nil { - return nil, err + return nil, miningRuleError(ErrGettingMedianTime, err.Error()) } + requiredDifficulty, err := blockManager.CalcNextRequiredDifficulty(ts) if err != nil { - return nil, err + return nil, miningRuleError(ErrGettingDifficulty, err.Error()) + } + + // Return nil if we don't yet have enough voters; sometimes it takes a + // bit for the mempool to sync with the votes map and we end up down + // here despite having the relevant votes available in the votes map. + minimumVotesRequired := + int((mempool.server.chainParams.TicketsPerBlock / 2) + 1) + if nextBlockHeight >= stakeValidationHeight && + voters < minimumVotesRequired { + minrLog.Debugf("incongruent number of voters in mempool " + + "vs mempool.voters; not enough voters found") + return nil, nil + } + + // Correct transaction index fraud proofs for any transactions that + // are chains. maybeInsertStakeTx fills this in for stake transactions + // already, so only do it for regular transactions. + for i, tx := range blockTxnsRegular { + txs, err := blockManager.FetchTransactionStore(tx, treeValid) + if err != nil { + str := fmt.Sprintf("failed to fetch tx store for tx %v", + tx.Sha()) + return nil, miningRuleError(ErrFetchTxStore, str) + } + + // Copy the transaction and swap the pointer. + txCopy := dcrutil.NewTxDeepTxIns(tx.MsgTx()) + blockTxnsRegular[i] = txCopy + tx = txCopy + + for _, txIn := range tx.MsgTx().TxIn { + originHash := &txIn.PreviousOutPoint.Hash + txData, exists := txs[*originHash] + if !exists || txData.Err != nil || txData.Tx == nil { + // Set a flag with the index so we can properly set + // the fraud proof below. + txIn.BlockIndex = wire.NullBlockIndex + } else { + originIdx := txIn.PreviousOutPoint.Index + txIn.ValueIn = txData.Tx.MsgTx().TxOut[originIdx].Value + txIn.BlockHeight = uint32(txData.BlockHeight) + txIn.BlockIndex = txData.BlockIndex + } + } + } + + // Fill in locally referenced inputs. + for i, tx := range blockTxnsRegular { + // Skip coinbase. + if i == 0 { + continue + } + + // Copy the transaction and swap the pointer. + txCopy := dcrutil.NewTxDeepTxIns(tx.MsgTx()) + blockTxnsRegular[i] = txCopy + tx = txCopy + + for _, txIn := range tx.MsgTx().TxIn { + // This tx was at some point 0-conf and now requires the + // correct block height and index. Set it here. + if txIn.BlockIndex == wire.NullBlockIndex { + idx := txIndexFromTxList(txIn.PreviousOutPoint.Hash, + blockTxnsRegular) + + // The input is in the block, set it accordingly. 
+ if idx != -1 { + originIdx := txIn.PreviousOutPoint.Index + amt := blockTxnsRegular[idx].MsgTx().TxOut[originIdx].Value + txIn.ValueIn = amt + txIn.BlockHeight = uint32(nextBlockHeight) + txIn.BlockIndex = uint32(idx) + } else { + str := fmt.Sprintf("failed find hash in tx list "+ + "for fraud proof; tx in hash %v", + txIn.PreviousOutPoint.Hash) + return nil, miningRuleError(ErrFraudProofIndex, str) + } + } + } } // Create a new block ready to be solved. - merkles := blockchain.BuildMerkleTreeStore(blockTxns) + merkles := blockchain.BuildMerkleTreeStore(blockTxnsRegular) + merklesStake := blockchain.BuildMerkleTreeStore(blockTxnsStake) + var msgBlock wire.MsgBlock msgBlock.Header = wire.BlockHeader{ - Version: generatedBlockVersion, - PrevBlock: *prevHash, - MerkleRoot: *merkles[len(merkles)-1], - Timestamp: ts, - Bits: requiredDifficulty, + Version: mempool.server.chainParams.CurrentBlockVersion, + PrevBlock: *prevHash, + MerkleRoot: *merkles[len(merkles)-1], + StakeRoot: *merklesStake[len(merklesStake)-1], + VoteBits: votebits, + FinalState: finalState, + Voters: uint16(voters), + FreshStake: uint8(freshStake), + Revocations: uint8(revocations), + PoolSize: poolSize, + Timestamp: ts, + SBits: requiredStakeDifficulty, + Bits: requiredDifficulty, + Height: uint32(nextBlockHeight), + // Size declared below } - for _, tx := range blockTxns { + + for _, tx := range blockTxnsRegular { if err := msgBlock.AddTransaction(tx.MsgTx()); err != nil { - return nil, err + return nil, miningRuleError(ErrTransactionAppend, err.Error()) } } + for _, tx := range blockTxnsStake { + if err := msgBlock.AddSTransaction(tx.MsgTx()); err != nil { + return nil, miningRuleError(ErrTransactionAppend, err.Error()) + } + } + + msgBlock.Header.Size = uint32(msgBlock.SerializeSize()) + // Finally, perform a full check on the created block against the chain // consensus rules to ensure it properly connects to the current best // chain with no issues. 
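The two loops above fill in the fraud-proof fields on each regular input: if the referenced output is already mined, its height, index, and value come from the transaction store; if it references another transaction in this same template, the in-block index and the next block height are used, with wire.NullBlockIndex acting as the "unresolved" marker in between. A toy version of that resolution, with a local sentinel standing in for wire.NullBlockIndex:

```go
package main

import "fmt"

// nullBlockIndex is a local sentinel playing the role of wire.NullBlockIndex.
const nullBlockIndex = ^uint32(0)

type input struct {
	prevTx      string
	blockHeight uint32
	blockIndex  uint32
}

// fillFraudProof resolves an input either against already-mined transactions
// or against the transactions included in the block being built.
func fillFraudProof(in *input, minedHeight map[string]uint32, blockTxs []string, nextHeight uint32) {
	if h, ok := minedHeight[in.prevTx]; ok {
		// Already mined: the real code also copies the index and input
		// value from the transaction store.
		in.blockHeight = h
		return
	}
	in.blockIndex = nullBlockIndex
	for i, tx := range blockTxs {
		if tx == in.prevTx {
			// Spends another transaction in this very block.
			in.blockHeight = nextHeight
			in.blockIndex = uint32(i)
			return
		}
	}
}

func main() {
	in := &input{prevTx: "b"}
	fillFraudProof(in, map[string]uint32{"a": 1000}, []string{"coinbase", "b"}, 1234)
	fmt.Println(in.blockHeight, in.blockIndex) // 1234 1
}
```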
- block := btcutil.NewBlock(&msgBlock) + block := dcrutil.NewBlockDeepCopyCoinbase(&msgBlock) block.SetHeight(nextBlockHeight) - if err := blockManager.CheckConnectBlock(block); err != nil { - return nil, err + + if err := blockchain.CheckWorklessBlockSanity(block, + mempool.server.timeSource, + mempool.server.chainParams); err != nil { + str := fmt.Sprintf("failed to do final check for block workless "+ + "sanity when making new block template: %v", + err.Error()) + return nil, miningRuleError(ErrCheckConnectBlock, str) } - minrLog.Debugf("Created new block template (%d transactions, %d in "+ - "fees, %d signature operations, %d bytes, target difficulty "+ - "%064x)", len(msgBlock.Transactions), totalFees, blockSigOps, - blockSize, blockchain.CompactToBig(msgBlock.Header.Bits)) + if err := blockManager.CheckConnectBlock(block); err != nil { + str := fmt.Sprintf("failed to do final check for check connect "+ + "block when making new block template: %v", + err.Error()) + return nil, miningRuleError(ErrCheckConnectBlock, str) + } - return &BlockTemplate{ + minrLog.Debugf("Created new block template (%d transactions, %d "+ + "stake transactions, %d in fees, %d signature operations, "+ + "%d bytes, target difficulty %064x, stake difficulty %v)", + len(msgBlock.Transactions), len(msgBlock.STransactions), + totalFees, blockSigOps, blockSize, + blockchain.CompactToBig(msgBlock.Header.Bits), + dcrutil.Amount(msgBlock.Header.SBits).ToCoin()) + + blockTemplate := &BlockTemplate{ block: &msgBlock, fees: txFees, sigOpCounts: txSigOpCounts, height: nextBlockHeight, validPayAddress: payToAddress != nil, - }, nil + } + + return handleCreatedBlockTemplate(blockTemplate, mempool.server.blockManager) } // UpdateBlockTime updates the timestamp in the header of the passed block to @@ -772,7 +2012,7 @@ func UpdateBlockTime(msgBlock *wire.MsgBlock, bManager *blockManager) error { newTimestamp, err := medianAdjustedTime(&bManager.chainState, bManager.server.timeSource) if err != nil { - return err + return miningRuleError(ErrGettingMedianTime, err.Error()) } msgBlock.Header.Timestamp = newTimestamp @@ -781,38 +2021,10 @@ func UpdateBlockTime(msgBlock *wire.MsgBlock, bManager *blockManager) error { if activeNetParams.ResetMinDifficulty { difficulty, err := bManager.CalcNextRequiredDifficulty(newTimestamp) if err != nil { - return err + return miningRuleError(ErrGettingDifficulty, err.Error()) } msgBlock.Header.Bits = difficulty } return nil } - -// UpdateExtraNonce updates the extra nonce in the coinbase script of the passed -// block by regenerating the coinbase script with the passed value and block -// height. It also recalculates and updates the new merkle root that results -// from changing the coinbase script. -func UpdateExtraNonce(msgBlock *wire.MsgBlock, blockHeight int64, extraNonce uint64) error { - coinbaseScript, err := standardCoinbaseScript(blockHeight, extraNonce) - if err != nil { - return err - } - if len(coinbaseScript) > blockchain.MaxCoinbaseScriptLen { - return fmt.Errorf("coinbase transaction script length "+ - "of %d is out of range (min: %d, max: %d)", - len(coinbaseScript), blockchain.MinCoinbaseScriptLen, - blockchain.MaxCoinbaseScriptLen) - } - msgBlock.Transactions[0].TxIn[0].SignatureScript = coinbaseScript - - // TODO(davec): A btcutil.Block should use saved in the state to avoid - // recalculating all of the other transaction hashes. - // block.Transactions[0].InvalidateCache() - - // Recalculate the merkle root with the updated extra nonce. 
-	block := btcutil.NewBlock(msgBlock)
-	merkles := blockchain.BuildMerkleTreeStore(block.Transactions())
-	msgBlock.Header.MerkleRoot = *merkles[len(merkles)-1]
-	return nil
-}
diff --git a/mining_test.go b/mining_test.go
new file mode 100644
index 00000000..e7867cea
--- /dev/null
+++ b/mining_test.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2015 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"fmt"
+	"testing"
+)
+
+func TestMine(t *testing.T) {
+	fmt.Println("Hello World!")
+}
diff --git a/miningerror.go b/miningerror.go
new file mode 100644
index 00000000..39f7a267
--- /dev/null
+++ b/miningerror.go
@@ -0,0 +1,116 @@
+// Copyright (c) 2015 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"fmt"
+)
+
+// MiningErrorCode identifies a kind of error.
+type MiningErrorCode int
+
+// These constants are used to identify a specific MiningRuleError.
+const (
+	// ErrNotEnoughVoters indicates that there were not enough voters to
+	// build a block on top of HEAD.
+	ErrNotEnoughVoters MiningErrorCode = iota
+
+	// ErrFailedToGetGeneration specifies that the current generation for
+	// a block could not be obtained from blockchain.
+	ErrFailedToGetGeneration
+
+	// ErrGetStakeDifficulty indicates that the current stake difficulty
+	// could not be obtained.
+	ErrGetStakeDifficulty
+
+	// ErrGetTopBlock indicates that the current top block of the
+	// blockchain could not be obtained.
+	ErrGetTopBlock
+
+	// ErrCreatingCoinbase indicates that there was a problem generating
+	// the coinbase.
+	ErrCreatingCoinbase
+
+	// ErrGettingMedianTime indicates that the server was unable to get the
+	// median adjusted time for the network.
+	ErrGettingMedianTime
+
+	// ErrGettingDifficulty indicates that there was an error getting the
+	// PoW difficulty.
+	ErrGettingDifficulty
+
+	// ErrTransactionAppend indicates there was a problem adding a msgtx
+	// to a msgblock.
+	ErrTransactionAppend
+
+	// ErrCheckBlockSanity indicates that a newly created block template
+	// failed blockchain.CheckBlockSanity.
+	ErrCheckBlockSanity
+
+	// ErrCheckConnectBlock indicates that a newly created block template
+	// failed blockchain.CheckConnectBlock.
+	ErrCheckConnectBlock
+
+	// ErrCoinbaseLengthOverflow indicates that a coinbase length was overflowed,
+	// probably as a result of incrementing the extranonce.
+	ErrCoinbaseLengthOverflow
+
+	// ErrFraudProofIndex indicates that there was an error finding the index
+	// for a fraud proof.
+	ErrFraudProofIndex
+
+	// ErrFetchTxStore indicates a transaction store failed to fetch.
+	ErrFetchTxStore
+)
+
+// Map of MiningErrorCode values back to their constant names for pretty printing.
+var miningErrorCodeStrings = map[MiningErrorCode]string{
+	ErrNotEnoughVoters:        "ErrNotEnoughVoters",
+	ErrFailedToGetGeneration:  "ErrFailedToGetGeneration",
+	ErrGetStakeDifficulty:     "ErrGetStakeDifficulty",
+	ErrGetTopBlock:            "ErrGetTopBlock",
+	ErrCreatingCoinbase:       "ErrCreatingCoinbase",
+	ErrGettingMedianTime:      "ErrGettingMedianTime",
+	ErrGettingDifficulty:      "ErrGettingDifficulty",
+	ErrTransactionAppend:      "ErrTransactionAppend",
+	ErrCheckBlockSanity:       "ErrCheckBlockSanity",
+	ErrCheckConnectBlock:      "ErrCheckConnectBlock",
+	ErrCoinbaseLengthOverflow: "ErrCoinbaseLengthOverflow",
+	ErrFraudProofIndex:        "ErrFraudProofIndex",
+	ErrFetchTxStore:           "ErrFetchTxStore",
+}
+
+// String returns the MiningErrorCode as a human-readable name.
+func (e MiningErrorCode) String() string {
+	if s := miningErrorCodeStrings[e]; s != "" {
+		return s
+	}
+	return fmt.Sprintf("Unknown MiningErrorCode (%d)", int(e))
+}
+
+// MiningRuleError identifies a rule violation. It is used to indicate that
+// processing of a block or transaction failed due to one of the many validation
+// rules. The caller can use type assertions to determine if a failure was
+// specifically due to a rule violation and access the MiningErrorCode field to
+// ascertain the specific reason for the rule violation.
+type MiningRuleError struct {
+	ErrorCode   MiningErrorCode // Describes the kind of error
+	Description string          // Human readable description of the issue
+}
+
+// Error satisfies the error interface and prints human-readable errors.
+func (e MiningRuleError) Error() string {
+	return e.Description
+}
+
+// GetCode returns the MiningErrorCode describing the kind of error that occurred.
+func (e MiningRuleError) GetCode() MiningErrorCode {
+	return e.ErrorCode
+}
+
+// miningRuleError creates a MiningRuleError given a set of arguments.
+func miningRuleError(c MiningErrorCode, desc string) MiningRuleError {
+	return MiningRuleError{ErrorCode: c, Description: desc}
+}
diff --git a/mruinvmap.go b/mruinvmap.go
index dba371a5..281da9b1 100644
--- a/mruinvmap.go
+++ b/mruinvmap.go
@@ -1,4 +1,5 @@
 // Copyright (c) 2013-2014 The btcsuite developers
+// Copyright (c) 2015 The Decred developers
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.
 
@@ -8,7 +9,7 @@ import (
 	"container/list"
 	"fmt"
 
-	"github.com/btcsuite/btcd/wire"
+	"github.com/decred/dcrd/wire"
 )
 
 // MruInventoryMap provides a map that is limited to a maximum number of items
diff --git a/mruinvmap_test.go b/mruinvmap_test.go
index f9a19907..3b0351f3 100644
--- a/mruinvmap_test.go
+++ b/mruinvmap_test.go
@@ -1,4 +1,5 @@
 // Copyright (c) 2013-2014 The btcsuite developers
+// Copyright (c) 2015 The Decred developers
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.
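
The new MiningRuleError type exists so callers can tell benign conditions (such as ErrNotEnoughVoters) apart from real failures by inspecting the error code; the getminingstate handler later in this change does exactly that. Below is a minimal standalone sketch of the pattern. The types are re-declared in trimmed-down form and pickParent is a hypothetical caller, so the snippet compiles on its own rather than against dcrd:

```go
package main

import "fmt"

// Trimmed-down stand-ins for the MiningErrorCode and MiningRuleError types
// introduced in miningerror.go, re-declared so this sketch is self-contained.
type MiningErrorCode int

const (
	ErrNotEnoughVoters MiningErrorCode = iota
	ErrGetTopBlock
)

type MiningRuleError struct {
	ErrorCode   MiningErrorCode
	Description string
}

func (e MiningRuleError) Error() string            { return e.Description }
func (e MiningRuleError) GetCode() MiningErrorCode { return e.ErrorCode }

func miningRuleError(c MiningErrorCode, desc string) MiningRuleError {
	return MiningRuleError{ErrorCode: c, Description: desc}
}

// pickParent is a hypothetical caller that fails with a typed mining error.
func pickParent() error {
	return miningRuleError(ErrNotEnoughVoters, "not enough voters on any tip")
}

func main() {
	err := pickParent()

	// A comma-ok type assertion avoids a panic when err is some other error
	// type; the code is then used to decide whether the condition is benign.
	if mErr, ok := err.(MiningRuleError); ok && mErr.GetCode() == ErrNotEnoughVoters {
		fmt.Println("waiting for votes, skipping template generation:", mErr)
		return
	}
	if err != nil {
		fmt.Println("unexpected error:", err)
	}
}
```

The handler code in this change uses a direct assertion (err.(MiningRuleError).GetCode()), which is fine where the error is known to come from the mempool; the comma-ok form above is the defensive variant.
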
@@ -8,7 +9,8 @@ import ( "crypto/rand" "testing" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" ) // BenchmarkMruInventoryList performs basic benchmarks on the most recently @@ -20,9 +22,9 @@ func BenchmarkMruInventoryList(b *testing.B) { numInvVects := 100000 invVects := make([]*wire.InvVect, 0, numInvVects) for i := 0; i < numInvVects; i++ { - hashBytes := make([]byte, wire.HashSize) + hashBytes := make([]byte, chainhash.HashSize) rand.Read(hashBytes) - hash, _ := wire.NewShaHash(hashBytes) + hash, _ := chainhash.NewHash(hashBytes) iv := wire.NewInvVect(wire.InvTypeBlock, hash) invVects = append(invVects, iv) } diff --git a/mrunoncemap.go b/mrunoncemap.go new file mode 100644 index 00000000..64965c1e --- /dev/null +++ b/mrunoncemap.go @@ -0,0 +1,130 @@ +// Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "container/list" + "fmt" + "sync" +) + +// mruNonceMap provides a concurrency safe map that is limited to a maximum +// number of items with eviction for the oldest entry when the limit is +// exceeded. +type mruNonceMap struct { + mtx sync.Mutex + nonceMap map[uint64]*list.Element // nearly O(1) lookups + nonceList *list.List // O(1) insert, update, delete + limit uint +} + +// String returns the map as a human-readable string. +// +// This function is safe for concurrent access. +func (m *mruNonceMap) String() string { + m.mtx.Lock() + defer m.mtx.Unlock() + + lastEntryNum := len(m.nonceMap) - 1 + curEntry := 0 + buf := bytes.NewBufferString("[") + for nonce := range m.nonceMap { + buf.WriteString(fmt.Sprintf("%d", nonce)) + if curEntry < lastEntryNum { + buf.WriteString(", ") + } + curEntry++ + } + buf.WriteString("]") + + return fmt.Sprintf("<%d>%s", m.limit, buf.String()) +} + +// Exists returns whether or not the passed nonce is in the map. +// +// This function is safe for concurrent access. +func (m *mruNonceMap) Exists(nonce uint64) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + if _, exists := m.nonceMap[nonce]; exists { + return true + } + return false +} + +// Add adds the passed nonce to the map and handles eviction of the oldest item +// if adding the new item would exceed the max limit. Adding an existing item +// makes it the most recently used item. +// +// This function is safe for concurrent access. +func (m *mruNonceMap) Add(nonce uint64) { + m.mtx.Lock() + defer m.mtx.Unlock() + + // When the limit is zero, nothing can be added to the map, so just + // return. + if m.limit == 0 { + return + } + + // When the entry already exists move it to the front of the list + // thereby marking it most recently used. + if node, exists := m.nonceMap[nonce]; exists { + m.nonceList.MoveToFront(node) + return + } + + // Evict the least recently used entry (back of the list) if the the new + // entry would exceed the size limit for the map. Also reuse the list + // node so a new one doesn't have to be allocated. + if uint(len(m.nonceMap))+1 > m.limit { + node := m.nonceList.Back() + lru := node.Value.(uint64) + + // Evict least recently used item. + delete(m.nonceMap, lru) + + // Reuse the list node of the item that was just evicted for the + // new item. + node.Value = nonce + m.nonceList.MoveToFront(node) + m.nonceMap[nonce] = node + return + } + + // The limit hasn't been reached yet, so just add the new item. 
+	node := m.nonceList.PushFront(nonce)
+	m.nonceMap[nonce] = node
+	return
+}
+
+// Delete deletes the passed nonce from the map (if it exists).
+//
+// This function is safe for concurrent access.
+func (m *mruNonceMap) Delete(nonce uint64) {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	if node, exists := m.nonceMap[nonce]; exists {
+		m.nonceList.Remove(node)
+		delete(m.nonceMap, nonce)
+	}
+}
+
+// newMruNonceMap returns a new nonce map that is limited to the number of
+// entries specified by limit. When the number of entries exceeds the limit,
+// the oldest (least recently used) entry will be removed to make room for the
+// new entry.
+func newMruNonceMap(limit uint) *mruNonceMap {
+	m := mruNonceMap{
+		nonceMap:  make(map[uint64]*list.Element),
+		nonceList: list.New(),
+		limit:     limit,
+	}
+	return &m
+}
diff --git a/mrunoncemap_test.go b/mrunoncemap_test.go
new file mode 100644
index 00000000..687ec3b4
--- /dev/null
+++ b/mrunoncemap_test.go
@@ -0,0 +1,153 @@
+// Copyright (c) 2015 The btcsuite developers
+// Copyright (c) 2015 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"fmt"
+	"testing"
+)
+
+// TestMruNonceMap ensures the mruNonceMap behaves as expected including
+// limiting, eviction of least-recently used entries, specific entry removal,
+// and existence tests.
+func TestMruNonceMap(t *testing.T) {
+	// Create a bunch of fake nonces to use in testing the mru nonce code.
+	numNonces := 10
+	nonces := make([]uint64, 0, numNonces)
+	for i := 0; i < numNonces; i++ {
+		nonces = append(nonces, uint64(i))
+	}
+
+	tests := []struct {
+		name  string
+		limit int
+	}{
+		{name: "limit 0", limit: 0},
+		{name: "limit 1", limit: 1},
+		{name: "limit 5", limit: 5},
+		{name: "limit 7", limit: 7},
+		{name: "limit one less than available", limit: numNonces - 1},
+		{name: "limit all available", limit: numNonces},
+	}
+
+testLoop:
+	for i, test := range tests {
+		// Create a new mru nonce map limited by the specified test
+		// limit and add all of the test nonces. This will cause
+		// eviction since there are more test nonces than the limit.
+		mruNonceMap := newMruNonceMap(uint(test.limit))
+		for j := 0; j < numNonces; j++ {
+			mruNonceMap.Add(nonces[j])
+		}
+
+		// Ensure the limited number of most recent entries in the list
+		// exist.
+		for j := numNonces - test.limit; j < numNonces; j++ {
+			if !mruNonceMap.Exists(nonces[j]) {
+				t.Errorf("Exists #%d (%s) entry %d does not "+
+					"exist", i, test.name, nonces[j])
+				continue testLoop
+			}
+		}
+
+		// Ensure the entries before the limited number of most recent
+		// entries in the list do not exist.
+		for j := 0; j < numNonces-test.limit; j++ {
+			if mruNonceMap.Exists(nonces[j]) {
+				t.Errorf("Exists #%d (%s) entry %d exists", i,
+					test.name, nonces[j])
+				continue testLoop
+			}
+		}
+
+		// Re-add the entry that should currently be the least-recently
+		// used entry so it becomes the most-recently used entry, then
+		// force an eviction by adding an entry that doesn't exist and
+		// ensure the evicted entry is the new least-recently used
+		// entry.
+		//
+		// This check needs at least 2 entries.
+		if test.limit > 1 {
+			origLruIndex := numNonces - test.limit
+			mruNonceMap.Add(nonces[origLruIndex])
+
+			mruNonceMap.Add(uint64(numNonces) + 1)
+
+			// Ensure the original lru entry still exists since it
+			// was updated and should've become the mru entry.
+ if !mruNonceMap.Exists(nonces[origLruIndex]) { + t.Errorf("MRU #%d (%s) entry %d does not exist", + i, test.name, nonces[origLruIndex]) + continue testLoop + } + + // Ensure the entry that should've become the new lru + // entry was evicted. + newLruIndex := origLruIndex + 1 + if mruNonceMap.Exists(nonces[newLruIndex]) { + t.Errorf("MRU #%d (%s) entry %d exists", i, + test.name, nonces[newLruIndex]) + continue testLoop + } + } + + // Delete all of the entries in the list, including those that + // don't exist in the map, and ensure they no longer exist. + for j := 0; j < numNonces; j++ { + mruNonceMap.Delete(nonces[j]) + if mruNonceMap.Exists(nonces[j]) { + t.Errorf("Delete #%d (%s) entry %d exists", i, + test.name, nonces[j]) + continue testLoop + } + } + } +} + +// TestMruNonceMapStringer tests the stringized output for the mruNonceMap type. +func TestMruNonceMapStringer(t *testing.T) { + // Create a couple of fake nonces to use in testing the mru nonce + // stringer code. + nonce1 := uint64(10) + nonce2 := uint64(20) + + // Create new mru nonce map and add the nonces. + mruNonceMap := newMruNonceMap(uint(2)) + mruNonceMap.Add(nonce1) + mruNonceMap.Add(nonce2) + + // Ensure the stringer gives the expected result. Since map iteration + // is not ordered, either entry could be first, so account for both + // cases. + wantStr1 := fmt.Sprintf("<%d>[%d, %d]", 2, nonce1, nonce2) + wantStr2 := fmt.Sprintf("<%d>[%d, %d]", 2, nonce2, nonce1) + gotStr := mruNonceMap.String() + if gotStr != wantStr1 && gotStr != wantStr2 { + t.Fatalf("unexpected string representation - got %q, want %q "+ + "or %q", gotStr, wantStr1, wantStr2) + } +} + +// BenchmarkMruNonceList performs basic benchmarks on the most recently used +// nonce handling. +func BenchmarkMruNonceList(b *testing.B) { + // Create a bunch of fake nonces to use in benchmarking the mru nonce + // code. + b.StopTimer() + numNonces := 100000 + nonces := make([]uint64, 0, numNonces) + for i := 0; i < numNonces; i++ { + nonces = append(nonces, uint64(i)) + } + b.StartTimer() + + // Benchmark the add plus evicition code. + limit := 20000 + mruNonceMap := newMruNonceMap(uint(limit)) + for i := 0; i < b.N; i++ { + mruNonceMap.Add(nonces[i%numNonces]) + } +} diff --git a/params.go b/params.go index ff4944d4..16712c03 100644 --- a/params.go +++ b/params.go @@ -1,16 +1,17 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package main import ( - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/wire" ) // activeNetParams is a pointer to the parameters specific to the -// currently active bitcoin network. +// currently active decred network. var activeNetParams = &mainNetParams // params is used to group parameters for various networks such as the main @@ -23,45 +24,32 @@ type params struct { // mainNetParams contains parameters specific to the main network // (wire.MainNet). NOTE: The RPC port is intentionally different than the -// reference implementation because btcd does not handle wallet requests. The +// reference implementation because dcrd does not handle wallet requests. The // separate wallet process listens on the well-known port and forwards requests -// it does not handle on to btcd. This approach allows the wallet process +// it does not handle on to dcrd. 
This approach allows the wallet process // to emulate the full reference implementation RPC API. var mainNetParams = params{ Params: &chaincfg.MainNetParams, - rpcPort: "8334", + rpcPort: "9109", dnsSeeds: []string{ - "seed.bitcoin.sipa.be", - "dnsseed.bluematt.me", - "dnsseed.bitcoin.dashjr.org", - "seed.bitcoinstats.com", - "seed.bitnodes.io", - "bitseed.xf2.org", - "seeds.bitcoin.open-nodes.org", + "mainnet-seed.decred.mindcry.org", + "mainnet-seed.decred.netpurgatory.com", + "mainnet.decredseed.org", + "mainnet-seed.decred.org", }, } -// regressionNetParams contains parameters specific to the regression test -// network (wire.TestNet). NOTE: The RPC port is intentionally different -// than the reference implementation - see the mainNetParams comment for -// details. -var regressionNetParams = params{ - Params: &chaincfg.RegressionNetParams, - rpcPort: "18334", - dnsSeeds: []string{}, -} - -// testNet3Params contains parameters specific to the test network (version 3) -// (wire.TestNet3). NOTE: The RPC port is intentionally different than the +// testNetParams contains parameters specific to the test network (version 0) +// (wire.TestNet). NOTE: The RPC port is intentionally different than the // reference implementation - see the mainNetParams comment for details. -var testNet3Params = params{ - Params: &chaincfg.TestNet3Params, - rpcPort: "18334", +var testNetParams = params{ + Params: &chaincfg.TestNetParams, + rpcPort: "19109", dnsSeeds: []string{ - "testnet-seed.alexykot.me", - "testnet-seed.bitcoin.schildbach.de", - "testnet-seed.bitcoin.petertodd.org", - "testnet-seed.bluematt.me", + "testnet-seed.decred.mindcry.org", + "testnet-seed.decred.netpurgatory.com", + "testnet.decredseed.org", + "testnet-seed.decred.org", }, } @@ -69,22 +57,22 @@ var testNet3Params = params{ // (wire.SimNet). var simNetParams = params{ Params: &chaincfg.SimNetParams, - rpcPort: "18556", + rpcPort: "19556", dnsSeeds: []string{}, // NOTE: There must NOT be any seeds. } -// netName returns the name used when referring to a bitcoin network. At the -// time of writing, btcd currently places blocks for testnet version 3 in the +// netName returns the name used when referring to a decred network. At the +// time of writing, dcrd currently places blocks for testnet version 0 in the // data and log directory "testnet", which does not match the Name field of the -// chaincfg parameters. This function can be used to override this directory -// name as "testnet" when the passed active network matches wire.TestNet3. +// chaincfg parameters. This function can be used to override this directory name +// as "testnet" when the passed active network matches wire.TestNet. // // A proper upgrade to move the data and log directories for this network to -// "testnet3" is planned for the future, at which point this function can be +// "testnet" is planned for the future, at which point this function can be // removed and the network parameter's name used instead. func netName(chainParams *params) string { switch chainParams.Net { - case wire.TestNet3: + case wire.TestNet: return "testnet" default: return chainParams.Name diff --git a/peer.go b/peer.go index 16047daa..16f8a838 100644 --- a/peer.go +++ b/peer.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
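
The params table in params.go just pairs each chaincfg network with its dcrd-specific RPC port and DNS seeds, and netName papers over the testnet directory-name mismatch. The sketch below shows how that table is typically consumed when picking the active network; the port values mirror the diff (9109, 19109, 19556), while the struct fields and the --testnet/--simnet flags are simplified, hypothetical stand-ins so the example runs without the dcrd packages:

```go
package main

import (
	"flag"
	"fmt"
)

// Trimmed stand-in for the params grouping in params.go; only the fields the
// sketch needs are kept, and the DNS seed lists are omitted.
type params struct {
	name    string
	rpcPort string
}

var (
	mainNetParams = params{name: "mainnet", rpcPort: "9109"}
	testNetParams = params{name: "testnet", rpcPort: "19109"}
	simNetParams  = params{name: "simnet", rpcPort: "19556"}

	// activeNetParams defaults to mainnet, as in params.go.
	activeNetParams = &mainNetParams
)

func main() {
	// Hypothetical flags standing in for dcrd's network selection options.
	testnet := flag.Bool("testnet", false, "use the test network")
	simnet := flag.Bool("simnet", false, "use the simulation network")
	flag.Parse()

	switch {
	case *simnet:
		activeNetParams = &simNetParams
	case *testnet:
		activeNetParams = &testNetParams
	}

	fmt.Printf("active network: %s (default RPC port %s)\n",
		activeNetParams.name, activeNetParams.rpcPort)
}
```
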
@@ -7,6 +8,7 @@ package main import ( "bytes" "container/list" + "errors" "fmt" "io" prand "math/rand" @@ -16,22 +18,24 @@ import ( "sync/atomic" "time" - "github.com/btcsuite/btcd/addrmgr" - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" - "github.com/btcsuite/btcutil/bloom" "github.com/btcsuite/go-socks/socks" "github.com/davecgh/go-spew/spew" + + "github.com/decred/dcrd/addrmgr" + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" + "github.com/decred/dcrutil/bloom" ) const ( // maxProtocolVersion is the max protocol version the peer supports. - maxProtocolVersion = 70002 + maxProtocolVersion = 1 // outputBufferSize is the number of elements the output channels use. - outputBufferSize = 50 + outputBufferSize = 5000 // invTrickleSize is the maximum amount of inventory to send in a single // message when trickling inventory to remote peers. @@ -53,6 +57,9 @@ const ( // pingTimeoutMinutes is the number of minutes since we last sent a // message requiring a reply before we will ping a host. pingTimeoutMinutes = 2 + + // queueEmptyFrequency is the frequency for the emptying of the queue. + queueEmptyFrequency = 500 * time.Millisecond ) var ( @@ -61,16 +68,21 @@ var ( nodeCount int32 // userAgentName is the user agent name and is used to help identify - // ourselves to other bitcoin peers. - userAgentName = "btcd" + // ourselves to other decred peers. + userAgentName = "dcrd" // userAgentVersion is the user agent version and is used to help - // identify ourselves to other bitcoin peers. + // identify ourselves to other decred peers. userAgentVersion = fmt.Sprintf("%d.%d.%d", appMajor, appMinor, appPatch) + + // allowSelfConns is only used to allow the tests to bypass the self + // connection detecting and disconnect logic since they intentionally + // do so for testing purposes. + allowSelfConns bool ) // zeroHash is the zero value hash (all zeros). It is defined as a convenience. -var zeroHash wire.ShaHash +var zeroHash chainhash.Hash // minUint32 is a helper function to return the minimum of two uint32s. // This avoids a math import and the need to cast to floats. @@ -82,7 +94,7 @@ func minUint32(a, b uint32) uint32 { } // newNetAddress attempts to extract the IP address and port from the passed -// net.Addr interface and create a bitcoin NetAddress structure using that +// net.Addr interface and create a decred NetAddress structure using that // information. func newNetAddress(addr net.Addr, services wire.ServiceFlag) (*wire.NetAddress, error) { // addr will be a net.TCPAddr when not using a proxy. @@ -128,7 +140,7 @@ type outMsg struct { doneChan chan struct{} } -// peer provides a bitcoin peer for handling bitcoin communications. The +// peer provides a decred peer for handling decred communications. The // overall data flow is split into 3 goroutines and a separate block manager. // Inbound messages are read via the inHandler goroutine and generally // dispatched to their own handler. For inbound data-related messages such as @@ -147,7 +159,7 @@ type outMsg struct { // to push messages to the peer. Internally they use QueueMessage. 
type peer struct { server *server - btcnet wire.BitcoinNet + dcrnet wire.CurrencyNet started int32 connected int32 disconnect int32 // only to be used atomically @@ -160,18 +172,18 @@ type peer struct { knownAddresses map[string]struct{} knownInventory *MruInventoryMap knownInvMutex sync.Mutex - requestedTxns map[wire.ShaHash]struct{} // owned by blockmanager - requestedBlocks map[wire.ShaHash]struct{} // owned by blockmanager + requestedTxns map[chainhash.Hash]struct{} // owned by blockmanager + requestedBlocks map[chainhash.Hash]struct{} // owned by blockmanager retryCount int64 - prevGetBlocksBegin *wire.ShaHash // owned by blockmanager - prevGetBlocksStop *wire.ShaHash // owned by blockmanager - prevGetHdrsBegin *wire.ShaHash // owned by blockmanager - prevGetHdrsStop *wire.ShaHash // owned by blockmanager + prevGetBlocksBegin *chainhash.Hash // owned by blockmanager + prevGetBlocksStop *chainhash.Hash // owned by blockmanager + prevGetHdrsBegin *chainhash.Hash // owned by blockmanager + prevGetHdrsStop *chainhash.Hash // owned by blockmanager requestQueue []*wire.InvVect filter *bloom.Filter relayMtx sync.Mutex disableRelayTx bool - continueHash *wire.ShaHash + continueHash *chainhash.Hash outputQueue chan outMsg sendQueue chan outMsg sendDoneQueue chan struct{} @@ -195,7 +207,7 @@ type peer struct { userAgent string startingHeight int32 lastBlock int32 - lastAnnouncedBlock *wire.ShaHash + lastAnnouncedBlock *chainhash.Hash lastPingNonce uint64 // Set to nonce if we have a pending ping. lastPingTime time.Time // Time we sent last ping. lastPingMicros int64 // Time for last ping to return. @@ -232,7 +244,7 @@ func (p *peer) UpdateLastBlockHeight(newHeight int32) { // UpdateLastAnnouncedBlock updates meta-data about the last block sha this // peer is known to have announced. It is safe for concurrent access. -func (p *peer) UpdateLastAnnouncedBlock(blkSha *wire.ShaHash) { +func (p *peer) UpdateLastAnnouncedBlock(blkSha *chainhash.Hash) { p.StatsMtx.Lock() defer p.StatsMtx.Unlock() @@ -355,8 +367,7 @@ func (p *peer) updateAddresses(msg *wire.MsgVersion) { // Request known addresses if the server address manager needs // more and the peer has a protocol version new enough to // include a timestamp with addresses. - hasTimestamp := p.ProtocolVersion() >= - wire.NetAddressTimeVersion + hasTimestamp := true if p.server.addrManager.NeedMoreAddresses() && hasTimestamp { p.QueueMessage(wire.NewMsgGetAddr(), nil) } @@ -375,29 +386,26 @@ func (p *peer) updateAddresses(msg *wire.MsgVersion) { } } -// handleVersionMsg is invoked when a peer receives a version bitcoin message +// handleVersionMsg is invoked when a peer receives a version decred message // and is used to negotiate the protocol version details as well as kick start // the communications. -func (p *peer) handleVersionMsg(msg *wire.MsgVersion) { +func (p *peer) handleVersionMsg(msg *wire.MsgVersion) error { // Detect self connections. - if msg.Nonce == p.server.nonce { - peerLog.Debugf("Disconnecting peer connected to self %s", p) - p.Disconnect() - return + if !allowSelfConns && msg.Nonce == p.server.nonce { + return errors.New("disconnecting peer connected to self") } // Notify and disconnect clients that have a protocol version that is // too old. - if msg.ProtocolVersion < int32(wire.MultipleAddressVersion) { + if msg.ProtocolVersion < 1 { // TODO fix magic number cj // Send a reject message indicating the protocol version is // obsolete and wait for the message to be sent before // disconnecting. 
reason := fmt.Sprintf("protocol version must be %d or greater", - wire.MultipleAddressVersion) + 1) p.PushRejectMsg(msg.Command(), wire.RejectObsolete, reason, nil, true) - p.Disconnect() - return + return errors.New(reason) } // Updating a bunch of stats. @@ -405,8 +413,6 @@ func (p *peer) handleVersionMsg(msg *wire.MsgVersion) { // Limit to one version message per peer. if p.versionKnown { - p.logError("Only one version message per peer is allowed %s.", - p) p.StatsMtx.Unlock() // Send an reject message indicating the version message was @@ -415,8 +421,7 @@ func (p *peer) handleVersionMsg(msg *wire.MsgVersion) { p.PushRejectMsg(msg.Command(), wire.RejectDuplicate, "duplicate version message", nil, true) - p.Disconnect() - return + return errors.New("only one version message per peer is allowed") } // Negotiate the protocol version. @@ -455,19 +460,14 @@ func (p *peer) handleVersionMsg(msg *wire.MsgVersion) { // at connection time and no point recomputing. na, err := newNetAddress(p.conn.RemoteAddr(), p.services) if err != nil { - p.logError("Can't get remote address: %v", err) - p.Disconnect() - return + return err } p.na = na // Send version. err = p.pushVersionMsg() if err != nil { - p.logError("Can't send version message to %s: %v", - p, err) - p.Disconnect() - return + return err } } @@ -491,11 +491,13 @@ func (p *peer) handleVersionMsg(msg *wire.MsgVersion) { p.server.blockManager.NewPeer(p) // TODO: Relay alerts. + + return nil } // pushTxMsg sends a tx message for the provided transaction hash to the // connected peer. An error is returned if the transaction hash is not known. -func (p *peer) pushTxMsg(sha *wire.ShaHash, doneChan, waitChan chan struct{}) error { +func (p *peer) pushTxMsg(sha *chainhash.Hash, doneChan, waitChan chan struct{}) error { // Attempt to fetch the requested transaction from the pool. A // call could be made to check for existence first, but simply trying // to fetch a missing transaction results in the same behavior. @@ -522,8 +524,8 @@ func (p *peer) pushTxMsg(sha *wire.ShaHash, doneChan, waitChan chan struct{}) er // pushBlockMsg sends a block message for the provided block hash to the // connected peer. An error is returned if the block hash is not known. -func (p *peer) pushBlockMsg(sha *wire.ShaHash, doneChan, waitChan chan struct{}) error { - blk, err := p.server.db.FetchBlockBySha(sha) +func (p *peer) pushBlockMsg(sha *chainhash.Hash, doneChan, waitChan chan struct{}) error { + blk, err := p.server.blockManager.GetBlockFromHash(*sha) if err != nil { peerLog.Tracef("Unable to fetch requested block sha %v: %v", sha, err) @@ -572,7 +574,7 @@ func (p *peer) pushBlockMsg(sha *wire.ShaHash, doneChan, waitChan chan struct{}) // the connected peer. Since a merkle block requires the peer to have a filter // loaded, this call will simply be ignored if there is no filter loaded. An // error is returned if the block hash is not known. -func (p *peer) pushMerkleBlockMsg(sha *wire.ShaHash, doneChan, waitChan chan struct{}) error { +func (p *peer) pushMerkleBlockMsg(sha *chainhash.Hash, doneChan, waitChan chan struct{}) error { // Do not send a response if the peer doesn't have a filter loaded. if !p.filter.IsLoaded() { if doneChan != nil { @@ -646,11 +648,11 @@ func (p *peer) pushMerkleBlockMsg(sha *wire.ShaHash, doneChan, waitChan chan str // PushGetBlocksMsg sends a getblocks message for the provided block locator // and stop hash. It will ignore back-to-back duplicate requests. 
-func (p *peer) PushGetBlocksMsg(locator blockchain.BlockLocator, stopHash *wire.ShaHash) error { +func (p *peer) PushGetBlocksMsg(locator blockchain.BlockLocator, stopHash *chainhash.Hash) error { // Extract the begin hash from the block locator, if one was specified, // to use for filtering duplicate getblocks requests. // request. - var beginHash *wire.ShaHash + var beginHash *chainhash.Hash if len(locator) > 0 { beginHash = locator[0] } @@ -684,10 +686,10 @@ func (p *peer) PushGetBlocksMsg(locator blockchain.BlockLocator, stopHash *wire. // PushGetHeadersMsg sends a getblocks message for the provided block locator // and stop hash. It will ignore back-to-back duplicate requests. -func (p *peer) PushGetHeadersMsg(locator blockchain.BlockLocator, stopHash *wire.ShaHash) error { +func (p *peer) PushGetHeadersMsg(locator blockchain.BlockLocator, stopHash *chainhash.Hash) error { // Extract the begin hash from the block locator, if one was specified, // to use for filtering duplicate getheaders requests. - var beginHash *wire.ShaHash + var beginHash *chainhash.Hash if len(locator) > 0 { beginHash = locator[0] } @@ -720,14 +722,51 @@ func (p *peer) PushGetHeadersMsg(locator blockchain.BlockLocator, stopHash *wire return nil } +// PushGetMiningStateMsg sends a request to a peer for mining state information. +func (p *peer) PushGetMiningStateMsg() error { + msg := wire.NewMsgGetMiningState() + + p.QueueMessage(msg, nil) + return nil +} + +// PushMiningStateMsg pushes a mining state message to the queue for a requesting +// peer. +func (p *peer) PushMiningStateMsg(height uint32, blockHashes []*chainhash.Hash, voteHashes []*chainhash.Hash) error { + // Nothing to send, abort. + if len(blockHashes) == 0 { + return nil + } + + // Construct the mining state request and queue it to be sent. + msg := wire.NewMsgMiningState() + msg.Height = height + for _, hash := range blockHashes { + err := msg.AddBlockHash(hash) + if err != nil { + return err + } + } + for _, hash := range voteHashes { + err := msg.AddVoteHash(hash) + if err != nil { + return err + } + } + + p.QueueMessage(msg, nil) + + return nil +} + // PushRejectMsg sends a reject message for the provided command, reject code, // and reject reason, and hash. The hash will only be used when the command // is a tx or block and should be nil in other cases. The wait parameter will // cause the function to block until the reject message has actually been sent. -func (p *peer) PushRejectMsg(command string, code wire.RejectCode, reason string, hash *wire.ShaHash, wait bool) { +func (p *peer) PushRejectMsg(command string, code wire.RejectCode, reason string, hash *chainhash.Hash, wait bool) { // Don't bother sending the reject message if the protocol version // is too low. - if p.VersionKnown() && p.ProtocolVersion() < wire.RejectVersion { + if p.VersionKnown() && p.ProtocolVersion() < 1 { // TODO fix magic number cj return } @@ -754,7 +793,7 @@ func (p *peer) PushRejectMsg(command string, code wire.RejectCode, reason string <-doneChan } -// handleMemPoolMsg is invoked when a peer receives a mempool bitcoin message. +// handleMemPoolMsg is invoked when a peer receives a mempool decred message. // It creates and sends an inventory message with the contents of the memory // pool up to the maximum inventory allowed per message. When the peer has a // bloom filter loaded, the contents are filtered accordingly. 
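
PushMiningStateMsg above only packs block and vote hashes into a wire.MsgMiningState; the handler in the following hunk is what selects the eligible parent blocks and caps them at wire.MaxMSBlocksAtHeadPerMsg before calling it. A rough standalone sketch of that capping step follows; the hash type and the limit constant are hypothetical stand-ins for the chainhash and wire values so the snippet runs on its own:

```go
package main

import "fmt"

// Hypothetical stand-ins for chainhash.Hash and wire.MaxMSBlocksAtHeadPerMsg;
// the real limit is defined by the wire package.
type hash [32]byte

const maxMSBlocksAtHeadPerMsg = 8

// capParentHashes mirrors the shape of the capping logic used when answering a
// getminingstate request: copy at most maxMSBlocksAtHeadPerMsg eligible parent
// hashes into the reply, each as its own pointer so later changes to the
// source slice cannot alter the message.
func capParentHashes(eligibleParents []hash) []*hash {
	numBlocks := len(eligibleParents)
	if numBlocks > maxMSBlocksAtHeadPerMsg {
		numBlocks = maxMSBlocksAtHeadPerMsg
	}

	out := make([]*hash, 0, numBlocks)
	for i := 0; i < numBlocks; i++ {
		h := eligibleParents[i] // copy, then take the address of the copy
		out = append(out, &h)
	}
	return out
}

func main() {
	parents := make([]hash, 12) // pretend 12 side-by-side tips are eligible
	fmt.Println("eligible parents:", len(parents))
	fmt.Println("hashes included in miningstate reply:", len(capParentHashes(parents)))
}
```
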
@@ -793,15 +832,115 @@ func (p *peer) handleMemPoolMsg(msg *wire.MsgMemPool) { } } -// handleTxMsg is invoked when a peer receives a tx bitcoin message. It blocks -// until the bitcoin transaction has been fully processed. Unlock the block +// handleGetMiningStateMsg handles a request from a peer to get the current +// mining state. +func (p *peer) handleGetMiningStateMsg(msg *wire.MsgGetMiningState) { + // Access the block manager and get the list of best blocks to + // mine on. + bm := p.server.blockManager + mp := p.server.txMemPool + newest, height := bm.chainState.Best() + + // Send out blank mining states if it's early in the blockchain. + if height < activeNetParams.StakeValidationHeight-1 { + err := p.PushMiningStateMsg(0, nil, nil) + if err != nil { + peerLog.Warnf("unexpected error while pushing data for "+ + "mining state request: %v", err.Error()) + } + + return + } + + children, err := bm.GetGeneration(*newest) + if err != nil { + peerLog.Warnf("failed to access block manager to get the generation "+ + "for a mining state request (block: %v)", newest) + return + } + + // Get the list of blocks that we can actually build on top of. + eligibleParents, err := mp.SortParentsByVotes(*newest, children) + if err != nil { + // We couldn't find enough voters for any block, so just return now. + if err.(MiningRuleError).GetCode() == ErrNotEnoughVoters { + return + } else { + peerLog.Warnf("unexpected mempool error while sorting eligible "+ + "parents for mining state request: %v", err.Error()) + } + return + } + + // Nothing to send, abort. + if len(eligibleParents) == 0 { + return + } + + // Construct the set of block hashes to send. + numBlocks := len(eligibleParents) + if numBlocks > wire.MaxMSBlocksAtHeadPerMsg { + numBlocks = wire.MaxMSBlocksAtHeadPerMsg + } + blockHashes := make([]*chainhash.Hash, numBlocks, numBlocks) + for i, h := range eligibleParents { + if i >= wire.MaxMSBlocksAtHeadPerMsg { + break + } + hP := new(chainhash.Hash) + hP.SetBytes(h.Bytes()) + blockHashes[i] = hP + } + + // Construct the set of votes to send. + voteHashes := make([]*chainhash.Hash, 0, wire.MaxMSVotesAtHeadPerMsg) + for _, bh := range blockHashes { + // Fetch the vote hashes themselves and append them. + vhsForBlock, err := mp.GetVoteHashesForBlock(*bh) + if err != nil { + peerLog.Warnf("unexpected error while fetching vote hashes "+ + "for block %v for a mining state request: %v", + bh, err.Error()) + return + } + for _, vh := range vhsForBlock { + vhP := new(chainhash.Hash) + vhP.SetBytes(vh.Bytes()) + voteHashes = append(voteHashes, vhP) + } + } + + err = p.PushMiningStateMsg(uint32(height), blockHashes, voteHashes) + if err != nil { + peerLog.Warnf("unexpected error while pushing data for "+ + "mining state request: %v", err.Error()) + } + + return +} + +// handleMiningStateMsg handles incoming mining state information from a mining +// state message. The response is to request these blocks and vote transactions +// from the peer that sent them. +func (p *peer) handleMiningStateMsg(msg *wire.MsgMiningState) { + err := p.server.blockManager.RequestFromPeer(p, msg.BlockHashes, + msg.VoteHashes) + if err != nil { + peerLog.Warnf("couldn't handle mining state message: %v", err.Error()) + } + + return +} + +// handleTxMsg is invoked when a peer receives a tx decred message. It blocks +// until the decred transaction has been fully processed. 
Unlock the block // handler this does not serialize all transactions through a single thread // transactions don't rely on the previous one in a linear fashion like blocks. func (p *peer) handleTxMsg(msg *wire.MsgTx) { // Add the transaction to the known inventory for the peer. - // Convert the raw MsgTx to a btcutil.Tx which provides some convenience + // Convert the raw MsgTx to a dcrutil.Tx which provides some convenience // methods and things such as hash caching. - tx := btcutil.NewTx(msg) + tx := dcrutil.NewTx(msg) iv := wire.NewInvVect(wire.InvTypeTx, tx.Sha()) p.AddKnownInventory(iv) @@ -814,12 +953,12 @@ func (p *peer) handleTxMsg(msg *wire.MsgTx) { <-p.txProcessed } -// handleBlockMsg is invoked when a peer receives a block bitcoin message. It -// blocks until the bitcoin block has been fully processed. +// handleBlockMsg is invoked when a peer receives a block decred message. It +// blocks until the decred block has been fully processed. func (p *peer) handleBlockMsg(msg *wire.MsgBlock, buf []byte) { - // Convert the raw MsgBlock to a btcutil.Block which provides some + // Convert the raw MsgBlock to a dcrutil.Block which provides some // convenience methods and things such as hash caching. - block := btcutil.NewBlockFromBlockAndBytes(msg, buf) + block := dcrutil.NewBlockFromBlockAndBytes(msg, buf) // Add the block to the known inventory for the peer. iv := wire.NewInvVect(wire.InvTypeBlock, block.Sha()) @@ -827,7 +966,7 @@ func (p *peer) handleBlockMsg(msg *wire.MsgBlock, buf []byte) { // Queue the block up to be handled by the block // manager and intentionally block further receives - // until the bitcoin block is fully processed and known + // until the decred block is fully processed and known // good or bad. This helps prevent a malicious peer // from queueing up a bunch of bad blocks before // disconnecting (or being disconnected) and wasting @@ -835,12 +974,12 @@ func (p *peer) handleBlockMsg(msg *wire.MsgBlock, buf []byte) { // by at least the block acceptance test tool as the // reference implementation processes blocks in the same // thread and therefore blocks further messages until - // the bitcoin block has been fully processed. + // the decred block has been fully processed. p.server.blockManager.QueueBlock(block, p) <-p.blockProcessed } -// handleInvMsg is invoked when a peer receives an inv bitcoin message and is +// handleInvMsg is invoked when a peer receives an inv decred message and is // used to examine the inventory being advertised by the remote peer and react // accordingly. We pass the message down to blockmanager which will call // QueueMessage with any appropriate responses. @@ -848,13 +987,13 @@ func (p *peer) handleInvMsg(msg *wire.MsgInv) { p.server.blockManager.QueueInv(msg, p) } -// handleHeadersMsg is invoked when a peer receives a headers bitcoin message. +// handleHeadersMsg is invoked when a peer receives a headers decred message. // The message is passed down to the block manager. func (p *peer) handleHeadersMsg(msg *wire.MsgHeaders) { p.server.blockManager.QueueHeaders(msg, p) } -// handleGetData is invoked when a peer receives a getdata bitcoin message and +// handleGetData is invoked when a peer receives a getdata decred message and // is used to deliver block and transaction information. func (p *peer) handleGetDataMsg(msg *wire.MsgGetData) { numAdded := 0 @@ -874,7 +1013,7 @@ func (p *peer) handleGetDataMsg(msg *wire.MsgGetData) { c = doneChan } else if (i+1)%3 == 0 { // Buffered so as to not make the send goroutine block. 
- c = make(chan struct{}, 1) + c = make(chan struct{}, 10000) } var err error switch iv.Type { @@ -918,7 +1057,7 @@ func (p *peer) handleGetDataMsg(msg *wire.MsgGetData) { } } -// handleGetBlocksMsg is invoked when a peer receives a getblocks bitcoin message. +// handleGetBlocksMsg is invoked when a peer receives a getblocks decred message. func (p *peer) handleGetBlocksMsg(msg *wire.MsgGetBlocks) { // Return all block hashes to the latest one (up to max per message) if // no stop hash was specified. @@ -998,7 +1137,7 @@ func (p *peer) handleGetBlocksMsg(msg *wire.MsgGetBlocks) { } } -// handleGetHeadersMsg is invoked when a peer receives a getheaders bitcoin +// handleGetHeadersMsg is invoked when a peer receives a getheaders decred // message. func (p *peer) handleGetHeadersMsg(msg *wire.MsgGetHeaders) { // Attempt to look up the height of the provided stop hash. @@ -1091,7 +1230,7 @@ func (p *peer) handleGetHeadersMsg(msg *wire.MsgGetHeaders) { p.QueueMessage(headersMsg, nil) } -// handleFilterAddMsg is invoked when a peer receives a filteradd bitcoin +// handleFilterAddMsg is invoked when a peer receives a filteradd decred // message and is used by remote peers to add data to an already loaded bloom // filter. The peer will be disconnected if a filter is not loaded when this // message is received. @@ -1106,7 +1245,7 @@ func (p *peer) handleFilterAddMsg(msg *wire.MsgFilterAdd) { p.filter.Add(msg.Data) } -// handleFilterClearMsg is invoked when a peer receives a filterclear bitcoin +// handleFilterClearMsg is invoked when a peer receives a filterclear decred // message and is used by remote peers to clear an already loaded bloom filter. // The peer will be disconnected if a filter is not loaded when this message is // received. @@ -1120,7 +1259,7 @@ func (p *peer) handleFilterClearMsg(msg *wire.MsgFilterClear) { p.filter.Unload() } -// handleFilterLoadMsg is invoked when a peer receives a filterload bitcoin +// handleFilterLoadMsg is invoked when a peer receives a filterload decred // message and it used to load a bloom filter that should be used for delivering // merkle blocks and associated transactions that match the filter. func (p *peer) handleFilterLoadMsg(msg *wire.MsgFilterLoad) { @@ -1133,7 +1272,7 @@ func (p *peer) handleFilterLoadMsg(msg *wire.MsgFilterLoad) { p.filter.Reload(msg) } -// handleGetAddrMsg is invoked when a peer receives a getaddr bitcoin message +// handleGetAddrMsg is invoked when a peer receives a getaddr decred message // and is used to provide the peer with known addresses from the address // manager. func (p *peer) handleGetAddrMsg(msg *wire.MsgGetAddr) { @@ -1205,7 +1344,7 @@ func (p *peer) pushAddrMsg(addresses []*wire.NetAddress) error { return nil } -// handleAddrMsg is invoked when a peer receives an addr bitcoin message and +// handleAddrMsg is invoked when a peer receives an addr decred message and // is used to notify the server about advertised addresses. func (p *peer) handleAddrMsg(msg *wire.MsgAddr) { // Ignore addresses when running on the simulation test network. This @@ -1216,11 +1355,6 @@ func (p *peer) handleAddrMsg(msg *wire.MsgAddr) { return } - // Ignore old style addresses which don't include a timestamp. - if p.ProtocolVersion() < wire.NetAddressTimeVersion { - return - } - // A message that has no addresses is invalid. 
if len(msg.AddrList) == 0 { p.logError("Command [%s] from %s does not contain any addresses", @@ -1255,19 +1389,16 @@ func (p *peer) handleAddrMsg(msg *wire.MsgAddr) { p.server.addrManager.AddAddresses(msg.AddrList, p.na) } -// handlePingMsg is invoked when a peer receives a ping bitcoin message. For +// handlePingMsg is invoked when a peer receives a ping decred message. For // recent clients (protocol version > BIP0031Version), it replies with a pong // message. For older clients, it does nothing and anything other than failure // is considered a successful ping. func (p *peer) handlePingMsg(msg *wire.MsgPing) { - // Only Reply with pong is message comes from a new enough client. - if p.ProtocolVersion() > wire.BIP0031Version { - // Include nonce from ping so pong can be identified. - p.QueueMessage(wire.NewMsgPong(msg.Nonce), nil) - } + // Include nonce from ping so pong can be identified. + p.QueueMessage(wire.NewMsgPong(msg.Nonce), nil) } -// handlePongMsg is invoked when a peer received a pong bitcoin message. +// handlePongMsg is invoked when a peer received a pong decred message. // recent clients (protocol version > BIP0031Version), and if we had send a ping // previosuly we update our ping time statistics. If the client is too old or // we had not send a ping we ignore it. @@ -1283,18 +1414,17 @@ func (p *peer) handlePongMsg(msg *wire.MsgPong) { // without large usage of the ping rpc call since we ping // infrequently enough that if they overlap we would have timed out // the peer. - if p.protocolVersion > wire.BIP0031Version && - p.lastPingNonce != 0 && msg.Nonce == p.lastPingNonce { + if p.lastPingNonce != 0 && msg.Nonce == p.lastPingNonce { p.lastPingMicros = time.Now().Sub(p.lastPingTime).Nanoseconds() p.lastPingMicros /= 1000 // convert to usec. p.lastPingNonce = 0 } } -// readMessage reads the next bitcoin message from the peer with logging. +// readMessage reads the next decred message from the peer with logging. func (p *peer) readMessage() (wire.Message, []byte, error) { n, msg, buf, err := wire.ReadMessageN(p.conn, p.ProtocolVersion(), - p.btcnet) + p.dcrnet) p.StatsMtx.Lock() p.bytesReceived += uint64(n) p.StatsMtx.Unlock() @@ -1324,7 +1454,7 @@ func (p *peer) readMessage() (wire.Message, []byte, error) { return msg, buf, nil } -// writeMessage sends a bitcoin Message to the peer with logging. +// writeMessage sends a decred Message to the peer with logging. func (p *peer) writeMessage(msg wire.Message) { // Don't do anything if we're disconnecting. if atomic.LoadInt32(&p.disconnect) != 0 { @@ -1360,7 +1490,7 @@ func (p *peer) writeMessage(msg wire.Message) { peerLog.Tracef("%v", newLogClosure(func() string { var buf bytes.Buffer err := wire.WriteMessage(&buf, msg, p.ProtocolVersion(), - p.btcnet) + p.dcrnet) if err != nil { return err.Error() } @@ -1369,7 +1499,7 @@ func (p *peer) writeMessage(msg wire.Message) { // Write the message to the peer. n, err := wire.WriteMessageN(p.conn, msg, p.ProtocolVersion(), - p.btcnet) + p.dcrnet) p.StatsMtx.Lock() p.bytesSent += uint64(n) p.StatsMtx.Unlock() @@ -1381,32 +1511,6 @@ func (p *peer) writeMessage(msg wire.Message) { } } -// isAllowedByRegression returns whether or not the passed error is allowed by -// regression tests without disconnecting the peer. In particular, regression -// tests need to be allowed to send malformed messages without the peer being -// disconnected. -func (p *peer) isAllowedByRegression(err error) bool { - // Don't allow the error if it's not specifically a malformed message - // error. 
- if _, ok := err.(*wire.MessageError); !ok { - return false - } - - // Don't allow the error if it's not coming from localhost or the - // hostname can't be determined for some reason. - host, _, err := net.SplitHostPort(p.addr) - if err != nil { - return false - } - - if host != "127.0.0.1" && host != "localhost" { - return false - } - - // Allowed if all checks passed. - return true -} - // inHandler handles all incoming messages for the peer. It must be run as a // goroutine. func (p *peer) inHandler() { @@ -1426,17 +1530,6 @@ out: // Stop the timer now, if we go around again we will reset it. idleTimer.Stop() if err != nil { - // In order to allow regression tests with malformed - // messages, don't disconnect the peer when we're in - // regression test mode and the error is one of the - // allowed errors. - if cfg.RegressionTest && p.isAllowedByRegression(err) { - peerLog.Errorf("Allowed regression test "+ - "error from %s: %v", p, err) - idleTimer.Reset(idleTimeoutMinutes * time.Minute) - continue - } - // Only log the error and possibly send reject message // if we're not forcibly disconnecting. if atomic.LoadInt32(&p.disconnect) == 0 { @@ -1485,7 +1578,13 @@ out: // Handle each supported message type. switch msg := rmsg.(type) { case *wire.MsgVersion: - p.handleVersionMsg(msg) + err := p.handleVersionMsg(msg) + if err != nil { + peerLog.Debugf("New peer %v - error negotiating protocol: %v", + p, err) + p.Disconnect() + break out + } case *wire.MsgVerAck: p.StatsMtx.Lock() @@ -1530,6 +1629,12 @@ out: case *wire.MsgMemPool: p.handleMemPoolMsg(msg) + case *wire.MsgMiningState: + p.handleMiningStateMsg(msg) + + case *wire.MsgGetMiningState: + p.handleGetMiningStateMsg(msg) + case *wire.MsgTx: p.handleTxMsg(msg) @@ -1603,7 +1708,7 @@ out: func (p *peer) queueHandler() { pendingMsgs := list.New() invSendQueue := list.New() - trickleTicker := time.NewTicker(time.Second * 10) + trickleTicker := time.NewTicker(queueEmptyFrequency) defer trickleTicker.Stop() // We keep the waiting flag so that we know if we have a message queued @@ -1768,10 +1873,8 @@ out: // expects pong // Also set up statistics. p.StatsMtx.Lock() - if p.protocolVersion > wire.BIP0031Version { - p.lastPingNonce = m.Nonce - p.lastPingTime = time.Now() - } + p.lastPingNonce = m.Nonce + p.lastPingTime = time.Now() p.StatsMtx.Unlock() case *wire.MsgMemPool: // Should return an inv. @@ -1827,7 +1930,7 @@ cleanup: peerLog.Tracef("Peer output handler done for %s", p) } -// QueueMessage adds the passed bitcoin message to the peer send queue. It +// QueueMessage adds the passed decred message to the peer send queue. It // uses a buffered channel to communicate with the output handler goroutine so // it is automatically rate limited and safe for concurrent access. func (p *peer) QueueMessage(msg wire.Message, doneChan chan struct{}) { @@ -1933,20 +2036,20 @@ func (p *peer) Shutdown() { p.Disconnect() } -// newPeerBase returns a new base bitcoin peer for the provided server and +// newPeerBase returns a new base decred peer for the provided server and // inbound flag. This is used by the newInboundPeer and newOutboundPeer // functions to perform base setup needed by both types of peers. 
func newPeerBase(s *server, inbound bool) *peer { p := peer{ server: s, protocolVersion: maxProtocolVersion, - btcnet: s.chainParams.Net, + dcrnet: s.chainParams.Net, services: wire.SFNodeNetwork, inbound: inbound, knownAddresses: make(map[string]struct{}), knownInventory: NewMruInventoryMap(maxKnownInventory), - requestedTxns: make(map[wire.ShaHash]struct{}), - requestedBlocks: make(map[wire.ShaHash]struct{}), + requestedTxns: make(map[chainhash.Hash]struct{}), + requestedBlocks: make(map[chainhash.Hash]struct{}), filter: bloom.LoadFilter(nil), outputQueue: make(chan outMsg, outputBufferSize), sendQueue: make(chan outMsg, 1), // nonblocking sync @@ -1959,7 +2062,7 @@ func newPeerBase(s *server, inbound bool) *peer { return &p } -// newInboundPeer returns a new inbound bitcoin peer for the provided server and +// newInboundPeer returns a new inbound decred peer for the provided server and // connection. Use Start to begin processing incoming and outgoing messages. func newInboundPeer(s *server, conn net.Conn) *peer { p := newPeerBase(s, true) @@ -1970,7 +2073,7 @@ func newInboundPeer(s *server, conn net.Conn) *peer { return p } -// newOutbountPeer returns a new outbound bitcoin peer for the provided server and +// newOutbountPeer returns a new outbound decred peer for the provided server and // address and connects to it asynchronously. If the connection is successful // then the peer will also be started. func newOutboundPeer(s *server, addr string, persistent bool, retryCount int64) *peer { @@ -2019,7 +2122,7 @@ func newOutboundPeer(s *server, addr string, persistent bool, retryCount int64) time.Sleep(scaledDuration) } srvrLog.Debugf("Attempting to connect to %s", addr) - conn, err := btcdDial("tcp", addr) + conn, err := dcrdDial("tcp", addr) if err != nil { srvrLog.Debugf("Failed to connect to %s: %v", addr, err) p.server.donePeers <- p diff --git a/release/prep_release.sh b/release/prep_release.sh index c64824a6..be465052 100644 --- a/release/prep_release.sh +++ b/release/prep_release.sh @@ -1,6 +1,7 @@ #!/bin/sh # # Copyright (c) 2013 Conformal Systems LLC +# Copyright (c) 2016 The Decred Authors # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above @@ -22,11 +23,11 @@ # - Updates project changes file with release notes # -PROJECT=btcd +PROJECT=dcrd PROJECT_UC=$(echo $PROJECT | tr '[:lower:]' '[:upper:]') SCRIPT=$(basename $0) VERFILE=../version.go -VERFILES="$VERFILE ../cmd/btcctl/version.go" +VERFILES="$VERFILE ../cmd/dcrctl/version.go" PROJ_CHANGES=../CHANGES # verify params diff --git a/rpcserver.go b/rpcserver.go index 797eadef..13634b58 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
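
newPeerBase wires each peer's outputQueue as a buffered channel (outputBufferSize, raised to 5000 in this change), which is what lets QueueMessage stay safe for concurrent use and implicitly rate limited: producers only block once the buffer fills. A toy sketch of that producer/consumer shape is below; the buffer size is arbitrary and the outMsg type is a made-up stand-in, so this illustrates the channel pattern rather than the peer code itself:

```go
package main

import (
	"fmt"
	"sync"
)

// outMsg is a made-up message wrapper standing in for the peer's outMsg type.
type outMsg struct {
	command string
}

func main() {
	// Buffered channel in the role of peer.outputQueue; 5 is arbitrary here,
	// the diff uses outputBufferSize = 5000.
	outputQueue := make(chan outMsg, 5)

	var wg sync.WaitGroup
	wg.Add(1)

	// Consumer goroutine standing in for the peer's output handler.
	go func() {
		defer wg.Done()
		for m := range outputQueue {
			fmt.Println("writing message:", m.command)
		}
	}()

	// Any number of goroutines can safely queue messages; sends block only
	// when the buffer is full, which bounds how far producers can run ahead.
	for i := 0; i < 10; i++ {
		outputQueue <- outMsg{command: fmt.Sprintf("inv-%d", i)}
	}

	close(outputQueue)
	wg.Wait()
}
```
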
@@ -9,7 +10,6 @@ import ( "crypto/subtle" "crypto/tls" "encoding/base64" - "encoding/binary" "encoding/hex" "encoding/json" "errors" @@ -27,16 +27,19 @@ import ( "sync/atomic" "time" - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/btcec" - "github.com/btcsuite/btcd/btcjson" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" "github.com/btcsuite/fastsha256" "github.com/btcsuite/websocket" + + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainec" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/dcrjson" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) const ( @@ -51,20 +54,15 @@ const ( // getworkDataLen is the length of the data field of the getwork RPC. // It consists of the serialized block header plus the internal sha256 - // padding. The internal sha256 padding consists of a single 1 bit - // followed by enough zeros to pad the message out to 56 bytes followed - // by length of the message in bits encoded as a big-endian uint64 - // (8 bytes). Thus, the resulting length is a multiple of the sha256 - // block size (64 bytes). - getworkDataLen = (1 + ((wire.MaxBlockHeaderPayload + 8) / - fastsha256.BlockSize)) * fastsha256.BlockSize + // padding. The internal padding for BLAKE256 with 180 bytes is + // 4*3 bytes. + getworkDataLen = ((wire.MaxBlockHeaderPayload + 4*3) / + chainhash.HashBlockSize) * chainhash.HashBlockSize - // hash1Len is the length of the hash1 field of the getwork RPC. It - // consists of a zero hash plus the internal sha256 padding. See - // the getworkDataLen comment for details about the internal sha256 - // padding format. - hash1Len = (1 + ((wire.HashSize + 8) / fastsha256.BlockSize)) * - fastsha256.BlockSize + // getworkExpirationDiff is the number of blocks below the current + // best block in height to begin pruning out old block work from + // the template pool. + getworkExpirationDiff = 3 // gbtNonceRange is two 32-bit big-endian hexadecimal integers which // represent the valid ranges of nonces returned by the getblocktemplate @@ -91,7 +89,7 @@ var ( // in the coinbase signature script. It is declared here to avoid the // overhead of creating a new object on every invocation for constant // data. - gbtCoinbaseAux = &btcjson.GetBlockTemplateResultAux{ + gbtCoinbaseAux = &dcrjson.GetBlockTemplateResultAux{ Flags: hex.EncodeToString(builderScript(txscript. NewScriptBuilder().AddData([]byte(coinbaseFlags)))), } @@ -107,15 +105,15 @@ var ( var ( // ErrRPCUnimplemented is an error returned to RPC clients when the // provided command is recognized, but not implemented. - ErrRPCUnimplemented = &btcjson.RPCError{ - Code: btcjson.ErrRPCUnimplemented, + ErrRPCUnimplemented = &dcrjson.RPCError{ + Code: dcrjson.ErrRPCUnimplemented, Message: "Command unimplemented", } // ErrRPCNoWallet is an error returned to RPC clients when the provided // command is recognized as a wallet command. 
- ErrRPCNoWallet = &btcjson.RPCError{ - Code: btcjson.ErrRPCNoWallet, + ErrRPCNoWallet = &dcrjson.RPCError{ + Code: dcrjson.ErrRPCNoWallet, Message: "This implementation does not implement wallet commands", } ) @@ -128,10 +126,15 @@ type commandHandler func(*rpcServer, interface{}, <-chan struct{}) (interface{}, var rpcHandlers map[string]commandHandler var rpcHandlersBeforeInit = map[string]commandHandler{ "addnode": handleAddNode, + "createrawsstx": handleCreateRawSStx, + "createrawssgentx": handleCreateRawSSGenTx, + "createrawssrtx": handleCreateRawSSRtx, "createrawtransaction": handleCreateRawTransaction, "debuglevel": handleDebugLevel, "decoderawtransaction": handleDecodeRawTransaction, "decodescript": handleDecodeScript, + "estimatefee": handleEstimateFee, + "existsaddress": handleExistsAddress, "generate": handleGenerate, "getaddednodeinfo": handleGetAddedNodeInfo, "getbestblock": handleGetBestBlock, @@ -152,24 +155,29 @@ var rpcHandlersBeforeInit = map[string]commandHandler{ "getpeerinfo": handleGetPeerInfo, "getrawmempool": handleGetRawMempool, "getrawtransaction": handleGetRawTransaction, + "getstakedifficulty": handleGetStakeDifficulty, "gettxout": handleGetTxOut, "getwork": handleGetWork, "help": handleHelp, + "missedtickets": handleMissedTickets, "node": handleNode, "ping": handlePing, + "rebroadcastmissed": handleRebroadcastMissed, + "rebroadcastwinners": handleRebroadcastWinners, "searchrawtransactions": handleSearchRawTransactions, "sendrawtransaction": handleSendRawTransaction, "setgenerate": handleSetGenerate, "stop": handleStop, "submitblock": handleSubmitBlock, + "ticketsforaddress": handleTicketsForAddress, "validateaddress": handleValidateAddress, "verifychain": handleVerifyChain, "verifymessage": handleVerifyMessage, } -// list of commands that we recognise, but for which btcd has no support because +// list of commands that we recognise, but for which dcrd has no support because // it lacks support for wallet functionality. For these commands the user -// should ask a connected instance of btcwallet. +// should ask a connected instance of dcrwallet. var rpcAskWallet = map[string]struct{}{ "addmultisigaddress": struct{}{}, "backupwallet": struct{}{}, @@ -217,7 +225,6 @@ var rpcAskWallet = map[string]struct{}{ // Commands that are currently unimplemented, but should ultimately be. var rpcUnimplemented = map[string]struct{}{ - "estimatefee": struct{}{}, "estimatepriority": struct{}{}, "getblockchaininfo": struct{}{}, "getchaintips": struct{}{}, @@ -277,19 +284,19 @@ func builderScript(builder *txscript.ScriptBuilder) []byte { // RPC server subsystem since internal errors really should not occur. The // context parameter is only used in the log message and may be empty if it's // not needed. -func internalRPCError(errStr, context string) *btcjson.RPCError { +func internalRPCError(errStr, context string) *dcrjson.RPCError { logStr := errStr if context != "" { logStr = context + ": " + errStr } rpcsLog.Error(logStr) - return btcjson.NewRPCError(btcjson.ErrRPCInternal.Code, errStr) + return dcrjson.NewRPCError(dcrjson.ErrRPCInternal.Code, errStr) } // rpcDecodeHexError is a convenience function for returning a nicely formatted // RPC error which indicates the provided hex string failed to decode. 
-func rpcDecodeHexError(gotHex string) *btcjson.RPCError { - return btcjson.NewRPCError(btcjson.ErrRPCDecodeHexString, +func rpcDecodeHexError(gotHex string) *dcrjson.RPCError { + return dcrjson.NewRPCError(dcrjson.ErrRPCDecodeHexString, fmt.Sprintf("Argument must be hexadecimal string (not %q)", gotHex)) } @@ -297,8 +304,8 @@ func rpcDecodeHexError(gotHex string) *btcjson.RPCError { // workStateBlockInfo houses information about how to reconstruct a block given // its template and signature script. type workStateBlockInfo struct { - msgBlock *wire.MsgBlock - signatureScript []byte + msgBlock *wire.MsgBlock + pkScript []byte } // workState houses state that is used in between multiple RPC invocations to @@ -307,18 +314,15 @@ type workState struct { sync.Mutex lastTxUpdate time.Time lastGenerated time.Time - prevHash *wire.ShaHash + prevHash *chainhash.Hash msgBlock *wire.MsgBlock extraNonce uint64 - blockInfo map[wire.ShaHash]*workStateBlockInfo } // newWorkState returns a new instance of a workState with all internal fields // initialized and ready to use. func newWorkState() *workState { - return &workState{ - blockInfo: make(map[wire.ShaHash]*workStateBlockInfo), - } + return &workState{} } // gbtWorkState houses state that is used in between multiple RPC invocations to @@ -327,10 +331,10 @@ type gbtWorkState struct { sync.Mutex lastTxUpdate time.Time lastGenerated time.Time - prevHash *wire.ShaHash + prevHash *chainhash.Hash minTimestamp time.Time template *BlockTemplate - notifyMap map[wire.ShaHash]map[int64]chan struct{} + notifyMap map[chainhash.Hash]map[int64]chan struct{} timeSource blockchain.MedianTimeSource } @@ -338,7 +342,7 @@ type gbtWorkState struct { // fields initialized and ready to use. func newGbtWorkState(timeSource blockchain.MedianTimeSource) *gbtWorkState { return &gbtWorkState{ - notifyMap: make(map[wire.ShaHash]map[int64]chan struct{}), + notifyMap: make(map[chainhash.Hash]map[int64]chan struct{}), timeSource: timeSource, } } @@ -351,14 +355,14 @@ func handleUnimplemented(s *rpcServer, cmd interface{}, closeChan <-chan struct{ // handleAskWallet is the handler for commands that are recognized as valid, but // are unable to answer correctly since it involves wallet state. -// These commands will be implemented in btcwallet. +// These commands will be implemented in dcrwallet. func handleAskWallet(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { return nil, ErrRPCNoWallet } // handleAddNode handles addnode commands. func handleAddNode(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.AddNodeCmd) + c := cmd.(*dcrjson.AddNodeCmd) addr := normalizeAddress(c.Addr, activeNetParams.DefaultPort) var err error @@ -370,15 +374,15 @@ func handleAddNode(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (in case "onetry": err = s.server.ConnectNode(addr, false) default: - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidParameter, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, Message: "invalid subcommand for addnode", } } if err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidParameter, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, Message: err.Error(), } } @@ -389,7 +393,7 @@ func handleAddNode(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (in // handleNode handles node commands. 
func handleNode(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.NodeCmd) + c := cmd.(*dcrjson.NodeCmd) var addr string var nodeId uint64 @@ -406,15 +410,15 @@ func handleNode(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (inter addr = normalizeAddress(c.Target, activeNetParams.DefaultPort) err = s.server.DisconnectNodeByAddr(addr) } else { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidParameter, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, Message: "invalid address or node ID", } } } if err != nil && peerExists(s.server.PeerInfo(), addr, int32(nodeId)) { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCMisc, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCMisc, Message: "can't disconnect a permanent peer, use remove", } } @@ -429,15 +433,15 @@ func handleNode(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (inter addr = normalizeAddress(c.Target, activeNetParams.DefaultPort) err = s.server.RemoveNodeByAddr(addr) } else { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidParameter, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, Message: "invalid address or node ID", } } } if err != nil && peerExists(s.server.PeerInfo(), addr, int32(nodeId)) { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCMisc, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCMisc, Message: "can't remove a temporary peer, use disconnect", } } @@ -454,21 +458,21 @@ func handleNode(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (inter case "perm", "temp": err = s.server.ConnectNode(addr, subCmd == "perm") default: - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidParameter, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, Message: "invalid subcommand for node connect", } } default: - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidParameter, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, Message: "invalid subcommand for node", } } if err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidParameter, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, Message: err.Error(), } } @@ -480,7 +484,7 @@ func handleNode(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (inter // peerExists determines if a certain peer is currently connected given // information about all currently connected peers. Peer existence is // determined using either a target address or node id. -func peerExists(peerInfos []*btcjson.GetPeerInfoResult, addr string, nodeId int32) bool { +func peerExists(peerInfos []*dcrjson.GetPeerInfoResult, addr string, nodeId int32) bool { for _, peerInfo := range peerInfos { if peerInfo.ID == nodeId || peerInfo.Addr == addr { return true @@ -503,18 +507,26 @@ func messageToHex(msg wire.Message) (string, error) { // handleCreateRawTransaction handles createrawtransaction commands. func handleCreateRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.CreateRawTransactionCmd) + c := cmd.(*dcrjson.CreateRawTransactionCmd) // Add all transaction inputs to a new transaction after performing // some validity checks. 
mtx := wire.NewMsgTx() for _, input := range c.Inputs { - txHash, err := wire.NewShaHashFromStr(input.Txid) + txHash, err := chainhash.NewHashFromStr(input.Txid) if err != nil { return nil, rpcDecodeHexError(input.Txid) } - prevOut := wire.NewOutPoint(txHash, uint32(input.Vout)) + if !(int8(input.Tree) == dcrutil.TxTreeRegular || + int8(input.Tree) == dcrutil.TxTreeStake) { + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParams.Code, + Message: "Invalid parameter, tx tree must be regular or stake", + } + } + + prevOut := wire.NewOutPoint(txHash, uint32(input.Vout), int8(input.Tree)) txIn := wire.NewTxIn(prevOut, []byte{}) mtx.AddTxIn(txIn) } @@ -523,19 +535,19 @@ func handleCreateRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan // some validity checks. for encodedAddr, amount := range c.Amounts { // Ensure amount is in the valid range for monetary amounts. - if amount <= 0 || amount > btcutil.MaxSatoshi { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCType, + if amount <= 0 || amount > dcrutil.MaxAmount { + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCType, Message: "Invalid amount", } } // Decode the provided address. - addr, err := btcutil.DecodeAddress(encodedAddr, + addr, err := dcrutil.DecodeAddress(encodedAddr, activeNetParams.Params) if err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidAddressOrKey, + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidAddressOrKey, Message: "Invalid address or key: " + err.Error(), } } @@ -544,17 +556,17 @@ func handleCreateRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan // the network encoded with the address matches the network the // server is currently on. switch addr.(type) { - case *btcutil.AddressPubKeyHash: - case *btcutil.AddressScriptHash: + case *dcrutil.AddressPubKeyHash: + case *dcrutil.AddressScriptHash: default: - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidAddressOrKey, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidAddressOrKey, Message: "Invalid address or key", } } if !addr.IsForNet(s.server.chainParams) { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidAddressOrKey, + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidAddressOrKey, Message: "Invalid address: " + encodedAddr + " is for the wrong network", } @@ -563,25 +575,626 @@ func handleCreateRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan // Create a new script which pays to the provided address. pkScript, err := txscript.PayToAddrScript(addr) if err != nil { - context := "Failed to generate pay-to-address script" - return nil, internalRPCError(err.Error(), context) + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInternal.Code, + Message: err.Error(), + } } - // Convert the amount to satoshi. - satoshi, err := btcutil.NewAmount(amount) + atomic, err := dcrutil.NewAmount(amount) if err != nil { - context := "Failed to convert amount" - return nil, internalRPCError(err.Error(), context) + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInternal.Code, + Message: err.Error(), + } } - txOut := wire.NewTxOut(int64(satoshi), pkScript) + txOut := wire.NewTxOut(int64(atomic), pkScript) mtx.AddTxOut(txOut) } - // Return the serialized and hex-encoded transaction. Note that this - // is intentionally not directly returning because the first return - // value is a string and it would result in returning an empty string to - // the client instead of nothing (nil) in the case of an error. 
+ // Return the serialized and hex-encoded transaction. + mtxHex, err := messageToHex(mtx) + if err != nil { + return nil, err + } + return mtxHex, nil +} + +// handleCreateRawSStx handles createrawsstx commands. +func handleCreateRawSStx(s *rpcServer, + cmd interface{}, + closeChan <-chan struct{}) (interface{}, error) { + c := cmd.(*dcrjson.CreateRawSStxCmd) + + // Basic sanity checks for the information coming from the cmd. + if len(c.Inputs) != len(c.COuts) { + errStr := fmt.Sprintf("Number of inputs should be equal to "+ + "the number of future commitment/change outs for any sstx;"+ + " %v inputs given, but %v COuts", len(c.Inputs), + len(c.COuts)) + return nil, errors.New(errStr) + } + if len(c.Amount) != 1 { + errStr := fmt.Sprintf("Only one SSGen tagged output is allowed "+ + "per sstx; len ssgenout %v", len(c.Amount)) + return nil, errors.New(errStr) + } + + // Add all transaction inputs to a new transaction after performing + // some validity checks. + mtx := wire.NewMsgTx() + for _, input := range c.Inputs { + txHash, err := chainhash.NewHashFromStr(input.Txid) + if err != nil { + return nil, rpcDecodeHexError(input.Txid) + } + + if input.Vout < 0 { + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, + Message: "Invalid parameter, vout must be positive", + } + } + + if !(int8(input.Tree) == dcrutil.TxTreeRegular || + int8(input.Tree) == dcrutil.TxTreeStake) { + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, + Message: "Invalid parameter, tx tree must be regular or stake", + } + } + + prevOut := wire.NewOutPoint(txHash, uint32(input.Vout), int8(input.Tree)) + txIn := wire.NewTxIn(prevOut, []byte{}) + mtx.AddTxIn(txIn) + } + + // Add all transaction outputs to the transaction after performing + // some validity checks. + amtTicket := int64(0) + + for encodedAddr, amount := range c.Amount { + // Ensure amount is in the valid range for monetary amounts. + if amount <= 0 || amount > dcrutil.MaxAmount { + errStr := fmt.Sprintf("Invalid sstx commitment amount %v", amount) + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCType, + Message: errStr, + } + } + + // Decode the provided address. + addr, err := dcrutil.DecodeAddress(encodedAddr, + activeNetParams.Params) + if err != nil { + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidAddressOrKey, + Message: "Invalid address or key: " + err.Error(), + } + } + + // Ensure the address is one of the supported types and that + // the network encoded with the address matches the network the + // server is currently on. + switch addr.(type) { + case *dcrutil.AddressPubKeyHash: + default: + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidAddressOrKey, + Message: "Invalid address or key", + } + } + if !addr.IsForNet(s.server.chainParams) { + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidAddressOrKey, + Message: "Invalid address: " + encodedAddr + + " is for the wrong network", + } + } + + // Create a new script which pays to the provided address with an + // SStx tagged output. + pkScript, err := txscript.PayToSStx(addr) + if err != nil { + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInternal.Code, + Message: err.Error(), + } + } + + txOut := wire.NewTxOut(amount, pkScript) + mtx.AddTxOut(txOut) + + amtTicket += amount + } + + // Calculated the commitment amounts, then create the + // addresses and payout proportions as null data + // outputs. 
+	inputAmts := make([]int64, len(c.Inputs))
+	for i, input := range c.Inputs {
+		inputAmts[i] = input.Amt
+	}
+	changeAmts := make([]int64, len(c.COuts))
+	for i, cout := range c.COuts {
+		changeAmts[i] = cout.ChangeAmt
+	}
+
+	// Check and make sure none of the change overflows
+	// the input amounts.
+	for i, amt := range inputAmts {
+		if changeAmts[i] >= amt {
+			errStr := fmt.Sprintf("Invalid sstx change amount %v; "+
+				"should have been less than input amt of %v",
+				changeAmts[i], amt)
+			return nil, dcrjson.RPCError{
+				Code:    dcrjson.ErrRPCType,
+				Message: errStr,
+			}
+		}
+	}
+
+	// Obtain the commitment amounts.
+	_, amountsCommitted, err := stake.GetSStxNullOutputAmounts(inputAmts,
+		changeAmts, amtTicket)
+
+	if err != nil {
+		return nil, err
+	}
+
+	for i, cout := range c.COuts {
+		// 1. Append future commitment output.
+		addr, err := dcrutil.DecodeAddress(cout.Addr,
+			activeNetParams.Params)
+		if err != nil {
+			return nil, fmt.Errorf("cannot decode address: %s", err)
+		}
+
+		// Ensure the address is one of the supported types and that
+		// the network encoded with the address matches the network the
+		// server is currently on.
+		switch addr.(type) {
+		case *dcrutil.AddressPubKeyHash:
+			break
+		case *dcrutil.AddressScriptHash:
+			break
+		default:
+			return nil, &dcrjson.RPCError{
+				Code:    dcrjson.ErrRPCInvalidAddressOrKey,
+				Message: "Invalid address or key",
+			}
+		}
+		if !addr.IsForNet(s.server.chainParams) {
+			return nil, dcrjson.RPCError{
+				Code: dcrjson.ErrRPCInvalidAddressOrKey,
+				Message: "Invalid address: " + cout.Addr +
+					" is for the wrong network",
+			}
+		}
+
+		// Create an OP_RETURN push containing the pubkeyhash to send rewards to.
+		// TODO Replace 0x0000 fee limits with an argument passed to the RPC call.
+		pkScript, err := txscript.GenerateSStxAddrPush(addr,
+			dcrutil.Amount(amountsCommitted[i]), 0x0000)
+		if err != nil {
+			return nil, fmt.Errorf("cannot create txout script: %s", err)
+		}
+		txout := wire.NewTxOut(int64(0), pkScript)
+		mtx.AddTxOut(txout)
+
+		// 2. Append change output.
+
+		// Ensure amount is in the valid range for monetary amounts.
+		if cout.ChangeAmt < 0 || cout.ChangeAmt > dcrutil.MaxAmount {
+			errStr := fmt.Sprintf("Invalid sstx change amount %v",
+				cout.ChangeAmt)
+			return nil, dcrjson.RPCError{
+				Code:    dcrjson.ErrRPCType,
+				Message: errStr,
+			}
+		}
+
+		// Decode the provided address.
+		addr, err = dcrutil.DecodeAddress(cout.ChangeAddr,
+			activeNetParams.Params)
+		if err != nil {
+			return nil, dcrjson.RPCError{
+				Code:    dcrjson.ErrRPCInvalidAddressOrKey,
+				Message: "Invalid address or key: " + err.Error(),
+			}
+		}
+
+		// Ensure the address is one of the supported types and that
+		// the network encoded with the address matches the network the
+		// server is currently on.
+		switch addr.(type) {
+		case *dcrutil.AddressPubKeyHash:
+			break
+		case *dcrutil.AddressScriptHash:
+			break
+		default:
+			return nil, &dcrjson.RPCError{
+				Code:    dcrjson.ErrRPCInvalidAddressOrKey,
+				Message: "Invalid address or key",
+			}
+		}
+		if !addr.IsForNet(s.server.chainParams) {
+			return nil, dcrjson.RPCError{
+				Code: dcrjson.ErrRPCInvalidAddressOrKey,
+				Message: fmt.Sprintf("%s: %q",
+					"Wrong network",
+					addr),
+			}
+		}
+
+		// Create a new script which pays to the provided address with an
+		// SStx change tagged output.
+ pkScript, err = txscript.PayToSStxChange(addr) + if err != nil { + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInternal.Code, + Message: err.Error(), + } + } + + txOut := wire.NewTxOut(cout.ChangeAmt, pkScript) + mtx.AddTxOut(txOut) + } + + // Make sure we generated a valid SStx. + if _, err := stake.IsSStx(dcrutil.NewTx(mtx)); err != nil { + return nil, err + } + + // Return the serialized and hex-encoded transaction. + mtxHex, err := messageToHex(mtx) + if err != nil { + return nil, err + } + return mtxHex, nil +} + +// handleCreateRawSSGenTx handles createrawssgentx commands. +func handleCreateRawSSGenTx(s *rpcServer, + cmd interface{}, + closeChan <-chan struct{}) (interface{}, error) { + c := cmd.(*dcrjson.CreateRawSSGenTxCmd) + + // Only a single SStx should be given + if len(c.Inputs) != 1 { + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, + Message: "Invalid parameter, SSGen tx only have one valid input", + } + } + + // 1. Fetch the SStx, then calculate all the values we'll need later for + // the generation of the SSGen tx outputs. + // + // Convert the provided transaction hash hex to a ShaHash. + txSha, err := chainhash.NewHashFromStr(c.Inputs[0].Txid) + if err != nil { + rpcsLog.Errorf("Error generating sha: %v", err) + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCBlockNotFound, + Message: "c.Inputs[0].Txid must be a hexadecimal string", + } + } + + // Try to fetch the transaction from the memory pool and if that fails, + // try the block database. + var sstxmtx *wire.MsgTx + tx, err := s.server.txMemPool.FetchTransaction(txSha) + if err != nil { + txList, err := s.server.db.FetchTxBySha(txSha) + if err != nil { + rpcsLog.Errorf("Error fetching tx: %v", err) + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCNoTxInfo, + Message: "No information available about transaction", + } + } + if len(txList) == 0 { + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCNoTxInfo, + Message: "No information available about transaction", + } + } + + lastTx := len(txList) - 1 + sstxmtx = txList[lastTx].Tx + } else { + sstxmtx = tx.MsgTx() + } + + // Store the sstx pubkeyhashes and amounts as found in the transaction + // outputs. + sstx := dcrutil.NewTx(sstxmtx) + ssgenPayTypes, ssgenPkhs, sstxAmts, _, _, _ := + stake.GetSStxStakeOutputInfo(sstx) + + // Get the current reward. + blockSha, curHeight := s.server.blockManager.chainState.Best() + stakeVoteSubsidy := blockchain.CalcStakeVoteSubsidy(curHeight, + activeNetParams.Params) + + // Calculate the output values from this data. + ssgenCalcAmts := stake.GetStakeRewards(sstxAmts, + sstxmtx.TxOut[0].Value, + stakeVoteSubsidy) + + // 2. Add all transaction inputs to a new transaction after performing + // some validity checks. First, add the stake base, then the OP_SSTX + // tagged output. 
+ mtx := wire.NewMsgTx() + + stakeBaseOutPoint := wire.NewOutPoint(&chainhash.Hash{}, + uint32(0xFFFFFFFF), + int8(0x01)) + txInStakeBase := wire.NewTxIn(stakeBaseOutPoint, []byte{}) + mtx.AddTxIn(txInStakeBase) + + for _, input := range c.Inputs { + txHash, err := chainhash.NewHashFromStr(input.Txid) + if err != nil { + return nil, dcrjson.NewRPCError(dcrjson.ErrRPCDecodeHexString, + fmt.Sprintf("Argument must be hexadecimal string (not %q)", + txHash)) + } + + if input.Vout < 0 { + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, + Message: "Invalid parameter, vout must be positive", + } + } + + if !(int8(input.Tree) == dcrutil.TxTreeStake) { + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, + Message: "Invalid parameter, tx tree of sstx input must be stake", + } + } + + prevOut := wire.NewOutPoint(txHash, uint32(input.Vout), int8(input.Tree)) + txIn := wire.NewTxIn(prevOut, []byte{}) + mtx.AddTxIn(txIn) + } + + // 3. Add the OP_RETURN null data pushes of the block header hash, + // the block height, and votebits, then add all the OP_SSGEN tagged + // outputs. + // + // Block reference output. + blockRefScript, err := txscript.GenerateSSGenBlockRef(*blockSha, + uint32(curHeight)) + if err != nil { + errStr := fmt.Sprintf("SSGen block reference output failed to generate"+ + ": %v", err) + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, + Message: errStr, + } + } + blockRefOut := wire.NewTxOut(0, blockRefScript) + mtx.AddTxOut(blockRefOut) + + // Votebits output. + blockVBScript, err := txscript.GenerateSSGenVotes(c.VoteBits) + if err != nil { + errStr := fmt.Sprintf("SSGen votebits output failed to generate"+ + ": %v", err) + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, + Message: errStr, + } + } + blockVBOut := wire.NewTxOut(0, blockVBScript) + mtx.AddTxOut(blockVBOut) + + // Add all the SSGen-tagged transaction outputs to the transaction after + // performing some validity checks. + for i, ssgenPkh := range ssgenPkhs { + // Ensure amount is in the valid range for monetary amounts. + if ssgenCalcAmts[i] <= 0 || ssgenCalcAmts[i] > dcrutil.MaxAmount { + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCType, + Message: "Invalid amount", + } + } + + // Create a new script which pays to the provided address specified in + // the original ticket tx. + var ssgenOutScript []byte + switch ssgenPayTypes[i] { + case false: // P2PKH + ssgenOutScript, err = txscript.PayToSSGenPKHDirect(ssgenPkh) + if err != nil { + errStr := fmt.Sprintf("SSGen out generation failure, pkh bad"+ + ": %v", err) + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, + Message: errStr, + } + } + case true: // P2SH + ssgenOutScript, err = txscript.PayToSSGenSHDirect(ssgenPkh) + if err != nil { + errStr := fmt.Sprintf("SSGen out generation failure, sh bad"+ + ": %v", err) + return nil, dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, + Message: errStr, + } + } + } + + // Add the txout to our SSGen tx. + txOut := wire.NewTxOut(ssgenCalcAmts[i], ssgenOutScript) + mtx.AddTxOut(txOut) + } + + // Check to make sure our SSGen was created correctly. + ssgenTx := dcrutil.NewTx(mtx) + _, err = stake.IsSSGen(ssgenTx) + if err != nil { + return nil, err + } + + // Return the serialized and hex-encoded transaction. + mtxHex, err := messageToHex(mtx) + if err != nil { + return nil, err + } + return mtxHex, nil +} + +// handleCreateRawSSRtx handles createrawssrtx commands. 
+func handleCreateRawSSRtx(s *rpcServer,
+	cmd interface{},
+	closeChan <-chan struct{}) (interface{}, error) {
+	c := cmd.(*dcrjson.CreateRawSSRtxCmd)
+
+	// Only a single SStx should be given.
+	if len(c.Inputs) != 1 {
+		return nil, dcrjson.RPCError{
+			Code:    dcrjson.ErrRPCInvalidParameter,
+			Message: "Invalid parameter, an SSRtx may only have one valid input",
+		}
+	}
+
+	// 1. Fetch the SStx, then calculate all the values we'll need later for
+	// the generation of the SSRtx tx outputs.
+	//
+	// Convert the provided transaction hash hex to a ShaHash.
+	txSha, err := chainhash.NewHashFromStr(c.Inputs[0].Txid)
+	if err != nil {
+		rpcsLog.Errorf("Error generating sha: %v", err)
+		return nil, dcrjson.RPCError{
+			Code:    dcrjson.ErrRPCBlockNotFound,
+			Message: "c.Inputs[0].Txid must be a hexadecimal string",
+		}
+	}
+
+	// Try to fetch the transaction from the memory pool and if that fails,
+	// try the block database.
+	var sstxmtx *wire.MsgTx
+	tx, err := s.server.txMemPool.FetchTransaction(txSha)
+	if err != nil {
+		txList, err := s.server.db.FetchTxBySha(txSha)
+		if err != nil {
+			rpcsLog.Errorf("Error fetching tx: %v", err)
+			return nil, &dcrjson.RPCError{
+				Code:    dcrjson.ErrRPCNoTxInfo,
+				Message: "No information available about transaction",
+			}
+		}
+		if len(txList) == 0 {
+			return nil, &dcrjson.RPCError{
+				Code:    dcrjson.ErrRPCNoTxInfo,
+				Message: "No information available about transaction",
+			}
+		}
+
+		lastTx := len(txList) - 1
+		sstxmtx = txList[lastTx].Tx
+	} else {
+		sstxmtx = tx.MsgTx()
+	}
+
+	// Store the sstx pubkeyhashes and amounts as found in the transaction
+	// outputs.
+	sstx := dcrutil.NewTx(sstxmtx)
+	ssrtxPayTypes, ssrtxPkhs, sstxAmts, _, _, _ :=
+		stake.GetSStxStakeOutputInfo(sstx)
+
+	// 2. Add all transaction inputs to a new transaction after performing
+	// some validity checks; the only input for an SSRtx is an OP_SSTX tagged
+	// output.
+	mtx := wire.NewMsgTx()
+	for _, input := range c.Inputs {
+		txHash, err := chainhash.NewHashFromStr(input.Txid)
+		if err != nil {
+			return nil, dcrjson.NewRPCError(dcrjson.ErrRPCDecodeHexString,
+				fmt.Sprintf("Argument must be hexadecimal string"))
+		}
+
+		if input.Vout < 0 {
+			return nil, dcrjson.RPCError{
+				Code:    dcrjson.ErrRPCInvalidParameter,
+				Message: "Invalid parameter, vout must be positive",
+			}
+		}
+
+		if !(int8(input.Tree) == dcrutil.TxTreeStake) {
+			return nil, dcrjson.RPCError{
+				Code:    dcrjson.ErrRPCInvalidParameter,
+				Message: "Invalid parameter, tx tree of sstx input must be stake",
+			}
+		}
+
+		prevOut := wire.NewOutPoint(txHash, uint32(input.Vout), int8(input.Tree))
+		txIn := wire.NewTxIn(prevOut, []byte{})
+		mtx.AddTxIn(txIn)
+	}
+
+	// 3. Add all the OP_SSRTX tagged outputs.
+
+	// Add all the SSRtx-tagged transaction outputs to the transaction after
+	// performing some validity checks.
+	for i, ssrtxPkh := range ssrtxPkhs {
+		// Ensure amount is in the valid range for monetary amounts.
+		if sstxAmts[i] <= 0 || sstxAmts[i] > dcrutil.MaxAmount {
+			return nil, dcrjson.RPCError{
+				Code:    dcrjson.ErrRPCType,
+				Message: "Invalid amount",
+			}
+		}
+
+		// Create a new script which pays to the provided address specified in
+		// the original ticket tx.
+		var ssrtxOutScript []byte
+		switch ssrtxPayTypes[i] {
+		case false: // P2PKH
+			ssrtxOutScript, err = txscript.PayToSSRtxPKHDirect(ssrtxPkh)
+			if err != nil {
+				errStr := fmt.Sprintf("SSRtx out generation failure, pkh bad"+
+					": %v", err)
+				return nil, dcrjson.RPCError{
+					Code:    dcrjson.ErrRPCInvalidParameter,
+					Message: errStr,
+				}
+			}
+		case true: // P2SH
+			ssrtxOutScript, err = txscript.PayToSSRtxSHDirect(ssrtxPkh)
+			if err != nil {
+				errStr := fmt.Sprintf("SSRtx out generation failure, sh bad"+
+					": %v", err)
+				return nil, dcrjson.RPCError{
+					Code:    dcrjson.ErrRPCInvalidParameter,
+					Message: errStr,
+				}
+			}
+		}
+
+		// Add the txout to our SSRtx.
+		txOut := wire.NewTxOut(sstxAmts[i], ssrtxOutScript)
+		mtx.AddTxOut(txOut)
+	}
+
+	// Check to make sure our SSRtx was created correctly.
+	ssrtxTx := dcrutil.NewTx(mtx)
+	_, err = stake.IsSSRtx(ssrtxTx)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return the serialized and hex-encoded transaction.
 	mtxHex, err := messageToHex(mtx)
 	if err != nil {
 		return nil, err
@@ -591,7 +1204,7 @@ func handleCreateRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan

 // handleDebugLevel handles debuglevel commands.
 func handleDebugLevel(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
-	c := cmd.(*btcjson.DebugLevelCmd)
+	c := cmd.(*dcrjson.DebugLevelCmd)

 	// Special show command to list supported subsystems.
 	if c.LevelSpec == "show" {
@@ -601,8 +1214,8 @@ func handleDebugLevel(s *rpcServer, cmd interface{}, closeChan <-chan struct{})

 	err := parseAndSetDebugLevels(c.LevelSpec)
 	if err != nil {
-		return nil, &btcjson.RPCError{
-			Code:    btcjson.ErrRPCInvalidParams.Code,
+		return nil, &dcrjson.RPCError{
+			Code:    dcrjson.ErrRPCInvalidParams.Code,
 			Message: err.Error(),
 		}
 	}
@@ -612,24 +1225,28 @@ func handleDebugLevel(s *rpcServer, cmd interface{}, closeChan <-chan struct{})

 // createVinList returns a slice of JSON objects for the inputs of the passed
 // transaction.
-func createVinList(mtx *wire.MsgTx) []btcjson.Vin {
-	tx := btcutil.NewTx(mtx)
-	vinList := make([]btcjson.Vin, len(mtx.TxIn))
+func createVinList(mtx *wire.MsgTx) []dcrjson.Vin {
+	tx := dcrutil.NewTx(mtx)
+	vinList := make([]dcrjson.Vin, len(mtx.TxIn))
 	for i, v := range mtx.TxIn {
 		if blockchain.IsCoinBase(tx) {
 			vinList[i].Coinbase = hex.EncodeToString(v.SignatureScript)
 		} else {
 			vinList[i].Txid = v.PreviousOutPoint.Hash.String()
 			vinList[i].Vout = v.PreviousOutPoint.Index
+			vinList[i].Tree = v.PreviousOutPoint.Tree

 			// The disassembled string will contain [error] inline
 			// if the script doesn't fully parse, so ignore the
 			// error here.
 			disbuf, _ := txscript.DisasmString(v.SignatureScript)
-			vinList[i].ScriptSig = new(btcjson.ScriptSig)
+			vinList[i].ScriptSig = new(dcrjson.ScriptSig)
 			vinList[i].ScriptSig.Asm = disbuf
 			vinList[i].ScriptSig.Hex = hex.EncodeToString(v.SignatureScript)
 		}
+		vinList[i].AmountIn = v.ValueIn
+		vinList[i].BlockHeight = v.BlockHeight
+		vinList[i].BlockIndex = v.BlockIndex
 		vinList[i].Sequence = v.Sequence
 	}

@@ -638,11 +1255,12 @@ func createVinList(mtx *wire.MsgTx) []btcjson.Vin {

 // createVoutList returns a slice of JSON objects for the outputs of the passed
 // transaction.
-func createVoutList(mtx *wire.MsgTx, chainParams *chaincfg.Params) []btcjson.Vout { - voutList := make([]btcjson.Vout, len(mtx.TxOut)) +func createVoutList(mtx *wire.MsgTx, chainParams *chaincfg.Params) []dcrjson.Vout { + voutList := make([]dcrjson.Vout, len(mtx.TxOut)) for i, v := range mtx.TxOut { voutList[i].N = uint32(i) - voutList[i].Value = float64(v.Value) / btcutil.SatoshiPerBitcoin + voutList[i].Value = float64(v.Value) / dcrutil.AtomsPerCoin + voutList[i].Version = v.Version // The disassembled string will contain [error] inline if the // script doesn't fully parse, so ignore the error here. @@ -654,7 +1272,7 @@ func createVoutList(mtx *wire.MsgTx, chainParams *chaincfg.Params) []btcjson.Vou // couldn't parse and there is no additional information about // it anyways. scriptClass, addrs, reqSigs, _ := txscript.ExtractPkScriptAddrs( - v.PkScript, chainParams) + v.Version, v.PkScript, chainParams) voutList[i].ScriptPubKey.Type = scriptClass.String() voutList[i].ScriptPubKey.ReqSigs = int32(reqSigs) @@ -674,21 +1292,25 @@ func createVoutList(mtx *wire.MsgTx, chainParams *chaincfg.Params) []btcjson.Vou // createTxRawResult converts the passed transaction and associated parameters // to a raw transaction JSON object. func createTxRawResult(chainParams *chaincfg.Params, txHash string, - mtx *wire.MsgTx, blk *btcutil.Block, maxIdx int64, - blkHash *wire.ShaHash) (*btcjson.TxRawResult, error) { + mtx *wire.MsgTx, blk *dcrutil.Block, maxIdx int64, + blkHash *chainhash.Hash, blkHeight int64, + blkIdx uint32) (*dcrjson.TxRawResult, error) { mtxHex, err := messageToHex(mtx) if err != nil { return nil, err } - txReply := &btcjson.TxRawResult{ - Hex: mtxHex, - Txid: txHash, - Vout: createVoutList(mtx, chainParams), - Vin: createVinList(mtx), - Version: mtx.Version, - LockTime: mtx.LockTime, + txReply := &dcrjson.TxRawResult{ + Hex: mtxHex, + Txid: txHash, + Vout: createVoutList(mtx, chainParams), + Vin: createVinList(mtx), + Version: mtx.Version, + LockTime: mtx.LockTime, + Expiry: mtx.Expiry, + BlockHeight: blkHeight, + BlockIndex: blkIdx, } if blk != nil { @@ -707,7 +1329,7 @@ func createTxRawResult(chainParams *chaincfg.Params, txHash string, // handleDecodeRawTransaction handles decoderawtransaction commands. func handleDecodeRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.DecodeRawTransactionCmd) + c := cmd.(*dcrjson.DecodeRawTransactionCmd) // Deserialize the transaction. hexStr := c.HexTx @@ -721,17 +1343,18 @@ func handleDecodeRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan var mtx wire.MsgTx err = mtx.Deserialize(bytes.NewReader(serializedTx)) if err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCDeserialization, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCDeserialization, Message: "TX decode failed: " + err.Error(), } } // Create and return the result. - txReply := btcjson.TxRawDecodeResult{ + txReply := dcrjson.TxRawDecodeResult{ Txid: mtx.TxSha().String(), Version: mtx.Version, Locktime: mtx.LockTime, + Expiry: mtx.Expiry, Vin: createVinList(&mtx), Vout: createVoutList(&mtx, s.server.chainParams), } @@ -740,7 +1363,7 @@ func handleDecodeRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan // handleDecodeScript handles decodescript commands. func handleDecodeScript(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.DecodeScriptCmd) + c := cmd.(*dcrjson.DecodeScriptCmd) // Convert the hex script to bytes. 
 	hexStr := c.HexScript
@@ -759,22 +1382,23 @@ func handleDecodeScript(s *rpcServer, cmd interface{}, closeChan <-chan struct{}
 	// Get information about the script.
 	// Ignore the error here since an error means the script couldn't parse
 	// and there is no additinal information about it anyways.
-	scriptClass, addrs, reqSigs, _ := txscript.ExtractPkScriptAddrs(script,
-		s.server.chainParams)
+	// TODO Replace magic version with argument passed to RPC call
+	scriptClass, addrs, reqSigs, _ := txscript.ExtractPkScriptAddrs(
+		txscript.DefaultScriptVersion, script, s.server.chainParams)
 	addresses := make([]string, len(addrs))
 	for i, addr := range addrs {
 		addresses[i] = addr.EncodeAddress()
 	}

 	// Convert the script itself to a pay-to-script-hash address.
-	p2sh, err := btcutil.NewAddressScriptHash(script, s.server.chainParams)
+	p2sh, err := dcrutil.NewAddressScriptHash(script, s.server.chainParams)
 	if err != nil {
 		context := "Failed to convert script to pay-to-script-hash"
 		return nil, internalRPCError(err.Error(), context)
 	}

 	// Generate and return the reply.
-	reply := btcjson.DecodeScriptResult{
+	reply := dcrjson.DecodeScriptResult{
 		Asm:     disbuf,
 		ReqSigs: int32(reqSigs),
 		Type:    scriptClass.String(),
@@ -784,24 +1408,73 @@ func handleDecodeScript(s *rpcServer, cmd interface{}, closeChan <-chan struct{}
 	return reply, nil
 }

+// handleEstimateFee implements the estimatefee command.
+// TODO: this is a very basic implementation. It should be
+// modified to match the bitcoin-core one.
+func handleEstimateFee(s *rpcServer, cmd interface{},
+	closeChan <-chan struct{}) (interface{}, error) {
+
+	return 0.01, nil
+}
+
+// handleExistsAddress implements the existsaddress command.
+func handleExistsAddress(s *rpcServer, cmd interface{},
+	closeChan <-chan struct{}) (interface{}, error) {
+	if cfg.NoAddrIndex {
+		return nil, &dcrjson.RPCError{
+			Code:    dcrjson.ErrRPCMisc,
+			Message: "Address indexing must be enabled",
+		}
+	}
+	if !s.server.addrIndexer.IsCaughtUp() {
+		return nil, &dcrjson.RPCError{
+			Code: dcrjson.ErrRPCMisc,
+			Message: "Address index has not yet caught up to the " +
+				"current best height",
+		}
+	}
+
+	c := cmd.(*dcrjson.ExistsAddressCmd)
+
+	// Attempt to decode the supplied address.
+	addr, err := dcrutil.DecodeAddress(c.Address, s.server.chainParams)
+	if err != nil {
+		return nil, &dcrjson.RPCError{
+			Code:    dcrjson.ErrRPCInvalidAddressOrKey,
+			Message: "Invalid address or key: " + err.Error(),
+		}
+	}
+
+	var numRequested, numToSkip int
+	numToSkip = 0
+	numRequested = 1
+
+	// Check the blockchain for the relevant address usage.
+	tlr, err := s.server.db.FetchTxsForAddr(addr, numToSkip, numRequested)
+	if err == nil && tlr != nil {
+		return &dcrjson.ExistsAddressResult{true}, nil
+	}
+	return &dcrjson.ExistsAddressResult{false}, nil
+}
+
 // handleGenerate handles generate commands.
 func handleGenerate(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
 	// Respond with an error if there are no addresses to pay the
 	// created blocks to.
 	if len(cfg.miningAddrs) == 0 {
-		return nil, &btcjson.RPCError{
-			Code: btcjson.ErrRPCInternal.Code,
+		return nil, &dcrjson.RPCError{
+			Code: dcrjson.ErrRPCInternal.Code,
 			Message: "No payment addresses specified " +
 				"via --miningaddr",
 		}
 	}

-	c := cmd.(*btcjson.GenerateCmd)
+	c := cmd.(*dcrjson.GenerateCmd)

 	// Respond with an error if the client is requesting 0 blocks to be generated.
if c.NumBlocks == 0 { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInternal.Code, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInternal.Code, Message: "Please request a nonzero number of blocks to generate.", } } @@ -811,8 +1484,8 @@ func handleGenerate(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i blockHashes, err := s.server.cpuMiner.GenerateNBlocks(c.NumBlocks) if err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInternal.Code, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInternal.Code, Message: err.Error(), } } @@ -828,9 +1501,9 @@ func handleGenerate(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i // handleGetAddedNodeInfo handles getaddednodeinfo commands. func handleGetAddedNodeInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.GetAddedNodeInfoCmd) + c := cmd.(*dcrjson.GetAddedNodeInfoCmd) - // Retrieve a list of persistent (added) peers from the bitcoin server + // Retrieve a list of persistent (added) peers from the decred server // and filter the list of peer per the specified address (if any). peers := s.server.AddedNodeInfo() if c.Node != nil { @@ -843,7 +1516,7 @@ func handleGetAddedNodeInfo(s *rpcServer, cmd interface{}, closeChan <-chan stru } } if !found { - return nil, &btcjson.RPCError{ + return nil, &dcrjson.RPCError{ Code: -24, // TODO: ErrRPCClientNodeNotAdded Message: "Node has not been added", } @@ -862,13 +1535,13 @@ func handleGetAddedNodeInfo(s *rpcServer, cmd interface{}, closeChan <-chan stru // With the dns flag, the result is an array of JSON objects which // include the result of DNS lookups for each peer. - results := make([]*btcjson.GetAddedNodeInfoResult, 0, len(peers)) + results := make([]*dcrjson.GetAddedNodeInfoResult, 0, len(peers)) for _, peer := range peers { // Set the "address" of the peer which could be an ip address // or a domain name. - var result btcjson.GetAddedNodeInfoResult + var result dcrjson.GetAddedNodeInfoResult result.AddedNode = peer.addr - result.Connected = btcjson.Bool(peer.Connected()) + result.Connected = dcrjson.Bool(peer.Connected()) // Split the address into host and port portions so we can do // a DNS lookup against the host. When no port is specified in @@ -881,7 +1554,7 @@ func handleGetAddedNodeInfo(s *rpcServer, cmd interface{}, closeChan <-chan stru // Do a DNS lookup for the address. If the lookup fails, just // use the host. var ipList []string - ips, err := btcdLookup(host) + ips, err := dcrdLookup(host) if err == nil { ipList = make([]string, 0, len(ips)) for _, ip := range ips { @@ -893,9 +1566,9 @@ func handleGetAddedNodeInfo(s *rpcServer, cmd interface{}, closeChan <-chan stru } // Add the addresses and connection info to the result. - addrs := make([]btcjson.GetAddedNodeInfoResultAddr, 0, len(ipList)) + addrs := make([]dcrjson.GetAddedNodeInfoResultAddr, 0, len(ipList)) for _, ip := range ipList { - var addr btcjson.GetAddedNodeInfoResultAddr + var addr dcrjson.GetAddedNodeInfoResultAddr addr.Address = ip addr.Connected = "false" if ip == host && peer.Connected() { @@ -916,13 +1589,13 @@ func handleGetBestBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{} // the best block. 
sha, height, err := s.server.db.NewestSha() if err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCBestBlockHash, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCBestBlockHash, Message: "Error getting best block hash", } } - result := &btcjson.GetBestBlockResult{ + result := &dcrjson.GetBestBlockResult{ Hash: sha.String(), Height: int32(height), } @@ -934,8 +1607,8 @@ func handleGetBestBlockHash(s *rpcServer, cmd interface{}, closeChan <-chan stru sha, _, err := s.server.db.NewestSha() if err != nil { rpcsLog.Errorf("Error getting newest sha: %v", err) - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCBestBlockHash, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCBestBlockHash, Message: "Error getting best block hash", } } @@ -965,20 +1638,27 @@ func getDifficultyRatio(bits uint32) float64 { // handleGetBlock implements the getblock command. func handleGetBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.GetBlockCmd) + c := cmd.(*dcrjson.GetBlockCmd) - sha, err := wire.NewShaHashFromStr(c.Hash) + sha, err := chainhash.NewHashFromStr(c.Hash) if err != nil { return nil, rpcDecodeHexError(c.Hash) } - blk, err := s.server.db.FetchBlockBySha(sha) + + blk, err := s.server.blockManager.GetBlockFromHash(*sha) if err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCBlockNotFound, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCBlockNotFound, Message: "Block not found", } } + blockInMainChain := false + _, err = s.server.db.FetchBlockBySha(sha) + if err == nil { + blockInMainChain = true + } + // When the verbose flag isn't set, simply return the network-serialized // block as a hex-encoded string. if c.Verbose != nil && !*c.Verbose { @@ -1000,6 +1680,9 @@ func handleGetBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i return nil, internalRPCError(err.Error(), context) } idx := blk.Height() + if !blockInMainChain { + idx = -1 + } _, maxIdx, err := s.server.db.NewestSha() if err != nil { context := "Failed to get newest hash" @@ -1007,18 +1690,28 @@ func handleGetBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i } blockHeader := &blk.MsgBlock().Header - blockReply := btcjson.GetBlockVerboseResult{ + sbitsFloat := float64(blockHeader.SBits) / dcrutil.AtomsPerCoin + blockReply := dcrjson.GetBlockVerboseResult{ Hash: c.Hash, Version: blockHeader.Version, MerkleRoot: blockHeader.MerkleRoot.String(), + StakeRoot: blockHeader.StakeRoot.String(), PreviousHash: blockHeader.PrevBlock.String(), Nonce: blockHeader.Nonce, + VoteBits: blockHeader.VoteBits, + FinalState: hex.EncodeToString(blockHeader.FinalState[:]), + Voters: blockHeader.Voters, + FreshStake: blockHeader.FreshStake, + Revocations: blockHeader.Revocations, + PoolSize: blockHeader.PoolSize, Time: blockHeader.Timestamp.Unix(), Confirmations: uint64(1 + maxIdx - idx), Height: idx, Size: int32(len(buf)), Bits: strconv.FormatInt(int64(blockHeader.Bits), 16), + SBits: sbitsFloat, Difficulty: getDifficultyRatio(blockHeader.Bits), + ExtraData: hex.EncodeToString(blockHeader.ExtraData[:]), } if c.VerboseTx == nil || !*c.VerboseTx { @@ -1029,26 +1722,49 @@ func handleGetBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i } blockReply.Tx = txNames + + stransactions := blk.STransactions() + stxNames := make([]string, len(stransactions)) + for i, tx := range stransactions { + stxNames[i] = tx.Sha().String() + } + + blockReply.STx = stxNames } else { txns := blk.Transactions() - rawTxns := 
make([]btcjson.TxRawResult, len(txns)) + rawTxns := make([]dcrjson.TxRawResult, len(txns)) for i, tx := range txns { txHash := tx.Sha().String() mtx := tx.MsgTx() rawTxn, err := createTxRawResult(s.server.chainParams, - txHash, mtx, blk, maxIdx, sha) + txHash, mtx, blk, maxIdx, sha, blk.Height(), uint32(i)) if err != nil { return nil, err } rawTxns[i] = *rawTxn } blockReply.RawTx = rawTxns + + stxns := blk.STransactions() + rawSTxns := make([]dcrjson.TxRawResult, len(stxns)) + for i, tx := range stxns { + txHash := tx.Sha().String() + mtx := tx.MsgTx() + + rawSTxn, err := createTxRawResult(s.server.chainParams, + txHash, mtx, blk, maxIdx, sha, blk.Height(), uint32(i)) + if err != nil { + return nil, err + } + rawSTxns[i] = *rawSTxn + } + blockReply.RawSTx = rawSTxns } // Get next block unless we are already at the top. - if idx < maxIdx { - var shaNext *wire.ShaHash + if idx < maxIdx && idx >= 0 { + var shaNext *chainhash.Hash shaNext, err = s.server.db.FetchBlockShaByHeight(int64(idx + 1)) if err != nil { context := "No next block" @@ -1065,8 +1781,8 @@ func handleGetBlockCount(s *rpcServer, cmd interface{}, closeChan <-chan struct{ _, maxIdx, err := s.server.db.NewestSha() if err != nil { rpcsLog.Errorf("Error getting newest sha: %v", err) - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCBlockCount, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCBlockCount, Message: "Error getting block count: " + err.Error(), } } @@ -1076,11 +1792,11 @@ func handleGetBlockCount(s *rpcServer, cmd interface{}, closeChan <-chan struct{ // handleGetBlockHash implements the getblockhash command. func handleGetBlockHash(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.GetBlockHashCmd) + c := cmd.(*dcrjson.GetBlockHashCmd) sha, err := s.server.db.FetchBlockShaByHeight(c.Index) if err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCOutOfRange, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCOutOfRange, Message: "Block number out of range", } } @@ -1090,7 +1806,7 @@ func handleGetBlockHash(s *rpcServer, cmd interface{}, closeChan <-chan struct{} // encodeTemplateID encodes the passed details into an ID that can be used to // uniquely identify a block template. -func encodeTemplateID(prevHash *wire.ShaHash, lastGenerated time.Time) string { +func encodeTemplateID(prevHash *chainhash.Hash, lastGenerated time.Time) string { return fmt.Sprintf("%s-%d", prevHash.String(), lastGenerated.Unix()) } @@ -1099,13 +1815,13 @@ func encodeTemplateID(prevHash *wire.ShaHash, lastGenerated time.Time) string { // that are using long polling for block templates. The ID consists of the // previous block hash for the associated template and the time the associated // template was generated. -func decodeTemplateID(templateID string) (*wire.ShaHash, int64, error) { +func decodeTemplateID(templateID string) (*chainhash.Hash, int64, error) { fields := strings.Split(templateID, "-") if len(fields) != 2 { return nil, 0, errors.New("invalid longpollid format") } - prevHash, err := wire.NewShaHashFromStr(fields[0]) + prevHash, err := chainhash.NewHashFromStr(fields[0]) if err != nil { return nil, 0, errors.New("invalid longpollid format") } @@ -1121,7 +1837,7 @@ func decodeTemplateID(templateID string) (*wire.ShaHash, int64, error) { // notified when block templates are stale. // // This function MUST be called with the state locked. 
-func (state *gbtWorkState) notifyLongPollers(latestHash *wire.ShaHash, lastGenerated time.Time) { +func (state *gbtWorkState) notifyLongPollers(latestHash *chainhash.Hash, lastGenerated time.Time) { // Notify anything that is waiting for a block template update from a // hash which is not the hash of the tip of the best chain since their // work is now invalid. @@ -1168,7 +1884,7 @@ func (state *gbtWorkState) notifyLongPollers(latestHash *wire.ShaHash, lastGener // NotifyBlockConnected uses the newly-connected block to notify any long poll // clients with a new block template when their existing block template is // stale due to the newly connected block. -func (state *gbtWorkState) NotifyBlockConnected(blockSha *wire.ShaHash) { +func (state *gbtWorkState) NotifyBlockConnected(blockSha *chainhash.Hash) { go func() { state.Lock() defer state.Unlock() @@ -1207,7 +1923,7 @@ func (state *gbtWorkState) NotifyMempoolTx(lastUpdated time.Time) { // without requiring a different channel for each client. // // This function MUST be called with the state locked. -func (state *gbtWorkState) templateUpdateChan(prevHash *wire.ShaHash, lastGenerated int64) chan struct{} { +func (state *gbtWorkState) templateUpdateChan(prevHash *chainhash.Hash, lastGenerated int64) chan struct{} { // Either get the current list of channels waiting for updates about // changes to block template for the previous hash or create a new one. channels, ok := state.notifyMap[*prevHash] @@ -1268,7 +1984,7 @@ func (state *gbtWorkState) updateBlockTemplate(s *rpcServer, useCoinbaseValue bo // Choose a payment address at random if the caller requests a // full coinbase as opposed to only the pertinent details needed // to create their own coinbase. - var payAddr btcutil.Address + var payAddr dcrutil.Address if !useCoinbaseValue { payAddr = cfg.miningAddrs[rand.Intn(len(cfg.miningAddrs))] } @@ -1283,6 +1999,11 @@ func (state *gbtWorkState) updateBlockTemplate(s *rpcServer, useCoinbaseValue bo return internalRPCError("Failed to create new block "+ "template: "+err.Error(), "") } + if blkTemplate == nil { + return internalRPCError("Failed to create new block "+ + "template: not enough voters on parent and no "+ + "suitable cached template", "") + } template = blkTemplate msgBlock = template.block targetDifficulty = fmt.Sprintf("%064x", @@ -1300,7 +2021,7 @@ func (state *gbtWorkState) updateBlockTemplate(s *rpcServer, useCoinbaseValue bo // Update work state to ensure another block template isn't // generated until needed. - state.template = template + state.template = deepCopyBlockTemplate(template) state.lastGenerated = time.Now() state.lastTxUpdate = lastTxUpdate state.prevHash = latestHash @@ -1342,7 +2063,7 @@ func (state *gbtWorkState) updateBlockTemplate(s *rpcServer, useCoinbaseValue bo template.validPayAddress = true // Update the merkle root. - block := btcutil.NewBlock(template.block) + block := dcrutil.NewBlock(template.block) merkles := blockchain.BuildMerkleTreeStore(block.Transactions()) template.block.Header.MerkleRoot = *merkles[len(merkles)-1] } @@ -1367,23 +2088,25 @@ func (state *gbtWorkState) updateBlockTemplate(s *rpcServer, useCoinbaseValue bo } // blockTemplateResult returns the current block template associated with the -// state as a btcjson.GetBlockTemplateResult that is ready to be encoded to JSON +// state as a dcrjson.GetBlockTemplateResult that is ready to be encoded to JSON // and returned to the caller. // // This function MUST be called with the state locked. 
-func (state *gbtWorkState) blockTemplateResult(useCoinbaseValue bool, submitOld *bool) (*btcjson.GetBlockTemplateResult, error) { +func (state *gbtWorkState) blockTemplateResult(bm *blockManager, + useCoinbaseValue bool, submitOld *bool) (*dcrjson.GetBlockTemplateResult, + error) { // Ensure the timestamps are still in valid range for the template. // This should really only ever happen if the local clock is changed // after the template is generated, but it's important to avoid serving // invalid block templates. - template := state.template + template := deepCopyBlockTemplate(state.template) msgBlock := template.block header := &msgBlock.Header adjustedTime := state.timeSource.AdjustedTime() maxTime := adjustedTime.Add(time.Second * blockchain.MaxTimeOffsetSeconds) if header.Timestamp.After(maxTime) { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCOutOfRange, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCOutOfRange, Message: fmt.Sprintf("The template time is after the "+ "maximum allowed time for a block - template "+ "time %v, maximum time %v", adjustedTime, @@ -1391,14 +2114,40 @@ func (state *gbtWorkState) blockTemplateResult(useCoinbaseValue bool, submitOld } } + // Sometimes requests have a faulty fees or sig ops count. If we need to, + // we can recalculate these. + recalculateFeesAndSigsOps := true + if len(msgBlock.Transactions)+len(msgBlock.STransactions) == + len(template.fees) { + recalculateFeesAndSigsOps = false + } + if len(msgBlock.Transactions)+len(msgBlock.STransactions) == + len(template.sigOpCounts) { + recalculateFeesAndSigsOps = false + } + newestBlock, _ := bm.chainState.Best() + if newestBlock == nil { + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCBestBlockHash, + Message: fmt.Sprintf("The parent of the block on HEAD could " + + "not be found"), + } + } + // If we're mining on the parent with a cached template instead of on + // the newest block, we shouldn't recalculate fees and sigops. + if *newestBlock != msgBlock.Header.PrevBlock { + recalculateFeesAndSigsOps = false + } + // Convert each transaction in the block template to a template result // transaction. The result does not include the coinbase, so notice // the adjustments to the various lengths and indices. numTx := len(msgBlock.Transactions) - transactions := make([]btcjson.GetBlockTemplateResultTx, 0, numTx-1) - txIndex := make(map[wire.ShaHash]int64, numTx) + transactions := make([]dcrjson.GetBlockTemplateResultTx, 0, numTx-1) + txIndex := make(map[chainhash.Hash]int64, numTx) for i, tx := range msgBlock.Transactions { - txHash := tx.TxSha() + txHash := tx.TxShaFull() + txIndex[txHash] = int64(i) // Skip the coinbase transaction. 
@@ -1430,39 +2179,214 @@ func (state *gbtWorkState) blockTemplateResult(useCoinbaseValue bool, submitOld return nil, internalRPCError(err.Error(), context) } - resultTx := btcjson.GetBlockTemplateResultTx{ + var txTypeStr string + tempTx := dcrutil.NewTx(tx) + txType := stake.DetermineTxType(tempTx) + switch txType { + case stake.TxTypeRegular: + txTypeStr = "regular" + case stake.TxTypeSStx: + txTypeStr = "error" + case stake.TxTypeSSGen: + txTypeStr = "error" + case stake.TxTypeSSRtx: + txTypeStr = "error" + } + + fee := int64(0) + sigOps := int64(0) + if !recalculateFeesAndSigsOps { + fee = template.fees[i] + sigOps = template.sigOpCounts[i] + } else { + txU := dcrutil.NewTx(tx) + isValid := dcrutil.IsFlagSet16( + template.block.Header.VoteBits, + dcrutil.BlockValid) + store, err := bm.FetchTransactionStore(txU, isValid) + if err != nil { + return nil, err + } + + fee, err = blockchain.CheckTransactionInputs(txU, + int64(template.block.Header.Height), + store, + true, // Ensure fraud proofs are correct + bm.server.chainParams) + if err != nil { + return nil, err + } + + isSSGen := false + numSigOps, err := blockchain.CountP2SHSigOps(txU, false, isSSGen, + store) + if err != nil { + if cerr, ok := err.(blockchain.RuleError); ok { + return nil, chainRuleError(cerr) + } + return nil, err + } + + numSigOps += blockchain.CountSigOps(txU, false, isSSGen) + if numSigOps > maxSigOpsPerTx { + str := fmt.Sprintf("transaction %v has too many sigops: %d > %d", + txHash, numSigOps, maxSigOpsPerTx) + return nil, txRuleError(wire.RejectNonstandard, str) + } + sigOps = int64(numSigOps) + } + + resultTx := dcrjson.GetBlockTemplateResultTx{ Data: hex.EncodeToString(txBuf.Bytes()), Hash: txHash.String(), Depends: depends, - Fee: template.fees[i], - SigOps: template.sigOpCounts[i], + Fee: fee, + SigOps: sigOps, + TxType: txTypeStr, } transactions = append(transactions, resultTx) } + // Convert each stake transaction in the block template to a template + // result transaction. + numSTx := len(msgBlock.STransactions) + stransactions := make([]dcrjson.GetBlockTemplateResultTx, 0, numSTx) + stxIndex := make(map[chainhash.Hash]int64, numSTx) + for i, stx := range msgBlock.STransactions { + stxHash := stx.TxShaFull() + + stxIndex[stxHash] = int64(i) + + // Create an array of 1-based indices to transactions that come + // before this one in the transactions list which this one + // depends on. This is necessary since the created block must + // ensure proper ordering of the dependencies. A map is used + // before creating the final array to prevent duplicate entries + // when mutiple inputs reference the same transaction. + dependsMap := make(map[int64]struct{}) + for _, txIn := range stx.TxIn { + if idx, ok := stxIndex[txIn.PreviousOutPoint.Hash]; ok { + dependsMap[idx] = struct{}{} + } + } + depends := make([]int64, 0, len(dependsMap)) + for idx := range dependsMap { + depends = append(depends, idx) + } + + // Serialize the transaction for later conversion to hex. 
+ txBuf := bytes.NewBuffer(make([]byte, 0, stx.SerializeSize())) + if err := stx.Serialize(txBuf); err != nil { + context := "Failed to serialize transaction" + return nil, internalRPCError(err.Error(), context) + } + + var txTypeStr string + tempStx := dcrutil.NewTx(stx) + txType := stake.DetermineTxType(tempStx) + switch txType { + case stake.TxTypeRegular: + txTypeStr = "error" + case stake.TxTypeSStx: + txTypeStr = "ticket" + case stake.TxTypeSSGen: + txTypeStr = "vote" + case stake.TxTypeSSRtx: + txTypeStr = "revocation" + } + + fee := int64(0) + sigOps := int64(0) + if !recalculateFeesAndSigsOps { + // Check bounds and throw an error if OOB. This should be + // looked into further, probably it's the result of a + // race. + // Decred TODO + allTxCount := len(msgBlock.Transactions) + + len(msgBlock.STransactions) + if allTxCount != len(template.fees) || + allTxCount != len(template.sigOpCounts) { + context := "failed to build template due to race" + return nil, internalRPCError(fmt.Sprintf("race in block "+ + "template data for getwork caused corruption"), context) + } + + fee = template.fees[i+len(msgBlock.Transactions)] + sigOps = template.sigOpCounts[i+len(msgBlock.Transactions)] + } else { + txU := dcrutil.NewTx(stx) + isValid := dcrutil.IsFlagSet16( + template.block.Header.VoteBits, + dcrutil.BlockValid) + store, err := bm.FetchTransactionStore(txU, isValid) + if err != nil { + return nil, err + } + + fee, err = blockchain.CheckTransactionInputs(txU, + int64(template.block.Header.Height), + store, + true, // Ensure fraud proofs are correct + bm.server.chainParams) + if err != nil { + return nil, err + } + + isSSGen := txType == stake.TxTypeSSGen + numSigOps, err := blockchain.CountP2SHSigOps(txU, false, isSSGen, + store) + if err != nil { + if cerr, ok := err.(blockchain.RuleError); ok { + return nil, chainRuleError(cerr) + } + return nil, err + } + + numSigOps += blockchain.CountSigOps(txU, false, isSSGen) + if numSigOps > maxSigOpsPerTx { + str := fmt.Sprintf("transaction %v has too many sigops: %d > %d", + stxHash, numSigOps, maxSigOpsPerTx) + return nil, txRuleError(wire.RejectNonstandard, str) + } + sigOps = int64(numSigOps) + } + + resultTx := dcrjson.GetBlockTemplateResultTx{ + Data: hex.EncodeToString(txBuf.Bytes()), + Hash: stxHash.String(), + Depends: depends, + Fee: fee, + SigOps: sigOps, + TxType: txTypeStr, + } + stransactions = append(stransactions, resultTx) + } + + headerBytes, err := header.Bytes() + if err != nil { + return nil, err + } + // Generate the block template reply. 
Note that following mutations are // implied by the included or omission of fields: // Including MinTime -> time/decrement // Omitting CoinbaseTxn -> coinbase, generation targetDifficulty := fmt.Sprintf("%064x", blockchain.CompactToBig(header.Bits)) templateID := encodeTemplateID(state.prevHash, state.lastGenerated) - reply := btcjson.GetBlockTemplateResult{ - Bits: strconv.FormatInt(int64(header.Bits), 16), - CurTime: header.Timestamp.Unix(), - Height: template.height, - PreviousHash: header.PrevBlock.String(), - SigOpLimit: blockchain.MaxSigOpsPerBlock, - SizeLimit: wire.MaxBlockPayload, - Transactions: transactions, - Version: header.Version, - LongPollID: templateID, - SubmitOld: submitOld, - Target: targetDifficulty, - MinTime: state.minTimestamp.Unix(), - MaxTime: maxTime.Unix(), - Mutable: gbtMutableFields, - NonceRange: gbtNonceRange, - Capabilities: gbtCapabilities, + reply := dcrjson.GetBlockTemplateResult{ + Header: hex.EncodeToString(headerBytes), + SigOpLimit: blockchain.MaxSigOpsPerBlock, + SizeLimit: int64(bm.server.chainParams.MaximumBlockSize), + Transactions: transactions, + STransactions: stransactions, + LongPollID: templateID, + SubmitOld: submitOld, + Target: targetDifficulty, + MinTime: state.minTimestamp.Unix(), + MaxTime: maxTime.Unix(), + Mutable: gbtMutableFields, + NonceRange: gbtNonceRange, + Capabilities: gbtCapabilities, } if useCoinbaseValue { reply.CoinbaseAux = gbtCoinbaseAux @@ -1471,8 +2395,8 @@ func (state *gbtWorkState) blockTemplateResult(useCoinbaseValue bool, submitOld // Ensure the template has a valid payment address associated // with it when a full coinbase is requested. if !template.validPayAddress { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInternal.Code, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInternal.Code, Message: "A coinbase transaction has been " + "requested, but the server has not " + "been configured with any payment " + @@ -1488,7 +2412,7 @@ func (state *gbtWorkState) blockTemplateResult(useCoinbaseValue bool, submitOld return nil, internalRPCError(err.Error(), context) } - resultTx := btcjson.GetBlockTemplateResultTx{ + resultTx := dcrjson.GetBlockTemplateResultTx{ Data: hex.EncodeToString(txBuf.Bytes()), Hash: tx.TxSha().String(), Depends: []int64{}, @@ -1528,7 +2452,8 @@ func handleGetBlockTemplateLongPoll(s *rpcServer, longPollID string, useCoinbase // provided by the caller is invalid. prevHash, lastGenerated, err := decodeTemplateID(longPollID) if err != nil { - result, err := state.blockTemplateResult(useCoinbaseValue, nil) + result, err := state.blockTemplateResult(s.server.blockManager, + useCoinbaseValue, nil) if err != nil { state.Unlock() return nil, err @@ -1549,8 +2474,8 @@ func handleGetBlockTemplateLongPoll(s *rpcServer, longPollID string, useCoinbase // old block template depending on whether or not a solution has // already been found and added to the block chain. submitOld := prevHash.IsEqual(prevTemplateHash) - result, err := state.blockTemplateResult(useCoinbaseValue, - &submitOld) + result, err := state.blockTemplateResult(s.server.blockManager, + useCoinbaseValue, &submitOld) if err != nil { state.Unlock() return nil, err @@ -1590,7 +2515,8 @@ func handleGetBlockTemplateLongPoll(s *rpcServer, longPollID string, useCoinbase // block template depending on whether or not a solution has already // been found and added to the block chain. 
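The LongPollID returned in the reply comes from encodeTemplateID(state.prevHash, state.lastGenerated) and is decoded again by handleGetBlockTemplateLongPoll. A small sketch of a compatible encode/decode pair, assuming the prevhash-unixtime format btcd used for this identifier (the helper names come from the surrounding code; the bodies are illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
	"time"
)

// encodeTemplateID joins the previous block hash with the template's
// generation time so long-polling clients can tell stale templates apart.
func encodeTemplateID(prevHash string, lastGenerated time.Time) string {
	return fmt.Sprintf("%s-%d", prevHash, lastGenerated.Unix())
}

// decodeTemplateID splits an ID produced by encodeTemplateID back into its
// previous hash and generation time, rejecting malformed input.
func decodeTemplateID(id string) (string, time.Time, error) {
	fields := strings.Split(id, "-")
	if len(fields) != 2 {
		return "", time.Time{}, errors.New("invalid longpollid format")
	}
	unix, err := strconv.ParseInt(fields[1], 10, 64)
	if err != nil {
		return "", time.Time{}, err
	}
	return fields[0], time.Unix(unix, 0), nil
}

func main() {
	id := encodeTemplateID("00000000deadbeef", time.Unix(1454954400, 0))
	prev, gen, err := decodeTemplateID(id)
	fmt.Println(id, prev, gen.Unix(), err)
}
```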
submitOld := prevHash.IsEqual(&state.template.block.Header.PrevBlock) - result, err := state.blockTemplateResult(useCoinbaseValue, &submitOld) + result, err := state.blockTemplateResult(s.server.blockManager, + useCoinbaseValue, &submitOld) if err != nil { return nil, err } @@ -1605,7 +2531,7 @@ func handleGetBlockTemplateLongPoll(s *rpcServer, longPollID string, useCoinbase // in regards to whether or not it supports creating its own coinbase (the // coinbasetxn and coinbasevalue capabilities) and modifies the returned block // template accordingly. -func handleGetBlockTemplateRequest(s *rpcServer, request *btcjson.TemplateRequest, closeChan <-chan struct{}) (interface{}, error) { +func handleGetBlockTemplateRequest(s *rpcServer, request *dcrjson.TemplateRequest, closeChan <-chan struct{}) (interface{}, error) { // Extract the relevant passed capabilities and restrict the result to // either a coinbase value or a coinbase transaction object depending on // the request. Default to only providing a coinbase value. @@ -1629,8 +2555,8 @@ func handleGetBlockTemplateRequest(s *rpcServer, request *btcjson.TemplateReques // When a coinbase transaction has been requested, respond with an error // if there are no addresses to pay the created block template to. if !useCoinbaseValue && len(cfg.miningAddrs) == 0 { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInternal.Code, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInternal.Code, Message: "A coinbase transaction has been requested, " + "but the server has not been configured with " + "any payment addresses via --miningaddr", @@ -1641,19 +2567,19 @@ func handleGetBlockTemplateRequest(s *rpcServer, request *btcjson.TemplateReques // way to relay a found block or receive transactions to work on. // However, allow this state when running in the regression test or // simulation test mode. - if !(cfg.RegressionTest || cfg.SimNet) && s.server.ConnectedCount() == 0 { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCClientNotConnected, - Message: "Bitcoin is not connected", + if !cfg.SimNet && s.server.ConnectedCount() == 0 { + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCClientNotConnected, + Message: "Decred is not connected", } } // No point in generating or accepting work before the chain is synced. _, currentHeight := s.server.blockManager.chainState.Best() if currentHeight != 0 && !s.server.blockManager.IsCurrent() { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCClientInInitialDownload, - Message: "Bitcoin is downloading blocks...", + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCClientInInitialDownload, + Message: "Decred is downloading blocks...", } } @@ -1679,12 +2605,13 @@ func handleGetBlockTemplateRequest(s *rpcServer, request *btcjson.TemplateReques if err := state.updateBlockTemplate(s, useCoinbaseValue); err != nil { return nil, err } - return state.blockTemplateResult(useCoinbaseValue, nil) + return state.blockTemplateResult(s.server.blockManager, useCoinbaseValue, nil) } -// chainErrToGBTErrString converts an error returned from btcchain to a string +// chainErrToGBTErrString converts an error returned from chain to a string // which matches the reasons and format described in BIP0022 for rejection // reasons. +// TODO Decred pop in the new errors from blockchain cj func chainErrToGBTErrString(err error) string { // When the passed error is not a RuleError, just return a generic // rejected string with the error text. 
@@ -1762,10 +2689,6 @@ func chainErrToGBTErrString(err error) string { return "bad-cb-length" case blockchain.ErrBadCoinbaseValue: return "bad-cb-value" - case blockchain.ErrMissingCoinbaseHeight: - return "bad-cb-height" - case blockchain.ErrBadCoinbaseHeight: - return "bad-cb-height" case blockchain.ErrScriptMalformed: return "bad-script-malformed" case blockchain.ErrScriptValidation: @@ -1779,11 +2702,11 @@ func chainErrToGBTErrString(err error) string { // deals with block proposals. // // See https://en.bitcoin.it/wiki/BIP_0023 for more details. -func handleGetBlockTemplateProposal(s *rpcServer, request *btcjson.TemplateRequest) (interface{}, error) { +func handleGetBlockTemplateProposal(s *rpcServer, request *dcrjson.TemplateRequest) (interface{}, error) { hexData := request.Data if hexData == "" { - return false, &btcjson.RPCError{ - Code: btcjson.ErrRPCType, + return false, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCType, Message: fmt.Sprintf("Data must contain the " + "hex-encoded serialized block that is being " + "proposed"), @@ -1796,20 +2719,20 @@ func handleGetBlockTemplateProposal(s *rpcServer, request *btcjson.TemplateReque } dataBytes, err := hex.DecodeString(hexData) if err != nil { - return false, &btcjson.RPCError{ - Code: btcjson.ErrRPCDeserialization, + return false, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCDeserialization, Message: fmt.Sprintf("Data must be "+ "hexadecimal string (not %q)", hexData), } } var msgBlock wire.MsgBlock if err := msgBlock.Deserialize(bytes.NewReader(dataBytes)); err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCDeserialization, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCDeserialization, Message: "Block decode failed: " + err.Error(), } } - block := btcutil.NewBlock(&msgBlock) + block := dcrutil.NewBlock(&msgBlock) // Ensure the block is building from the expected previous block. expectedPrevHash, _ := s.server.blockManager.chainState.Best() @@ -1824,7 +2747,7 @@ func handleGetBlockTemplateProposal(s *rpcServer, request *btcjson.TemplateReque if _, ok := err.(blockchain.RuleError); !ok { err := rpcsLog.Errorf("Failed to process block "+ "proposal: %v", err) - return nil, &btcjson.RPCError{ + return nil, &dcrjson.RPCError{ Code: -25, // TODO: ErrRpcVerify Message: err.Error(), } @@ -1845,7 +2768,26 @@ func handleGetBlockTemplateProposal(s *rpcServer, request *btcjson.TemplateReque // See https://en.bitcoin.it/wiki/BIP_0022 and // https://en.bitcoin.it/wiki/BIP_0023 for more details. func handleGetBlockTemplate(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.GetBlockTemplateCmd) + if s.server.cpuMiner.IsMining() { + err := rpcsLog.Errorf("Block template production is disallowed " + + "while CPU mining is enabled. Please disable CPU mining " + + "and try again.") + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCMisc, + Message: err.Error(), + } + } + + // Respond with an error if there are no addresses to pay the created + // blocks to. + if len(cfg.miningAddrs) == 0 { + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInternal.Code, + Message: "No payment addresses specified via --miningaddr", + } + } + + c := cmd.(*dcrjson.GetBlockTemplateCmd) request := c.Request // Set the default mode and override it if supplied. 
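Proposal mode takes a hex-encoded serialized block in request.Data and rejects anything that does not decode. A stdlib-only sketch of that normalization step, assuming the same odd-length zero-prefix convention the other hex-accepting handlers in this file use before the bytes are handed to wire's block deserialization:

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// decodeProposalData normalizes and decodes the hex block data submitted in
// "proposal" mode: odd-length strings get a leading zero before hex
// decoding, matching the other hex-accepting RPC handlers.
func decodeProposalData(hexData string) ([]byte, error) {
	if len(hexData)%2 != 0 {
		hexData = "0" + hexData
	}
	return hex.DecodeString(hexData)
}

func main() {
	raw, err := decodeProposalData("abc") // treated as "0abc"
	fmt.Printf("%x %v\n", raw, err)
}
```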
@@ -1861,8 +2803,8 @@ func handleGetBlockTemplate(s *rpcServer, cmd interface{}, closeChan <-chan stru return handleGetBlockTemplateProposal(s, request) } - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidParameter, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, Message: "Invalid mode", } } @@ -1882,16 +2824,16 @@ func handleGetDifficulty(s *rpcServer, cmd interface{}, closeChan <-chan struct{ sha, _, err := s.server.db.NewestSha() if err != nil { rpcsLog.Errorf("Error getting sha: %v", err) - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCDifficulty, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCDifficulty, Message: "Error getting difficulty: " + err.Error(), } } blockHeader, err := s.server.db.FetchBlockHeaderBySha(sha) if err != nil { rpcsLog.Errorf("Error getting block: %v", err) - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCDifficulty, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCDifficulty, Message: "Error getting difficulty: " + err.Error(), } } @@ -1922,8 +2864,17 @@ func handleGetInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (in context := "Failed to get block" return nil, internalRPCError(err.Error(), context) } + var minTxRelayFee dcrutil.Amount + switch { + case s.server.chainParams == &chaincfg.MainNetParams: + minTxRelayFee = minTxRelayFeeMainNet + case s.server.chainParams == &chaincfg.MainNetParams: + minTxRelayFee = minTxRelayFeeTestNet + default: + minTxRelayFee = minTxRelayFeeTestNet + } - ret := &btcjson.InfoChainResult{ + ret := &dcrjson.InfoChainResult{ Version: int32(1000000*appMajor + 10000*appMinor + 100*appPatch), ProtocolVersion: int32(maxProtocolVersion), Blocks: int32(height), @@ -1931,8 +2882,8 @@ func handleGetInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (in Connections: s.server.ConnectedCount(), Proxy: cfg.Proxy, Difficulty: getDifficultyRatio(blkHeader.Bits), - TestNet: cfg.TestNet3, - RelayFee: float64(minTxRelayFee) / btcutil.SatoshiPerBitcoin, + TestNet: cfg.TestNet, + RelayFee: float64(minTxRelayFee) / dcrutil.AtomsPerCoin, } return ret, nil @@ -1959,7 +2910,7 @@ func handleGetMiningInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{ // Create a default getnetworkhashps command to use defaults and make // use of the existing getnetworkhashps handler. 
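The relay-fee selection added to handleGetInfo switches on the active chain parameters, but both explicit cases compare against chaincfg.MainNetParams, so the second case is unreachable and appears intended to test the testnet parameters. A stand-alone sketch of the apparent intent (the parameter values and fee constants below are placeholders, not the real chaincfg/dcrutil values):

```go
package main

import "fmt"

// Stand-ins for the package-level values referenced by handleGetInfo; the
// real code uses chaincfg.Params pointers and dcrutil.Amount.
const (
	atomsPerCoin         = 1e8
	minTxRelayFeeMainNet = 1e6 // hypothetical amounts, for illustration only
	minTxRelayFeeTestNet = 1e6
)

type params struct{ name string }

var (
	mainNetParams = params{"mainnet"}
	testNetParams = params{"testnet"}
)

// relayFee picks the minimum relay fee for the active network and converts
// it to coins; note the second case tests the testnet parameters, which is
// what the switch in handleGetInfo appears to intend.
func relayFee(active *params) float64 {
	var fee int64
	switch active {
	case &mainNetParams:
		fee = minTxRelayFeeMainNet
	case &testNetParams:
		fee = minTxRelayFeeTestNet
	default:
		fee = minTxRelayFeeTestNet
	}
	return float64(fee) / atomsPerCoin
}

func main() {
	fmt.Println(relayFee(&mainNetParams), relayFee(&testNetParams))
}
```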
- gnhpsCmd := btcjson.NewGetNetworkHashPSCmd(nil, nil) + gnhpsCmd := dcrjson.NewGetNetworkHashPSCmd(nil, nil) networkHashesPerSecIface, err := handleGetNetworkHashPS(s, gnhpsCmd, closeChan) if err != nil { @@ -1967,23 +2918,24 @@ func handleGetMiningInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{ } networkHashesPerSec, ok := networkHashesPerSecIface.(int64) if !ok { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInternal.Code, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInternal.Code, Message: "networkHashesPerSec is not an int64", } } - result := btcjson.GetMiningInfoResult{ + result := dcrjson.GetMiningInfoResult{ Blocks: height, CurrentBlockSize: uint64(len(blockBytes)), CurrentBlockTx: uint64(len(block.MsgBlock().Transactions)), Difficulty: getDifficultyRatio(block.MsgBlock().Header.Bits), + StakeDifficulty: block.MsgBlock().Header.SBits, Generate: s.server.cpuMiner.IsMining(), GenProcLimit: s.server.cpuMiner.NumWorkers(), HashesPerSec: int64(s.server.cpuMiner.HashesPerSecond()), NetworkHashPS: networkHashesPerSec, PooledTx: uint64(s.server.txMemPool.Count()), - TestNet: cfg.TestNet3, + TestNet: cfg.TestNet, } return &result, nil } @@ -1991,7 +2943,7 @@ func handleGetMiningInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{ // handleGetNetTotals implements the getnettotals command. func handleGetNetTotals(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { totalBytesRecv, totalBytesSent := s.server.NetTotals() - reply := &btcjson.GetNetTotalsResult{ + reply := &dcrjson.GetNetTotalsResult{ TotalBytesRecv: totalBytesRecv, TotalBytesSent: totalBytesSent, TimeMillis: time.Now().UTC().UnixNano() / int64(time.Millisecond), @@ -2005,7 +2957,7 @@ func handleGetNetworkHashPS(s *rpcServer, cmd interface{}, closeChan <-chan stru // Literal zeros are inferred as int, and won't coerce to int64 // because the return value is an interface{}. - c := cmd.(*btcjson.GetNetworkHashPSCmd) + c := cmd.(*dcrjson.GetNetworkHashPSCmd) _, newestHeight, err := s.server.db.NewestSha() if err != nil { @@ -2032,13 +2984,19 @@ func handleGetNetworkHashPS(s *rpcServer, cmd interface{}, closeChan <-chan stru // blocks. When the passed value is negative, use the last block the // difficulty changed as the starting height. Also make sure the // starting height is not before the beginning of the chain. + + // Decred TODO: Make sure this blocksPerRetarget value is accurate + blocksPerRetarget := int64(s.server.chainParams.TargetTimespan / + s.server.chainParams.TimePerBlock) + numBlocks := int64(120) if c.Blocks != nil { numBlocks = int64(*c.Blocks) } + var startHeight int64 if numBlocks <= 0 { - startHeight = endHeight - ((endHeight % blockchain.BlocksPerRetarget) + 1) + startHeight = endHeight - ((endHeight % blocksPerRetarget) + 1) } else { startHeight = endHeight - numBlocks } @@ -2099,12 +3057,12 @@ func handleGetPeerInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) // handleGetRawMempool implements the getrawmempool command. 
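For getnetworkhashps, the window of blocks to average is derived from blocksPerRetarget = TargetTimespan / TimePerBlock whenever the caller passes a non-positive block count. A small sketch of that start-height arithmetic with example parameters (not the real chaincfg numbers):

```go
package main

import (
	"fmt"
	"time"
)

// startHeightForHashPS reproduces the windowing logic above: a non-positive
// block count means "since the last difficulty retarget", otherwise the
// caller-supplied number of blocks is used.
func startHeightForHashPS(endHeight, numBlocks int64, targetTimespan, timePerBlock time.Duration) int64 {
	blocksPerRetarget := int64(targetTimespan / timePerBlock)
	if numBlocks <= 0 {
		return endHeight - ((endHeight % blocksPerRetarget) + 1)
	}
	return endHeight - numBlocks
}

func main() {
	// With a 144-block retarget interval and a tip at height 1000, a
	// non-positive count starts just below the latest retarget boundary.
	fmt.Println(startHeightForHashPS(1000, 0, 144*5*time.Minute, 5*time.Minute))
	fmt.Println(startHeightForHashPS(1000, 120, 144*5*time.Minute, 5*time.Minute))
}
```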
func handleGetRawMempool(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.GetRawMempoolCmd) + c := cmd.(*dcrjson.GetRawMempoolCmd) mp := s.server.txMemPool descs := mp.TxDescs() if c.Verbose != nil && *c.Verbose { - result := make(map[string]*btcjson.GetRawMempoolVerboseResult, + result := make(map[string]*dcrjson.GetRawMempoolVerboseResult, len(descs)) _, newestHeight, err := s.server.db.NewestSha() @@ -2127,9 +3085,9 @@ func handleGetRawMempool(s *rpcServer, cmd interface{}, closeChan <-chan struct{ newestHeight+1) } - mpd := &btcjson.GetRawMempoolVerboseResult{ + mpd := &dcrjson.GetRawMempoolVerboseResult{ Size: int32(desc.Tx.MsgTx().SerializeSize()), - Fee: btcutil.Amount(desc.Fee).ToBTC(), + Fee: dcrutil.Amount(desc.Fee).ToCoin(), Time: desc.Added.Unix(), Height: desc.Height, StartingPriority: startingPriority, @@ -2162,36 +3120,73 @@ func handleGetRawMempool(s *rpcServer, cmd interface{}, closeChan <-chan struct{ // handleGetRawTransaction implements the getrawtransaction command. func handleGetRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.GetRawTransactionCmd) + c := cmd.(*dcrjson.GetRawTransactionCmd) // Convert the provided transaction hash hex to a ShaHash. - txHash, err := wire.NewShaHashFromStr(c.Txid) + txHash, err := chainhash.NewHashFromStr(c.Txid) if err != nil { return nil, rpcDecodeHexError(c.Txid) } // Try to fetch the transaction from the memory pool and if that fails, // try the block database. + var maxIdx int64 var mtx *wire.MsgTx - var blkHash *wire.ShaHash + var blkHash *chainhash.Hash + var blkHeight int64 + var blkIndex uint32 + var tip *dcrutil.Block + needsVotes := false tx, err := s.server.txMemPool.FetchTransaction(txHash) if err != nil { - txList, err := s.server.db.FetchTxBySha(txHash) - if err != nil || len(txList) == 0 { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCNoTxInfo, - Message: "No information available about transaction", + // Search the database. + if mtx == nil { + txList, err := s.server.db.FetchTxBySha(txHash) + if err != nil || len(txList) == 0 { + // Search the parent for transactions that need to be voted on. + tip, err = s.server.blockManager.GetTopBlockFromChain() + if err != nil { + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCBlockNotFound, + Message: "No information available about top block", + } + } + + for i, tx := range tip.Transactions() { + if tx.Sha().IsEqual(txHash) { + mtx = tx.MsgTx() + blkHash = tip.Sha() + blkHeight = tip.Height() + blkIndex = uint32(i) + needsVotes = true + } + } + + // Can't find it anywhere, return an error. + if mtx == nil { + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCNoTxInfo, + Message: "No information available about transaction", + } + } + } else { // It was successfully fetched from the DB. + lastTx := len(txList) - 1 + mtx = txList[lastTx].Tx + blkHash = txList[lastTx].BlkSha + blkHeight = txList[lastTx].Height + blkIndex = txList[lastTx].Index } } - - lastTx := len(txList) - 1 - mtx = txList[lastTx].Tx - - blkHash = txList[lastTx].BlkSha } else { mtx = tx.MsgTx() } + _, maxIdx, err = s.server.db.NewestSha() + if err != nil { + context := "Failed to get newest hash" + return nil, internalRPCError(err.Error(), context) + } + // When the verbose flag isn't set, simply return the network-serialized // transaction as a hex-encoded string. 
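handleGetRawTransaction now resolves a hash through three sources in order: the mempool, the transaction database, and finally the regular transactions of the current tip, which may still be awaiting votes. A compact sketch of that fallback order with stand-in lookup functions (names and signatures are illustrative, not the dcrd APIs):

```go
package main

import (
	"errors"
	"fmt"
)

// lookupTx sketches the fallback order used by handleGetRawTransaction:
// mempool first, then the database, then the transactions of the current
// tip block. All three sources are stand-in function values.
func lookupTx(hash string,
	fromMempool, fromDB, fromTip func(string) ([]byte, bool)) ([]byte, string, error) {
	if tx, ok := fromMempool(hash); ok {
		return tx, "mempool", nil
	}
	if tx, ok := fromDB(hash); ok {
		return tx, "database", nil
	}
	if tx, ok := fromTip(hash); ok {
		return tx, "tip (awaiting votes)", nil
	}
	return nil, "", errors.New("no information available about transaction")
}

func main() {
	inMempool := func(h string) ([]byte, bool) { return nil, false }
	inDB := func(h string) ([]byte, bool) {
		if h == "aa" {
			return []byte{0x01}, true
		}
		return nil, false
	}
	inTip := func(h string) ([]byte, bool) { return nil, false }

	tx, src, err := lookupTx("aa", inMempool, inDB, inTip)
	fmt.Println(tx, src, err)
}
```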
if c.Verbose == nil || *c.Verbose == 0 { @@ -2206,33 +3201,53 @@ func handleGetRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan str return mtxHex, nil } - var blk *btcutil.Block - var maxIdx int64 + var blk *dcrutil.Block if blkHash != nil { - blk, err = s.server.db.FetchBlockBySha(blkHash) - if err != nil { - rpcsLog.Errorf("Error fetching sha: %v", err) - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCBlockNotFound, - Message: "Block not found: " + err.Error(), + if needsVotes { + blk = tip + } else { + blk, err = s.server.db.FetchBlockBySha(blkHash) + if err != nil { + rpcsLog.Errorf("Error fetching sha: %v", err) + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCBlockNotFound, + Message: "Block not found: " + err.Error(), + } } } - - _, maxIdx, err = s.server.db.NewestSha() - if err != nil { - context := "Failed to get newest hash" - return nil, internalRPCError(err.Error(), context) - } } rawTxn, err := createTxRawResult(s.server.chainParams, c.Txid, mtx, blk, - maxIdx, blkHash) + maxIdx, blkHash, blkHeight, blkIndex) if err != nil { return nil, err } return *rawTxn, nil } +// handleGetStakeDifficulty implements the getstakedifficulty command. +func handleGetStakeDifficulty(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { + sha, _, err := s.server.db.NewestSha() + if err != nil { + rpcsLog.Errorf("Error getting sha: %v", err) + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCDifficulty, + Message: "Error getting stake difficulty: " + err.Error(), + } + } + blockHeader, err := s.server.db.FetchBlockHeaderBySha(sha) + if err != nil { + rpcsLog.Errorf("Error getting block: %v", err) + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCDifficulty, + Message: "Error getting stake difficulty: " + err.Error(), + } + } + sDiff := dcrutil.Amount(blockHeader.SBits) + + return &dcrjson.GetStakeDifficultyResult{sDiff.ToCoin()}, nil +} + // bigToLEUint256 returns the passed big integer as an unsigned 256-bit integer // encoded as little-endian bytes. Numbers which are larger than the max // unsigned 256-bit integer are truncated. @@ -2270,10 +3285,10 @@ func reverseUint32Array(b []byte) { // handleGetTxOut handles gettxout commands. func handleGetTxOut(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.GetTxOutCmd) + c := cmd.(*dcrjson.GetTxOutCmd) // Convert the provided transaction hash hex to a ShaHash. 
- txHash, err := wire.NewShaHashFromStr(c.Txid) + txHash, err := chainhash.NewHashFromStr(c.Txid) if err != nil { return nil, rpcDecodeHexError(c.Txid) } @@ -2293,8 +3308,8 @@ func handleGetTxOut(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i if includeMempool && s.server.txMemPool.HaveTransaction(txHash) { tx, err := s.server.txMemPool.FetchTransaction(txHash) if err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCNoTxInfo, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCNoTxInfo, Message: "No information available about transaction", } } @@ -2304,8 +3319,8 @@ func handleGetTxOut(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i } else { txList, err := s.server.db.FetchTxBySha(txHash) if err != nil || len(txList) == 0 { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCNoTxInfo, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCNoTxInfo, Message: "No information available about transaction", } } @@ -2327,8 +3342,8 @@ func handleGetTxOut(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i } if c.Vout > uint32(len(mtx.TxOut)-1) { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidTxVout, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidTxVout, Message: "Ouput index number (vout) does not exist " + "for transaction.", } @@ -2359,30 +3374,42 @@ func handleGetTxOut(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i // Get further info about the script. // Ignore the error here since an error means the script couldn't parse // and there is no additional information about it anyways. - scriptClass, addrs, reqSigs, _ := txscript.ExtractPkScriptAddrs(script, - s.server.chainParams) + scriptClass, addrs, reqSigs, _ := txscript.ExtractPkScriptAddrs(txOut.Version, + script, s.server.chainParams) addresses := make([]string, len(addrs)) for i, addr := range addrs { addresses[i] = addr.EncodeAddress() } - txOutReply := &btcjson.GetTxOutResult{ + txOutReply := &dcrjson.GetTxOutResult{ BestBlock: bestBlockSha, Confirmations: confirmations, - Value: btcutil.Amount(txOut.Value).ToUnit(btcutil.AmountBTC), + Value: dcrutil.Amount(txOut.Value).ToUnit(dcrutil.AmountCoin), Version: mtx.Version, - ScriptPubKey: btcjson.ScriptPubKeyResult{ + ScriptPubKey: dcrjson.ScriptPubKeyResult{ Asm: disbuf, Hex: hex.EncodeToString(script), ReqSigs: int32(reqSigs), Type: scriptClass.String(), Addresses: addresses, }, - Coinbase: blockchain.IsCoinBase(btcutil.NewTx(mtx)), + Coinbase: blockchain.IsCoinBase(dcrutil.NewTx(mtx)), } return txOutReply, nil } +// pruneOldBlockTemplates prunes all old block templates from the templatePool +// map. Must be called with the RPC workstate locked to avoid races to the map. +func pruneOldBlockTemplates(s *rpcServer, bestHeight int64) { + pool := s.templatePool + for rootHash, blkData := range pool { + height := int64(blkData.msgBlock.Header.Height) + if height < bestHeight-getworkExpirationDiff { + delete(pool, rootHash) + } + } +} + // handleGetWorkRequest is a helper for handleGetWork which deals with // generating and returning work to the caller. // @@ -2397,16 +3424,21 @@ func handleGetWorkRequest(s *rpcServer) (interface{}, error) { lastTxUpdate := s.server.txMemPool.LastUpdated() latestHash, latestHeight := s.server.blockManager.chainState.Best() msgBlock := state.msgBlock + + // The current code pulls down a new template every second, however with a + // large mempool this will be pretty excruciating sometimes. 
It should examine + // whether or not a new template needs to be created based on the votes + // present every second or so, and then, if needed, generate a new block + // template. TODO cj if msgBlock == nil || state.prevHash == nil || !state.prevHash.IsEqual(latestHash) || (state.lastTxUpdate != lastTxUpdate && - time.Now().After(state.lastGenerated.Add(time.Minute))) { - - // Reset the extra nonce and clear all cached template + time.Now().After(state.lastGenerated.Add(time.Second))) { + // Reset the extra nonce and clear all expired cached template // variations if the best block changed. if state.prevHash != nil && !state.prevHash.IsEqual(latestHash) { state.extraNonce = 0 - state.blockInfo = make(map[wire.ShaHash]*workStateBlockInfo) + pruneOldBlockTemplates(s, latestHeight) } // Reset the previous best hash the block template was generated @@ -2422,7 +3454,16 @@ func handleGetWorkRequest(s *rpcServer) (interface{}, error) { context := "Failed to create new block template" return nil, internalRPCError(err.Error(), context) } - msgBlock = template.block + if template == nil { + // This happens if the template is returned nil because there + // are not enough voters on HEAD and there is currently an + // unsuitable parent cached template to try building off of. + context := "Failed to create new block template: not enough voters" + + " and failed to find a suitable parent template to build from" + return nil, internalRPCError("internal error", context) + } + templateCopy := deepCopyBlockTemplate(template) + msgBlock = templateCopy.block // Update work state to ensure another block template isn't // generated until needed. @@ -2432,13 +3473,17 @@ func handleGetWorkRequest(s *rpcServer) (interface{}, error) { state.prevHash = latestHash rpcsLog.Debugf("Generated block template (timestamp %v, extra "+ - "nonce %d, target %064x, merkle root %s, signature "+ - "script %x)", msgBlock.Header.Timestamp, + "nonce %d, target %064x, merkle root %s)", + msgBlock.Header.Timestamp, state.extraNonce, blockchain.CompactToBig(msgBlock.Header.Bits), - msgBlock.Header.MerkleRoot, - msgBlock.Transactions[0].TxIn[0].SignatureScript) + msgBlock.Header.MerkleRoot) } else { + if msgBlock == nil { + context := "Failed to create new block template, no previous state" + return nil, internalRPCError("internal error", context) + } + // At this point, there is a saved block template and a new // request for work was made, but either the available // transactions haven't change or it hasn't been long enough to @@ -2446,30 +3491,37 @@ func handleGetWorkRequest(s *rpcServer) (interface{}, error) { // existing block template and track the variations so each // variation can be regenerated if a caller finds an answer and // makes a submission against it. + templateCopy := deepCopyBlockTemplate(&BlockTemplate{ + block: msgBlock, + }) + msgBlock = templateCopy.block // Update the time of the block template to the current time // while accounting for the median time of the past several // blocks per the chain consensus rules. UpdateBlockTime(msgBlock, s.server.blockManager) - // Increment the extra nonce and update the block template - // with the new value by regenerating the coinbase script and - // setting the merkle root to the new value. 
- state.extraNonce++ - err := UpdateExtraNonce(msgBlock, latestHeight+1, state.extraNonce) - if err != nil { - errStr := fmt.Sprintf("Failed to update extra nonce: "+ - "%v", err) - return nil, internalRPCError(errStr, "") + if templateCopy.height > 1 { + // Increment the extra nonce and update the block template + // with the new value by regenerating the coinbase script and + // setting the merkle root to the new value. + ens := getCoinbaseExtranonces(msgBlock) + state.extraNonce++ + ens[0]++ + err := UpdateExtraNonce(msgBlock, latestHeight+1, ens) + if err != nil { + errStr := fmt.Sprintf("Failed to update extra nonce: "+ + "%v", err) + return nil, internalRPCError(errStr, "") + } } rpcsLog.Debugf("Updated block template (timestamp %v, extra "+ - "nonce %d, target %064x, merkle root %s, signature "+ - "script %x)", msgBlock.Header.Timestamp, + "nonce %d, target %064x, merkle root %s)", + msgBlock.Header.Timestamp, state.extraNonce, blockchain.CompactToBig(msgBlock.Header.Bits), - msgBlock.Header.MerkleRoot, - msgBlock.Transactions[0].TxIn[0].SignatureScript) + msgBlock.Header.MerkleRoot) } // In order to efficiently store the variations of block templates that @@ -2479,15 +3531,25 @@ func handleGetWorkRequest(s *rpcServer) (interface{}, error) { // submission, is used to rebuild the block before checking the // submitted solution. coinbaseTx := msgBlock.Transactions[0] - state.blockInfo[msgBlock.Header.MerkleRoot] = &workStateBlockInfo{ - msgBlock: msgBlock, - signatureScript: coinbaseTx.TxIn[0].SignatureScript, + if msgBlock.Header.Height > 1 { + s.templatePool[msgBlock.Header.MerkleRoot] = &workStateBlockInfo{ + msgBlock: msgBlock, + pkScript: coinbaseTx.TxOut[1].PkScript, + } + } else { + s.templatePool[msgBlock.Header.MerkleRoot] = &workStateBlockInfo{ + msgBlock: msgBlock, + } } // Serialize the block header into a buffer large enough to hold the // the block header and the internal sha256 padding that is added and // retuned as part of the data below. - data := make([]byte, 0, getworkDataLen) + // For reference: + // data[116] --> nBits + // data[136] --> Timestamp + // data[140] --> nonce + var data []byte buf := bytes.NewBuffer(data) err := msgBlock.Header.Serialize(buf) if err != nil { @@ -2495,34 +3557,15 @@ func handleGetWorkRequest(s *rpcServer) (interface{}, error) { return nil, internalRPCError(errStr, "") } - // Calculate the midstate for the block header. The midstate here is - // the internal state of the sha256 algorithm for the first chunk of the - // block header (sha256 operates on 64-byte chunks) which is before the - // nonce. This allows sophisticated callers to avoid hashing the first - // chunk over and over while iterating the nonce range. - data = data[:buf.Len()] - midstate := fastsha256.MidState256(data) - // Expand the data slice to include the full data buffer and apply the - // internal sha256 padding which consists of a single 1 bit followed - // by enough zeros to pad the message out to 56 bytes followed by the - // length of the message in bits encoded as a big-endian uint64 - // (8 bytes). Thus, the resulting length is a multiple of the sha256 - // block size (64 bytes). This makes the data ready for sophisticated - // caller to make use of only the second chunk along with the midstate - // for the first chunk. 
- data = data[:getworkDataLen] - data[wire.MaxBlockHeaderPayload] = 0x80 - binary.BigEndian.PutUint64(data[len(data)-8:], - wire.MaxBlockHeaderPayload*8) - - // Create the hash1 field which is a zero hash along with the internal - // sha256 padding as described above. This field is really quite - // useless, but it is required for compatibility with the reference - // implementation. - var hash1 [hash1Len]byte - hash1[wire.HashSize] = 0x80 - binary.BigEndian.PutUint64(hash1[len(hash1)-8:], wire.HashSize*8) + // internal padding. + data = append(data, buf.Bytes()...) + minimumDataSuffix := []byte{ + 0x80, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x05, 0xa0, + } + data = append(data, minimumDataSuffix...) // The final result reverses the each of the fields to little endian. // In particular, the data, hash1, and midstate fields are treated as @@ -2534,15 +3577,10 @@ func handleGetWorkRequest(s *rpcServer) (interface{}, error) { // The fact the fields are reversed in this way is rather odd and likey // an artifact of some legacy internal state in the reference // implementation, but it is required for compatibility. - reverseUint32Array(data) - reverseUint32Array(hash1[:]) - reverseUint32Array(midstate[:]) target := bigToLEUint256(blockchain.CompactToBig(msgBlock.Header.Bits)) - reply := &btcjson.GetWorkResult{ - Data: hex.EncodeToString(data), - Hash1: hex.EncodeToString(hash1[:]), - Midstate: hex.EncodeToString(midstate[:]), - Target: hex.EncodeToString(target[:]), + reply := &dcrjson.GetWorkResult{ + Data: hex.EncodeToString(data), + Target: hex.EncodeToString(target[:]), } return reply, nil } @@ -2561,28 +3599,21 @@ func handleGetWorkSubmission(s *rpcServer, hexData string) (interface{}, error) return false, rpcDecodeHexError(hexData) } if len(data) != getworkDataLen { - return false, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidParameter, + return false, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, Message: fmt.Sprintf("Argument must be "+ "%d bytes (not %d)", getworkDataLen, len(data)), } } - // Reverse the data as if it were an array of 32-bit unsigned integers. - // The fact the getwork request and submission data is reversed in this - // way is rather odd and likey an artifact of some legacy internal state - // in the reference implementation, but it is required for - // compatibility. - reverseUint32Array(data) - // Deserialize the block header from the data. var submittedHeader wire.BlockHeader bhBuf := bytes.NewReader(data[0:wire.MaxBlockHeaderPayload]) err = submittedHeader.Deserialize(bhBuf) if err != nil { - return false, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidParameter, + return false, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, Message: fmt.Sprintf("Argument does not "+ "contain a valid block header: %v", err), } @@ -2591,23 +3622,33 @@ func handleGetWorkSubmission(s *rpcServer, hexData string) (interface{}, error) // Look up the full block for the provided data based on the // merkle root. Return false to indicate the solve failed if // it's not available. - state := s.workState - blockInfo, ok := state.blockInfo[submittedHeader.MerkleRoot] + blockInfo, ok := s.templatePool[submittedHeader.MerkleRoot] if !ok { - rpcsLog.Debugf("Block submitted via getwork has no matching "+ + rpcsLog.Errorf("Block submitted via getwork has no matching "+ "template for merkle root %s", submittedHeader.MerkleRoot) return false, nil } // Reconstruct the block using the submitted header stored block info. 
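The getwork reply's data field is the serialized block header followed by the fixed padding suffix added above, hex-encoded for JSON transport. A stdlib-only sketch of that assembly, using placeholder header bytes rather than a real serialized Decred header:

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// buildGetWorkData shows the shape of the getwork "data" field: the
// serialized block header followed by the fixed padding suffix, hex-encoded
// for the JSON reply. The suffix bytes match the minimumDataSuffix above.
func buildGetWorkData(headerBytes []byte) string {
	suffix := []byte{
		0x80, 0x00, 0x00, 0x01,
		0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x05, 0xa0,
	}
	data := make([]byte, 0, len(headerBytes)+len(suffix))
	data = append(data, headerBytes...)
	data = append(data, suffix...)
	return hex.EncodeToString(data)
}

func main() {
	// Placeholder header bytes; a real header would be serialized first.
	fmt.Println(buildGetWorkData([]byte{0xde, 0xad, 0xbe, 0xef}))
}
```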
- msgBlock := blockInfo.msgBlock - block := btcutil.NewBlock(msgBlock) - msgBlock.Header.Timestamp = submittedHeader.Timestamp - msgBlock.Header.Nonce = submittedHeader.Nonce - msgBlock.Transactions[0].TxIn[0].SignatureScript = blockInfo.signatureScript - merkles := blockchain.BuildMerkleTreeStore(block.Transactions()) - msgBlock.Header.MerkleRoot = *merkles[len(merkles)-1] + // A temporary block is used because we will be mutating the contents + // for the construction of the correct regular merkle tree. You must + // also deep copy the block itself because it could be accessed outside + // of the GW workstate mutexes once it gets submitted to the blockchain. + tempBlock := dcrutil.NewBlockDeepCopy(blockInfo.msgBlock) + msgBlock := tempBlock.MsgBlock() + msgBlock.Header = submittedHeader + if msgBlock.Header.Height > 1 { + pkScriptCopy := make([]byte, len(blockInfo.pkScript), + len(blockInfo.pkScript)) + copy(pkScriptCopy, blockInfo.pkScript) + msgBlock.Transactions[0].TxOut[1].PkScript = blockInfo.pkScript + merkles := blockchain.BuildMerkleTreeStore(tempBlock.Transactions()) + msgBlock.Header.MerkleRoot = *merkles[len(merkles)-1] + } + + // The real block to submit, with a proper nonce and extraNonce. + block := dcrutil.NewBlockDeepCopyCoinbase(msgBlock) // Ensure the submitted block hash is less than the target difficulty. err = blockchain.CheckProofOfWork(block, activeNetParams.PowLimit) @@ -2620,18 +3661,11 @@ func handleGetWorkSubmission(s *rpcServer, hexData string) (interface{}, error) "") } - rpcsLog.Debugf("Block submitted via getwork does not meet "+ + rpcsLog.Errorf("Block submitted via getwork does not meet "+ "the required proof of work: %v", err) return false, nil } - latestHash, _ := s.server.blockManager.chainState.Best() - if !msgBlock.Header.PrevBlock.IsEqual(latestHash) { - rpcsLog.Debugf("Block submitted via getwork with previous "+ - "block %s is stale", msgBlock.Header.PrevBlock) - return false, nil - } - // Process this block using the same rules as blocks coming from other // nodes. This will in turn relay it to the network like normal. isOrphan, err := s.server.blockManager.ProcessBlock(block, blockchain.BFNone) @@ -2654,13 +3688,21 @@ func handleGetWorkSubmission(s *rpcServer, hexData string) (interface{}, error) // handleGetWork implements the getwork command. func handleGetWork(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.GetWorkCmd) + if s.server.cpuMiner.IsMining() { + err := rpcsLog.Errorf("getwork polling is disallowed " + + "while CPU mining is enabled. Please disable CPU mining " + + "and try again.") + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCMisc, + Message: err.Error(), + } + } // Respond with an error if there are no addresses to pay the created // blocks to. if len(cfg.miningAddrs) == 0 { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInternal.Code, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInternal.Code, Message: "No payment addresses specified via --miningaddr", } } @@ -2669,22 +3711,24 @@ func handleGetWork(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (in // way to relay a found block or receive transactions to work on. // However, allow this state when running in the regression test or // simulation test mode. 
- if !(cfg.RegressionTest || cfg.SimNet) && s.server.ConnectedCount() == 0 { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCClientNotConnected, - Message: "Bitcoin is not connected", + if !cfg.SimNet && s.server.ConnectedCount() == 0 { + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCClientNotConnected, + Message: "Decred is not connected", } } // No point in generating or accepting work before the chain is synced. _, currentHeight := s.server.blockManager.chainState.Best() if currentHeight != 0 && !s.server.blockManager.IsCurrent() { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCClientInInitialDownload, - Message: "Bitcoin is downloading blocks...", + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCClientInInitialDownload, + Message: "Decred is downloading blocks...", } } + c := cmd.(*dcrjson.GetWorkCmd) + // Protect concurrent access from multiple RPC invocations for work // requests and submission. s.workState.Lock() @@ -2703,7 +3747,7 @@ func handleGetWork(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (in // handleHelp implements the help command. func handleHelp(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.HelpCmd) + c := cmd.(*dcrjson.HelpCmd) // Provide a usage overview of all commands when no specific command // was specified. @@ -2725,8 +3769,8 @@ func handleHelp(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (inter // for commands that are unimplemented or related to wallet // functionality. if _, ok := rpcHandlers[command]; !ok { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidParameter, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, Message: "Unknown command: " + command, } } @@ -2740,6 +3784,22 @@ func handleHelp(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (inter return help, nil } +func handleMissedTickets(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { + mt, err := s.server.blockManager.MissedTickets() + if err != nil { + return nil, err + } + + mtString := make([]string, len(mt), len(mt)) + itr := 0 + for hash, _ := range mt { + mtString[itr] = hash.String() + itr++ + } + + return dcrjson.MissedTicketsResult{mtString}, nil +} + // handlePing implements the ping command. func handlePing(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { // Ask server to ping \o_ @@ -2753,29 +3813,91 @@ func handlePing(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (inter return nil, nil } +func handleRebroadcastMissed(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { + hash, height := s.server.blockManager.chainState.Best() + mt, err := s.server.blockManager.MissedTickets() + if err != nil { + return nil, err + } + + stakeDiff, err := s.server.blockManager.CalcNextRequiredStakeDifficulty() + if err != nil { + return nil, err + } + + missedTicketsNtfn := &blockchain.TicketNotificationsData{ + *hash, + height, + stakeDiff, + mt, + } + + s.ntfnMgr.NotifySpentAndMissedTickets(missedTicketsNtfn) + + return nil, nil +} + +// handleRebroadcastWinners implements the rebroadcastwinners command. 
+func handleRebroadcastWinners(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { + hash, height := s.server.blockManager.chainState.Best() + blocks, err := s.server.blockManager.GetGeneration(*hash) + if err != nil { + return nil, err + } + + s.server.blockManager.blockLotteryDataCacheMutex.Lock() + defer s.server.blockManager.blockLotteryDataCacheMutex.Unlock() + for _, b := range blocks { + lotteryData := new(BlockLotteryData) + exists := false + _, exists = s.server.blockManager.blockLotteryDataCache[b] + if !exists { + winningTickets, poolSize, finalState, err := + s.server.blockManager.GetLotteryData(b) + if err != nil { + return nil, err + } + lotteryData.finalState = finalState + lotteryData.poolSize = poolSize + lotteryData.ntfnData = &WinningTicketsNtfnData{ + b, + height, + winningTickets} + s.server.blockManager.blockLotteryDataCache[b] = lotteryData + } else { + lotteryData, _ = s.server.blockManager.blockLotteryDataCache[b] + } + + s.ntfnMgr.NotifyWinningTickets(lotteryData.ntfnData) + } + + return nil, nil +} + // handleSearchRawTransaction implements the searchrawtransactions command. -func handleSearchRawTransactions(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - if !cfg.AddrIndex { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCMisc, - Message: "Address index must be enabled (--addrindex)", +func handleSearchRawTransactions(s *rpcServer, cmd interface{}, + closeChan <-chan struct{}) (interface{}, error) { + if cfg.NoAddrIndex { + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCMisc, + Message: "Address indexing must be enabled", } } if !s.server.addrIndexer.IsCaughtUp() { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCMisc, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCMisc, Message: "Address index has not yet caught up to the " + "current best height", } } - c := cmd.(*btcjson.SearchRawTransactionsCmd) + c := cmd.(*dcrjson.SearchRawTransactionsCmd) // Attempt to decode the supplied address. - addr, err := btcutil.DecodeAddress(c.Address, s.server.chainParams) + addr, err := dcrutil.DecodeAddress(c.Address, s.server.chainParams) if err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidAddressOrKey, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidAddressOrKey, Message: "Invalid address or key: " + err.Error(), } } @@ -2827,8 +3949,8 @@ func handleSearchRawTransactions(s *rpcServer, cmd interface{}, closeChan <-chan // If neither source yielded any results, then the address has never // been used. if len(addressTxs) == 0 { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCNoTxInfo, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCNoTxInfo, Message: "No information available about transaction", } } @@ -2853,34 +3975,45 @@ func handleSearchRawTransactions(s *rpcServer, cmd interface{}, closeChan <-chan return nil, internalRPCError(err.Error(), context) } - rawTxns := make([]btcjson.TxRawResult, len(addressTxs), len(addressTxs)) + rawTxns := make([]dcrjson.TxRawResult, len(addressTxs), len(addressTxs)) for i, txReply := range addressTxs { txHash := txReply.Sha.String() mtx := txReply.Tx + mtxHash := mtx.TxSha() // Transactions grabbed from the mempool aren't yet // within a block. So we conditionally fetch a txs // embedded block here. This will be reflected in the // final JSON output (mempool won't have confirmations). 
- var blk *btcutil.Block + var blk *dcrutil.Block if txReply.BlkSha != nil { blk, err = s.server.db.FetchBlockBySha(txReply.BlkSha) if err != nil { rpcsLog.Errorf("Error fetching sha: %v", err) - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCBlockNotFound, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCBlockNotFound, Message: "Block not found", } } } - var blkHash *wire.ShaHash + var blkHash *chainhash.Hash + var blkHeight int64 + var blkIndex uint32 if blk != nil { blkHash = blk.Sha() + blkHeight = blk.Height() + blkIndex = wire.NullBlockIndex + + for i, tx := range blk.Transactions() { + if tx.Sha().IsEqual(&mtxHash) { + blkIndex = uint32(i) + } + } } rawTxn, err := createTxRawResult(s.server.chainParams, - txHash, mtx, blk, maxIdx, blkHash) + txHash, mtx, blk, maxIdx, blkHash, blkHeight, blkIndex) if err != nil { return nil, err } @@ -2891,7 +4024,7 @@ func handleSearchRawTransactions(s *rpcServer, cmd interface{}, closeChan <-chan // handleSendRawTransaction implements the sendrawtransaction command. func handleSendRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.SendRawTransactionCmd) + c := cmd.(*dcrjson.SendRawTransactionCmd) // Deserialize and send off to tx relay hexStr := c.HexTx if len(hexStr)%2 != 0 { @@ -2904,14 +4037,14 @@ func handleSendRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan st msgtx := wire.NewMsgTx() err = msgtx.Deserialize(bytes.NewReader(serializedTx)) if err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCDeserialization, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCDeserialization, Message: "TX decode failed: " + err.Error(), } } - tx := btcutil.NewTx(msgtx) - err = s.server.txMemPool.ProcessTransaction(tx, false, false) + tx := dcrutil.NewTx(msgtx) + err = s.server.blockManager.ProcessTransaction(tx, false, false) if err != nil { // When the error is a rule error, it means the transaction was // simply rejected as opposed to something actually going wrong, @@ -2926,8 +4059,8 @@ func handleSendRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan st rpcsLog.Errorf("Failed to process transaction %v: %v", tx.Sha(), err) } - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCDeserialization, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCDeserialization, Message: "TX rejected: " + err.Error(), } } @@ -2942,7 +4075,7 @@ func handleSendRawTransaction(s *rpcServer, cmd interface{}, closeChan <-chan st // handleSetGenerate implements the setgenerate command. func handleSetGenerate(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.SetGenerateCmd) + c := cmd.(*dcrjson.SetGenerateCmd) // Disable generation regardless of the provided generate flag if the // maximum number of threads (goroutines for our purposes) is 0. @@ -2962,8 +4095,8 @@ func handleSetGenerate(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) // Respond with an error if there are no addresses to pay the // created blocks to. if len(cfg.miningAddrs) == 0 { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInternal.Code, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInternal.Code, Message: "No payment addresses specified " + "via --miningaddr", } @@ -2979,12 +4112,12 @@ func handleSetGenerate(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) // handleStop implements the stop command. 
func handleStop(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { s.server.Stop() - return "btcd stopping.", nil + return "dcrd stopping.", nil } // handleSubmitBlock implements the submitblock command. func handleSubmitBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.SubmitBlockCmd) + c := cmd.(*dcrjson.SubmitBlockCmd) // Deserialize the submitted block. hexStr := c.HexBlock @@ -2996,10 +4129,10 @@ func handleSubmitBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) return nil, rpcDecodeHexError(hexStr) } - block, err := btcutil.NewBlockFromBytes(serializedBlock) + block, err := dcrutil.NewBlockFromBytes(serializedBlock) if err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCDeserialization, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCDeserialization, Message: "Block decode failed: " + err.Error(), } } @@ -3013,12 +4146,39 @@ func handleSubmitBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) return nil, nil } +// handleTicketsForAddress implements the command. +func handleTicketsForAddress(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { + c := cmd.(*dcrjson.TicketsForAddressCmd) + + addr, err := dcrutil.DecodeAddress(c.Address, s.server.chainParams) + if err != nil { + return nil, err + } + + tickets, err := s.server.blockManager.TicketsForAddress(addr) + if err != nil { + return nil, err + } + + ticketStrings := make([]string, len(tickets), len(tickets)) + itr := 0 + for _, ticket := range tickets { + ticketStrings[itr] = ticket.String() + itr++ + } + + reply := &dcrjson.TicketsForAddressResult{ + Tickets: ticketStrings, + } + return reply, nil +} + // handleValidateAddress implements the validateaddress command. func handleValidateAddress(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.ValidateAddressCmd) + c := cmd.(*dcrjson.ValidateAddressCmd) - result := btcjson.ValidateAddressChainResult{} - addr, err := btcutil.DecodeAddress(c.Address, activeNetParams.Params) + result := dcrjson.ValidateAddressChainResult{} + addr, err := dcrutil.DecodeAddress(c.Address, activeNetParams.Params) if err != nil { // Return the default value (false) for IsValid. return result, nil @@ -3064,7 +4224,8 @@ func verifyChain(db database.Db, level, depth int32, timeSource blockchain.Media // Level 1 does basic chain sanity checks. if level > 0 { err := blockchain.CheckBlockSanity(block, - activeNetParams.PowLimit, timeSource) + timeSource, + activeNetParams.Params) if err != nil { rpcsLog.Errorf("Verify is unable to "+ "validate block at sha %v height "+ @@ -3080,7 +4241,7 @@ func verifyChain(db database.Db, level, depth int32, timeSource blockchain.Media // handleVerifyChain implements the verifychain command. func handleVerifyChain(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.VerifyChainCmd) + c := cmd.(*dcrjson.VerifyChainCmd) var checkLevel, checkDepth int32 if c.CheckLevel != nil { @@ -3097,21 +4258,21 @@ func handleVerifyChain(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) // handleVerifyMessage implements the verifymessage command. func handleVerifyMessage(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - c := cmd.(*btcjson.VerifyMessageCmd) + c := cmd.(*dcrjson.VerifyMessageCmd) // Decode the provided address. 
- addr, err := btcutil.DecodeAddress(c.Address, activeNetParams.Params) + addr, err := dcrutil.DecodeAddress(c.Address, activeNetParams.Params) if err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidAddressOrKey, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidAddressOrKey, Message: "Invalid address or key: " + err.Error(), } } // Only P2PKH addresses are valid for signing. - if _, ok := addr.(*btcutil.AddressPubKeyHash); !ok { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCType, + if _, ok := addr.(*dcrutil.AddressPubKeyHash); !ok { + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCType, Message: "Address is not a pay-to-pubkey-hash address", } } @@ -3119,16 +4280,16 @@ func handleVerifyMessage(s *rpcServer, cmd interface{}, closeChan <-chan struct{ // Decode base64 signature. sig, err := base64.StdEncoding.DecodeString(c.Signature) if err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCParse.Code, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCParse.Code, Message: "Malformed base64 encoding: " + err.Error(), } } // Validate the signature - this just shows that it was valid at all. // we will compare it with the key next. - pk, wasCompressed, err := btcec.RecoverCompact(btcec.S256(), sig, - wire.DoubleSha256([]byte("Bitcoin Signed Message:\n"+c.Message))) + pk, wasCompressed, err := chainec.Secp256k1.RecoverCompact(sig, + chainhash.HashFuncB([]byte("Secp256k1 Signed Message:\n"+c.Message))) if err != nil { // Mirror Bitcoin Core behavior, which treats error in // RecoverCompact as invalid signature. @@ -3136,14 +4297,14 @@ func handleVerifyMessage(s *rpcServer, cmd interface{}, closeChan <-chan struct{ } // Reconstruct the pubkey hash. - btcPK := (*btcec.PublicKey)(pk) + dcrPK := (chainec.PublicKey)(pk) var serializedPK []byte if wasCompressed { - serializedPK = btcPK.SerializeCompressed() + serializedPK = dcrPK.SerializeCompressed() } else { - serializedPK = btcPK.SerializeUncompressed() + serializedPK = dcrPK.SerializeUncompressed() } - address, err := btcutil.NewAddressPubKey(serializedPK, + address, err := dcrutil.NewAddressSecpPubKey(serializedPK, activeNetParams.Params) if err != nil { // Again mirror Bitcoin Core behavior, which treats error in public key @@ -3171,6 +4332,7 @@ type rpcServer struct { listeners []net.Listener workState *workState gbtWorkState *gbtWorkState + templatePool map[chainhash.Hash]*workStateBlockInfo helpCacher *helpCacher quit chan int } @@ -3341,7 +4503,7 @@ type parsedRPCCmd struct { id interface{} method string cmd interface{} - err *btcjson.RPCError + err *dcrjson.RPCError } // standardCmdResult checks that a parsed command is a standard Bitcoin JSON-RPC @@ -3363,7 +4525,7 @@ func (s *rpcServer) standardCmdResult(cmd *parsedRPCCmd, closeChan <-chan struct handler = handleUnimplemented goto handled } - return nil, btcjson.ErrRPCMethodNotFound + return nil, dcrjson.ErrRPCMethodNotFound handled: return handler(s, cmd.cmd, closeChan) @@ -3373,26 +4535,26 @@ handled: // err field of the returned parsedRPCCmd struct will contain an RPC error that // is suitable for use in replies if the command is invalid in some way such as // an unregistered command or invalid parameters. 
-func parseCmd(request *btcjson.Request) *parsedRPCCmd { +func parseCmd(request *dcrjson.Request) *parsedRPCCmd { var parsedCmd parsedRPCCmd parsedCmd.id = request.ID parsedCmd.method = request.Method - cmd, err := btcjson.UnmarshalCmd(request) + cmd, err := dcrjson.UnmarshalCmd(request) if err != nil { // When the error is because the method is not registered, // produce a method not found RPC error. - if jerr, ok := err.(btcjson.Error); ok && - jerr.ErrorCode == btcjson.ErrUnregisteredMethod { + if jerr, ok := err.(dcrjson.Error); ok && + jerr.Code == dcrjson.ErrUnregisteredMethod { - parsedCmd.err = btcjson.ErrRPCMethodNotFound + parsedCmd.err = dcrjson.ErrRPCMethodNotFound return &parsedCmd } // Otherwise, some type of invalid parameters is the // cause, so produce the equivalent RPC error. - parsedCmd.err = btcjson.NewRPCError( - btcjson.ErrRPCInvalidParams.Code, err.Error()) + parsedCmd.err = dcrjson.NewRPCError( + dcrjson.ErrRPCInvalidParams.Code, err.Error()) return &parsedCmd } @@ -3402,18 +4564,18 @@ func parseCmd(request *btcjson.Request) *parsedRPCCmd { // createMarshalledReply returns a new marshalled JSON-RPC response given the // passed parameters. It will automatically convert errors that are not of -// the type *btcjson.RPCError to the appropriate type as needed. +// the type *dcrjson.RPCError to the appropriate type as needed. func createMarshalledReply(id, result interface{}, replyErr error) ([]byte, error) { - var jsonErr *btcjson.RPCError + var jsonErr *dcrjson.RPCError if replyErr != nil { - if jErr, ok := replyErr.(*btcjson.RPCError); ok { + if jErr, ok := replyErr.(*dcrjson.RPCError); ok { jsonErr = jErr } else { jsonErr = internalRPCError(replyErr.Error(), "") } } - return btcjson.MarshalResponse(id, result, jsonErr) + return dcrjson.MarshalResponse(id, result, jsonErr) } // jsonRPCRead handles reading and responding to RPC messages. @@ -3465,10 +4627,10 @@ func (s *rpcServer) jsonRPCRead(w http.ResponseWriter, r *http.Request, var responseID interface{} var jsonErr error var result interface{} - var request btcjson.Request + var request dcrjson.Request if err := json.Unmarshal(body, &request); err != nil { - jsonErr = &btcjson.RPCError{ - Code: btcjson.ErrRPCParse.Code, + jsonErr = &dcrjson.RPCError{ + Code: dcrjson.ErrRPCParse.Code, Message: "Failed to parse request: " + err.Error(), } } @@ -3496,8 +4658,8 @@ func (s *rpcServer) jsonRPCRead(w http.ResponseWriter, r *http.Request, // Check if the user is limited and set error if method unauthorized if !isAdmin { if _, ok := rpcLimited[request.Method]; !ok { - jsonErr = &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidParams.Code, + jsonErr = &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParams.Code, Message: "limited user not authorized for this method", } } @@ -3535,7 +4697,7 @@ func (s *rpcServer) jsonRPCRead(w http.ResponseWriter, r *http.Request, // jsonAuthFail sends a message back to the client if the http auth is rejected. 
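jsonRPCRead unmarshals the HTTP body into a dcrjson.Request, turns parse failures into an RPC error, and always answers through createMarshalledReply. A stripped-down model of that flow using encoding/json only (the struct layouts and error code below are illustrative, not the dcrjson wire types):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// request and rpcError loosely model the types handled by jsonRPCRead;
// the field layout is illustrative.
type request struct {
	ID     interface{}       `json:"id"`
	Method string            `json:"method"`
	Params []json.RawMessage `json:"params"`
}

type rpcError struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}

type response struct {
	Result interface{} `json:"result"`
	Error  *rpcError   `json:"error"`
	ID     interface{} `json:"id"`
}

// marshalReply mirrors createMarshalledReply: a nil error yields a result
// payload, anything else becomes the error member of the reply.
func marshalReply(id, result interface{}, replyErr *rpcError) ([]byte, error) {
	return json.Marshal(response{Result: result, Error: replyErr, ID: id})
}

func main() {
	body := []byte(`{"id":1,"method":"getinfo","params":[]}`)
	var req request
	if err := json.Unmarshal(body, &req); err != nil {
		reply, _ := marshalReply(nil, nil,
			&rpcError{Code: -32700, Message: "Failed to parse request: " + err.Error()})
		fmt.Println(string(reply))
		return
	}
	reply, _ := marshalReply(req.ID, "ok", nil)
	fmt.Println(string(reply))
}
```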
func jsonAuthFail(w http.ResponseWriter) { - w.Header().Add("WWW-Authenticate", `Basic realm="btcd RPC"`) + w.Header().Add("WWW-Authenticate", `Basic realm="dcrd RPC"`) http.Error(w, "401 Unauthorized.", http.StatusUnauthorized) } @@ -3616,9 +4778,9 @@ func (s *rpcServer) Start() { func genCertPair(certFile, keyFile string) error { rpcsLog.Infof("Generating TLS certificates...") - org := "btcd autogenerated cert" + org := "dcrd autogenerated cert" validUntil := time.Now().Add(10 * 365 * 24 * time.Hour) - cert, key, err := btcutil.NewTLSCertPair(org, validUntil, nil) + cert, key, err := dcrutil.NewTLSCertPair(org, validUntil, nil) if err != nil { return err } @@ -3642,6 +4804,7 @@ func newRPCServer(listenAddrs []string, s *server) (*rpcServer, error) { server: s, statusLines: make(map[int]string), workState: newWorkState(), + templatePool: make(map[chainhash.Hash]*workStateBlockInfo), gbtWorkState: newGbtWorkState(s.timeSource), helpCacher: newHelpCacher(), quit: make(chan int), diff --git a/rpcserverhelp.go b/rpcserverhelp.go index f14250e0..ab86f3cb 100644 --- a/rpcserverhelp.go +++ b/rpcserverhelp.go @@ -1,4 +1,5 @@ // Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -10,7 +11,7 @@ import ( "strings" "sync" - "github.com/btcsuite/btcd/btcjson" + "github.com/decred/dcrd/dcrjson" ) // helpDescsEnUS defines the English descriptions used for the help strings. @@ -20,7 +21,7 @@ var helpDescsEnUS = map[string]string{ "The levelspec can either a debug level or of the form:\n" + "=,=,...\n" + "The valid debug levels are trace, debug, info, warn, error, and critical.\n" + - "The valid subsystems are AMGR, ADXR, BCDB, BMGR, BTCD, CHAN, DISC, PEER, RPCS, SCRP, SRVR, and TXMP.\n" + + "The valid subsystems are AMGR, ADXR, BCDB, BMGR, DCRD, CHAN, DISC, PEER, RPCS, SCRP, SRVR, and TXMP.\n" + "Finally the keyword 'show' will return a list of the available subsystems.", "debuglevel-levelspec": "The debug level(s) to use or the keyword 'show'", "debuglevel--condition0": "levelspec!=show", @@ -42,6 +43,42 @@ var helpDescsEnUS = map[string]string{ // TransactionInput help. "transactioninput-txid": "The hash of the input transaction", "transactioninput-vout": "The specific output of the input transaction to redeem", + "transactioninput-tree": "The tree that the transaction input is located", + // TODO review cmd help messages for stake stuff + // CreateRawSSTxCmd help. 
+	"createrawsstx--synopsis": "Returns a new transaction spending the provided inputs and sending to the provided addresses.\n" +
+		"The transaction inputs are not signed in the created transaction.\n" +
+		"The signrawtransaction RPC command provided by wallet must be used to sign the resulting transaction.",
+	"createrawsstx--result0": "Hex-encoded bytes of the serialized transaction",
+	"createrawsstx-inputs": "The inputs to the transaction of type sstxinput",
+	"sstxinput-txid": "Unspent tx output hash",
+	"sstxinput-vout": "The output index of the unspent tx output",
+	"sstxinput-amt": "Amount of the utxo",
+	"sstxinput-tree": "The tree in which the utxo is located",
+	"createrawsstx-amount": "JSON object with the destination addresses as keys and amounts as values",
+	"createrawsstx-amount--key": "address",
+	"createrawsstx-amount--value": "n.nnn",
+	"createrawsstx-amount--desc": "The destination address as the key and the amount in DCR as the value",
+	"createrawsstx-couts": "Array of sstx commit outs to use of type SSTxCommitOut",
+	"sstxcommitout-addr": "Address to send the sstx commitment to",
+	"sstxcommitout-commitamt": "Amount to commit",
+	"sstxcommitout-changeamt": "Amount for change",
+	"sstxcommitout-changeaddr": "Address for change",
+
+	// CreateRawSSGenTxCmd help.
+	"createrawssgentx--synopsis": "Returns a new transaction spending the provided inputs and sending to the provided addresses.\n" +
+		"The transaction inputs are not signed in the created transaction.\n" +
+		"The signrawtransaction RPC command provided by wallet must be used to sign the resulting transaction.",
+	"createrawssgentx--result0": "Hex-encoded bytes of the serialized transaction",
+	"createrawssgentx-inputs": "The inputs to the transaction of type sstxinput",
+	"createrawssgentx-votebits": "The vote bits to set for the SSGen (vote) transaction",
+
+	// CreateRawSSRtxCmd help.
+	"createrawssrtx--synopsis": "Returns a new transaction spending the provided inputs and sending to the provided addresses.\n" +
+		"The transaction inputs are not signed in the created transaction.\n" +
+		"The signrawtransaction RPC command provided by wallet must be used to sign the resulting transaction.",
+	"createrawssrtx--result0": "Hex-encoded bytes of the serialized transaction",
+	"createrawssrtx-inputs": "The inputs to the transaction of type sstxinput",

	// CreateRawTransactionCmd help.
	"createrawtransaction--synopsis": "Returns a new transaction spending the provided inputs and sending to the provided addresses.\n" +
@@ -51,7 +88,7 @@ var helpDescsEnUS = map[string]string{
	"createrawtransaction-amounts": "JSON object with the destination addresses as keys and amounts as values",
	"createrawtransaction-amounts--key": "address",
	"createrawtransaction-amounts--value": "n.nnn",
-	"createrawtransaction-amounts--desc": "The destination address as the key and the amount in BTC as the value",
+	"createrawtransaction-amounts--desc": "The destination address as the key and the amount in DCR as the value",
	"createrawtransaction--result0": "Hex-encoded bytes of the serialized transaction",

	// ScriptSig help.
@@ -59,23 +96,28 @@ var helpDescsEnUS = map[string]string{
	"scriptsig-hex": "Hex-encoded bytes of the script",

	// Vin help.
-	"vin-coinbase": "The hex-encoded bytes of the signature script (coinbase txns only)",
-	"vin-txid": "The hash of the origin transaction (non-coinbase txns only)",
-	"vin-vout": "The index of the output being redeemed from the origin transaction (non-coinbase txns only)",
-	"vin-scriptSig": "The signature script used to redeem the origin transaction as a JSON object (non-coinbase txns only)",
-	"vin-sequence": "The script sequence number",
+	"vin-coinbase": "The hex-encoded bytes of the signature script (coinbase txns only)",
+	"vin-txid": "The hash of the origin transaction (non-coinbase txns only)",
+	"vin-vout": "The index of the output being redeemed from the origin transaction (non-coinbase txns only)",
+	"vin-scriptSig": "The signature script used to redeem the origin transaction as a JSON object (non-coinbase txns only)",
+	"vin-sequence": "The script sequence number",
+	"vin-tree": "The tree of the transaction",
+	"vin-blockindex": "The block index of the origin transaction",
+	"vin-blockheight": "The block height of the origin transaction",
+	"vin-amountin": "The input amount in DCR",

	// ScriptPubKeyResult help.
	"scriptpubkeyresult-asm": "Disassembly of the script",
	"scriptpubkeyresult-hex": "Hex-encoded bytes of the script",
	"scriptpubkeyresult-reqSigs": "The number of required signatures",
	"scriptpubkeyresult-type": "The type of the script (e.g. 'pubkeyhash')",
-	"scriptpubkeyresult-addresses": "The bitcoin addresses associated with this script",
+	"scriptpubkeyresult-addresses": "The decred addresses associated with this script",

	// Vout help.
-	"vout-value": "The amount in BTC",
+	"vout-value": "The amount in DCR",
	"vout-n": "The index of this transaction output",
	"vout-scriptPubKey": "The public key script used to pay coins as a JSON object",
+	"vout-version": "The version of the public key script",

	// TxRawDecodeResult help.
	"txrawdecoderesult-txid": "The hash of the transaction",
@@ -83,6 +125,7 @@ var helpDescsEnUS = map[string]string{
	"txrawdecoderesult-locktime": "The transaction lock time",
	"txrawdecoderesult-vin": "The transaction inputs as JSON objects",
	"txrawdecoderesult-vout": "The transaction outputs as JSON objects",
+	"txrawdecoderesult-expiry": "The transaction expiry",

	// DecodeRawTransactionCmd help.
	"decoderawtransaction--synopsis": "Returns a JSON object representing the provided serialized, hex-encoded transaction.",
@@ -92,13 +135,20 @@ var helpDescsEnUS = map[string]string{
	"decodescriptresult-asm": "Disassembly of the script",
	"decodescriptresult-reqSigs": "The number of required signatures",
	"decodescriptresult-type": "The type of the script (e.g. 'pubkeyhash')",
-	"decodescriptresult-addresses": "The bitcoin addresses associated with this script",
+	"decodescriptresult-addresses": "The decred addresses associated with this script",
	"decodescriptresult-p2sh": "The script hash for use in pay-to-script-hash transactions",

	// DecodeScriptCmd help.
	"decodescript--synopsis": "Returns a JSON object with information about the provided hex-encoded script.",
	"decodescript-hexscript": "Hex-encoded script",

+	// ExistsAddressCmd help.
+	"existsaddress--synopsis": "Test for the existence of the provided address",
+	"existsaddress-address": "The address to check",
+
+	// ExistsAddressResult help.
+	"existsaddressresult-exists": "Whether or not the address exists",
+
	// GenerateCmd help
	"generate--synopsis": "Generates a set number of blocks (simnet or regtest only) and returns a JSON\n" +
		" array of their hashes.",
@@ -135,13 +185,14 @@ var helpDescsEnUS = map[string]string{
	"getbestblockhash--result0": "The hex-encoded block hash",

	// GetBlockCmd help.
-	"getblock--synopsis": "Returns information about a block given its hash.",
-	"getblock-hash": "The hash of the block",
-	"getblock-verbose": "Specifies the block is returned as a JSON object instead of hex-encoded string",
-	"getblock-verbosetx": "Specifies that each transaction is returned as a JSON object and only applies if the verbose flag is true (btcd extension)",
-	"getblock--condition0": "verbose=false",
-	"getblock--condition1": "verbose=true",
-	"getblock--result0": "Hex-encoded bytes of the serialized block",
+	"getblock--synopsis": "Returns information about a block given its hash.",
+	"getblock-hash": "The hash of the block",
+	"getblock-verbose": "Specifies the block is returned as a JSON object instead of hex-encoded string",
+	"getblock-verbosetx": "Specifies that each transaction is returned as a JSON object and only applies if the verbose flag is true (dcrd extension)",
+	"getblock--condition0": "verbose=false",
+	"getblock--condition1": "verbose=true",
+	"getblock--result0": "Hex-encoded bytes of the serialized block",
+	"getblockverboseresult-extradata": "Extra data field for the requested block",

	// TxRawResult help.
	"txrawresult-hex": "Hex-encoded transaction",
@@ -154,6 +205,9 @@ var helpDescsEnUS = map[string]string{
	"txrawresult-confirmations": "Number of confirmations of the block",
	"txrawresult-time": "Transaction time in seconds since 1 Jan 1970 GMT",
	"txrawresult-blocktime": "Block time in seconds since the 1 Jan 1970 GMT",
+	"txrawresult-blockindex": "Index of the containing block",
+	"txrawresult-blockheight": "Height of the block the transaction is part of",
+	"txrawresult-expiry": "The transaction expiry",

	// GetBlockVerboseResult help.
	"getblockverboseresult-hash": "The hash of the block (same as provided)",
@@ -170,6 +224,19 @@ var helpDescsEnUS = map[string]string{
	"getblockverboseresult-difficulty": "The proof-of-work difficulty as a multiple of the minimum difficulty",
	"getblockverboseresult-previousblockhash": "The hash of the previous block",
	"getblockverboseresult-nextblockhash": "The hash of the next block (only if there is one)",
+	"getblockverboseresult-sbits": "The stake difficulty of the block",
+	"getblockverboseresult-poolsize": "The total number of valid, spendable sstx (tickets) in the chain",
+	"getblockverboseresult-revocations": "The number of new ssrtx (ticket revocations) in the given block",
+	"getblockverboseresult-freshstake": "The number of new sstx (tickets) in the given block",
+	"getblockverboseresult-voters": "The number of stake voters (ssgen) for the previous block",
+	"getblockverboseresult-potential": "The number of potential tickets",
+	"getblockverboseresult-overflow": "The number of overflow tickets",
+	"getblockverboseresult-winner": "The winning bucket to determine ssgen",
+	"getblockverboseresult-votebits": "The block's voting results",
+	"getblockverboseresult-rawstx": "The block's raw sstx hashes that were included",
+	"getblockverboseresult-stx": "The block's sstx hashes that were included",
+	"getblockverboseresult-stakeroot": "The merkle root of the block's stake transaction tree",
+	"getblockverboseresult-finalstate": "The block's final state",

	// GetBlockCountCmd help.
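These description keys are not read directly by the handlers; they feed dcrjson.GenerateHelp, which pairs them with the result types registered in rpcResultTypes further down in this file. As a rough illustration (the helper name below is hypothetical), the existsaddress entries above would be rendered like this from within this package:

```go
// existsAddressHelp is a hypothetical helper showing how a single method's
// help text is produced from helpDescsEnUS and its registered result type.
func existsAddressHelp() (string, error) {
	return dcrjson.GenerateHelp("existsaddress", helpDescsEnUS,
		(*dcrjson.ExistsAddressResult)(nil))
}
```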
"getblockcount--synopsis": "Returns the number of blocks in the longest block chain.", @@ -195,8 +262,9 @@ var helpDescsEnUS = map[string]string{ "getblocktemplateresulttx-data": "Hex-encoded transaction data (byte-for-byte)", "getblocktemplateresulttx-hash": "Hex-encoded transaction hash (little endian if treated as a 256-bit number)", "getblocktemplateresulttx-depends": "Other transactions before this one (by 1-based index in the 'transactions' list) that must be present in the final block if this one is", - "getblocktemplateresulttx-fee": "Difference in value between transaction inputs and outputs (in Satoshi)", + "getblocktemplateresulttx-fee": "Difference in value between transaction inputs and outputs (in Atoms)", "getblocktemplateresulttx-sigops": "Total number of signature operations as counted for purposes of block limits", + "getblocktemplateresulttx-txtype": "Type of the transaction", // GetBlockTemplateResultAux help. "getblocktemplateresultaux-flags": "Hex-encoded byte-for-byte data to include in the coinbase signature script", @@ -212,7 +280,7 @@ var helpDescsEnUS = map[string]string{ "getblocktemplateresult-version": "The block version", "getblocktemplateresult-coinbaseaux": "Data that should be included in the coinbase signature script", "getblocktemplateresult-coinbasetxn": "Information about the coinbase transaction", - "getblocktemplateresult-coinbasevalue": "Total amount available for the coinbase in Satoshi", + "getblocktemplateresult-coinbasevalue": "Total amount available for the coinbase in Atoms", "getblocktemplateresult-workid": "This value must be returned with result if provided (not provided)", "getblocktemplateresult-longpollid": "Identifier for long poll request which allows monitoring for expiration", "getblocktemplateresult-longpolluri": "An alternate URI to use for long poll requests if provided (not provided)", @@ -225,6 +293,8 @@ var helpDescsEnUS = map[string]string{ "getblocktemplateresult-noncerange": "Two concatenated hex-encoded big-endian 32-bit integers which represent the valid ranges of nonces the miner may scan", "getblocktemplateresult-capabilities": "List of server capabilities including 'proposal' to indicate support for block proposals", "getblocktemplateresult-reject-reason": "Reason the proposal was invalid as-is (only applies to proposal responses)", + "getblocktemplateresult-stransactions": "Stake transactions", + "getblocktemplateresult-header": "Block header", // GetBlockTemplateCmd help. "getblocktemplate--synopsis": "Returns a JSON object with information necessary to construct a block to mine or accepts a proposal to validate.\n" + @@ -240,13 +310,17 @@ var helpDescsEnUS = map[string]string{ "getconnectioncount--result0": "The number of connections", // GetCurrentNetCmd help. - "getcurrentnet--synopsis": "Get bitcoin network the server is running on.", + "getcurrentnet--synopsis": "Get decred network the server is running on.", "getcurrentnet--result0": "The network identifer", // GetDifficultyCmd help. "getdifficulty--synopsis": "Returns the proof-of-work difficulty as a multiple of the minimum difficulty.", "getdifficulty--result0": "The difficulty", + // GetStakeDifficultyCmd help. + "getstakedifficulty--synopsis": "Returns the proof-of-stake difficulty.", + "getstakedifficultyresult-difficulty": "The stake difficulty", + // GetGenerateCmd help. 
"getgenerate--synopsis": "Returns if the server is set to generate coins (mine) or not.", "getgenerate--result0": "True if mining, false if not", @@ -264,14 +338,14 @@ var helpDescsEnUS = map[string]string{ "infochainresult-proxy": "The proxy used by the server", "infochainresult-difficulty": "The current target difficulty", "infochainresult-testnet": "Whether or not server is using testnet", - "infochainresult-relayfee": "The minimum relay fee for non-free transactions in BTC/KB", + "infochainresult-relayfee": "The minimum relay fee for non-free transactions in DCR/KB", "infochainresult-errors": "Any current errors", // InfoWalletResult help. "infowalletresult-version": "The version of the server", "infowalletresult-protocolversion": "The latest supported protocol version", "infowalletresult-walletversion": "The version of the wallet server", - "infowalletresult-balance": "The total bitcoin balance of the wallet", + "infowalletresult-balance": "The total decred balance of the wallet", "infowalletresult-blocks": "The number of blocks processed", "infowalletresult-timeoffset": "The time offset", "infowalletresult-connections": "The number of connected peers", @@ -281,8 +355,8 @@ var helpDescsEnUS = map[string]string{ "infowalletresult-keypoololdest": "Seconds since 1 Jan 1970 GMT of the oldest pre-generated key in the key pool", "infowalletresult-keypoolsize": "The number of new keys that are pre-generated", "infowalletresult-unlocked_until": "The timestamp in seconds since 1 Jan 1970 GMT that the wallet is unlocked for transfers, or 0 if the wallet is locked", - "infowalletresult-paytxfee": "The transaction fee set in BTC/KB", - "infowalletresult-relayfee": "The minimum relay fee for non-free transactions in BTC/KB", + "infowalletresult-paytxfee": "The transaction fee set in DCR/KB", + "infowalletresult-relayfee": "The minimum relay fee for non-free transactions in DCR/KB", "infowalletresult-errors": "Any current errors", // GetInfoCmd help. @@ -300,6 +374,7 @@ var helpDescsEnUS = map[string]string{ "getmininginforesult-networkhashps": "Estimated network hashes per second for the most recent blocks", "getmininginforesult-pooledtx": "Number of transactions in the memory pool", "getmininginforesult-testnet": "Whether or not server is using testnet", + "getmininginforesult-stakedifficulty": "Current estimated stake difficulty", // GetMiningInfoCmd help. "getmininginfo--synopsis": "Returns a JSON object containing mining-related information.", @@ -344,7 +419,7 @@ var helpDescsEnUS = map[string]string{ // GetRawMempoolVerboseResult help. "getrawmempoolverboseresult-size": "Transaction size in bytes", - "getrawmempoolverboseresult-fee": "Transaction fee in bitcoins", + "getrawmempoolverboseresult-fee": "Transaction fee in decred", "getrawmempoolverboseresult-time": "Local time transaction entered pool in seconds since 1 Jan 1970 GMT", "getrawmempoolverboseresult-height": "Block height when transaction entered the pool", "getrawmempoolverboseresult-startingpriority": "Priority when transaction entered the pool", @@ -369,7 +444,7 @@ var helpDescsEnUS = map[string]string{ // GetTxOutResult help. 
"gettxoutresult-bestblock": "The block hash that contains the transaction output", "gettxoutresult-confirmations": "The number of confirmations", - "gettxoutresult-value": "The transaction amount in BTC", + "gettxoutresult-value": "The transaction amount in DCR", "gettxoutresult-scriptPubKey": "The public key script used to pay coins as a JSON object", "gettxoutresult-version": "The transaction version", "gettxoutresult-coinbase": "Whether or not the transaction is a coinbase", @@ -405,13 +480,19 @@ var helpDescsEnUS = map[string]string{ "ping--synopsis": "Queues a ping to be sent to each connected peer.\n" + "Ping times are provided by getpeerinfo via the pingtime and pingwait fields.", + // RebroadcastMissed help. + "rebroadcastmissed--synopsis": "Asks the daemon to rebroadcast missed votes.\n", + + // RebroadcastWinnerCmd help. + "rebroadcastwinners--synopsis": "Asks the daemon to rebroadcast the winners of the voting lottery.\n", + // SearchRawTransactionsCmd help. "searchrawtransactions--synopsis": "Returns raw data for transactions involving the passed address.\n" + "Returned transactions are pulled from both the database, and transactions currently in the mempool.\n" + "Transactions pulled from the mempool will have the 'confirmations' field set to 0.\n" + "Usage of this RPC requires the optional --addrindex flag to be activated, otherwise all responses will simply return with an error stating the address index has not yet been built.\n" + "Similarly, until the address index has caught up with the current best height, all requests will return an error response in order to avoid serving stale data.", - "searchrawtransactions-address": "The Bitcoin address to search for", + "searchrawtransactions-address": "The Decred address to search for", "searchrawtransactions-verbose": "Specifies the transaction is returned as a JSON object instead of hex-encoded string", "searchrawtransactions-skip": "The number of leading transactions to leave out of the final response", "searchrawtransactions-count": "The maximum number of transactions to return", @@ -422,7 +503,7 @@ var helpDescsEnUS = map[string]string{ // SendRawTransactionCmd help. "sendrawtransaction--synopsis": "Submits the serialized, hex-encoded transaction to the local peer and relays it to the network.", "sendrawtransaction-hextx": "Serialized, hex-encoded signed transaction", - "sendrawtransaction-allowhighfees": "Whether or not to allow insanely high fees (btcd does not yet implement this parameter, so it has no effect)", + "sendrawtransaction-allowhighfees": "Whether or not to allow insanely high fees (dcrd does not yet implement this parameter, so it has no effect)", "sendrawtransaction--result0": "The hash of the transaction", // SetGenerateCmd help. @@ -431,8 +512,8 @@ var helpDescsEnUS = map[string]string{ "setgenerate-genproclimit": "The number of processors (cores) to limit generation to or -1 for default", // StopCmd help. - "stop--synopsis": "Shutdown btcd.", - "stop--result0": "The string 'btcd stopping.'", + "stop--synopsis": "Shutdown dcrd.", + "stop--result0": "The string 'dcrd stopping.'", // SubmitBlockOptions help. "submitblockoptions-workid": "This parameter is currently ignored", @@ -447,16 +528,16 @@ var helpDescsEnUS = map[string]string{ // ValidateAddressResult help. 
"validateaddresschainresult-isvalid": "Whether or not the address is valid", - "validateaddresschainresult-address": "The bitcoin address (only when isvalid is true)", + "validateaddresschainresult-address": "The decred address (only when isvalid is true)", // ValidateAddressCmd help. "validateaddress--synopsis": "Verify an address is valid.", - "validateaddress-address": "Bitcoin address to validate", + "validateaddress-address": "Decred address to validate", // VerifyChainCmd help. "verifychain--synopsis": "Verifies the block chain database.\n" + "The actual checks performed by the checklevel parameter are implementation specific.\n" + - "For btcd this is:\n" + + "For dcrd this is:\n" + "checklevel=0 - Look up each block and ensure it can be loaded from the database.\n" + "checklevel=1 - Perform basic context-free sanity checks on each block.", "verifychain-checklevel": "How thorough the block verification is", @@ -465,13 +546,29 @@ var helpDescsEnUS = map[string]string{ // VerifyMessageCmd help. "verifymessage--synopsis": "Verify a signed message.", - "verifymessage-address": "The bitcoin address to use for the signature", + "verifymessage-address": "The decred address to use for the signature", "verifymessage-signature": "The base-64 encoded signature provided by the signer", "verifymessage-message": "The signed message", "verifymessage--result0": "Whether or not the signature verified", // -------- Websocket-specific help -------- + // Session help. + "session--synopsis": "Return details regarding a websocket client's current connection session.", + "sessionresult-sessionid": "The unique session ID for a client's websocket connection.", + + // NotifySpentAndMissedTicketsCmd help + "notifyspentandmissedtickets--synopsis": "Request notifications for whenever tickets are spent or missed.", + + // NotifyNewTicketsCmd help + "notifynewtickets--synopsis": "Request notifications for whenever new tickets are found.", + + // NotifyStakeDifficultyCmd help + "notifystakedifficulty--synopsis": "Request notifications for whenever stake difficulty goes up.", + + // NotifyWinningTicketsCmd help + "notifywinningtickets--synopsis": "Request notifications for whenever any tickets is chosen to vote.", + // NotifyBlocksCmd help. "notifyblocks--synopsis": "Request notifications for whenever a block is connected or disconnected from the main (best) chain.", @@ -497,9 +594,10 @@ var helpDescsEnUS = map[string]string{ // OutPoint help. "outpoint-hash": "The hex-encoded bytes of the outpoint hash", "outpoint-index": "The index of the outpoint", + "outpoint-tree": "The tree of the outpoint", // NotifySpentCmd help. - "notifyspent--synopsis": "Send a redeemingtx notification when a transaction spending an outpoint appears in mempool (if relayed to this btcd instance) and when such a transaction first appears in a newly-attached block.", + "notifyspent--synopsis": "Send a redeemingtx notification when a transaction spending an outpoint appears in mempool (if relayed to this dcrd instance) and when such a transaction first appears in a newly-attached block.", "notifyspent-outpoints": "List of transaction outpoints to monitor.", // StopNotifySpentCmd help. @@ -515,6 +613,32 @@ var helpDescsEnUS = map[string]string{ "rescan-addresses": "List of addresses to include in the rescan", "rescan-outpoints": "List of transaction outpoints to include in the rescan", "rescan-endblock": "Hash of final block to rescan", + + // EstimateFee help. 
+	"estimatefee--synopsis": "Returns the estimated fee in dcr/kb.",
+	"estimatefee-numblocks": "(unused)",
+	"estimatefee--result0": "Estimated fee.",
+
+	// TicketBuckets help.
+	"ticketbuckets--synopsis": "Request for the number of tickets currently in each bucket of the ticket database.",
+	"ticketbucket-tickets": "Number of tickets in bucket.",
+	"ticketbucket-number": "Bucket number.",
+
+	// TicketsForAddress help.
+	"ticketsforaddress--synopsis": "Request all the tickets for an address.",
+	"ticketsforaddress-address": "Address to look for.",
+	"ticketsforaddressresult-tickets": "Tickets owned by the specified address.",
+
+	// TicketsForBucket help.
+	"ticketsforbucket--synopsis": "Request all the tickets and owners in a given bucket.",
+	"ticketsforbucket-bucket": "Bucket to look for.",
+	"ticketsforbucketresult-tickets": "Result for the ticketsforbucket command.",
+	"ticket-owner": "Address owning the ticket.",
+	"ticket-hash": "Hash of the ticket.",
+
+	// MissedTickets help.
+	"missedtickets--synopsis": "Request tickets the client missed",
+	"missedticketsresult-tickets": "List of missed tickets",
}

// rpcResultTypes specifies the result types that each RPC command can return.
@@ -522,54 +646,69 @@ var helpDescsEnUS = map[string]string{
// pointer to the type (or nil to indicate no return value).
var rpcResultTypes = map[string][]interface{}{
	"addnode": nil,
+	"createrawsstx": []interface{}{(*string)(nil)},
+	"createrawssgentx": []interface{}{(*string)(nil)},
+	"createrawssrtx": []interface{}{(*string)(nil)},
	"createrawtransaction": []interface{}{(*string)(nil)},
	"debuglevel": []interface{}{(*string)(nil), (*string)(nil)},
-	"decoderawtransaction": []interface{}{(*btcjson.TxRawDecodeResult)(nil)},
-	"decodescript": []interface{}{(*btcjson.DecodeScriptResult)(nil)},
+	"decoderawtransaction": []interface{}{(*dcrjson.TxRawDecodeResult)(nil)},
+	"decodescript": []interface{}{(*dcrjson.DecodeScriptResult)(nil)},
+	"estimatefee": []interface{}{(*float64)(nil)},
+	"existsaddress": []interface{}{(*dcrjson.ExistsAddressResult)(nil)},
+	"getaddednodeinfo": []interface{}{(*[]string)(nil), (*[]dcrjson.GetAddedNodeInfoResult)(nil)},
+	"getbestblock": []interface{}{(*dcrjson.GetBestBlockResult)(nil)},
	"generate": []interface{}{(*[]string)(nil)},
-	"getaddednodeinfo": []interface{}{(*[]string)(nil), (*[]btcjson.GetAddedNodeInfoResult)(nil)},
-	"getbestblock": []interface{}{(*btcjson.GetBestBlockResult)(nil)},
	"getbestblockhash": []interface{}{(*string)(nil)},
-	"getblock": []interface{}{(*string)(nil), (*btcjson.GetBlockVerboseResult)(nil)},
+	"getblock": []interface{}{(*string)(nil), (*dcrjson.GetBlockVerboseResult)(nil)},
	"getblockcount": []interface{}{(*int64)(nil)},
	"getblockhash": []interface{}{(*string)(nil)},
-	"getblocktemplate": []interface{}{(*btcjson.GetBlockTemplateResult)(nil), (*string)(nil), nil},
+	"getblocktemplate": []interface{}{(*dcrjson.GetBlockTemplateResult)(nil), (*string)(nil), nil},
	"getconnectioncount": []interface{}{(*int32)(nil)},
	"getcurrentnet": []interface{}{(*uint32)(nil)},
	"getdifficulty": []interface{}{(*float64)(nil)},
+	"getstakedifficulty": []interface{}{(*dcrjson.GetStakeDifficultyResult)(nil)},
	"getgenerate": []interface{}{(*bool)(nil)},
	"gethashespersec": []interface{}{(*float64)(nil)},
-	"getinfo": []interface{}{(*btcjson.InfoChainResult)(nil)},
-	"getmininginfo": []interface{}{(*btcjson.GetMiningInfoResult)(nil)},
-	"getnettotals": []interface{}{(*btcjson.GetNetTotalsResult)(nil)},
+	"getinfo": []interface{}{(*dcrjson.InfoChainResult)(nil)},
+
"getmininginfo": []interface{}{(*dcrjson.GetMiningInfoResult)(nil)}, + "getnettotals": []interface{}{(*dcrjson.GetNetTotalsResult)(nil)}, "getnetworkhashps": []interface{}{(*int64)(nil)}, - "getpeerinfo": []interface{}{(*[]btcjson.GetPeerInfoResult)(nil)}, - "getrawmempool": []interface{}{(*[]string)(nil), (*btcjson.GetRawMempoolVerboseResult)(nil)}, - "getrawtransaction": []interface{}{(*string)(nil), (*btcjson.TxRawResult)(nil)}, - "gettxout": []interface{}{(*btcjson.GetTxOutResult)(nil)}, - "getwork": []interface{}{(*btcjson.GetWorkResult)(nil), (*bool)(nil)}, + "getpeerinfo": []interface{}{(*[]dcrjson.GetPeerInfoResult)(nil)}, + "getrawmempool": []interface{}{(*[]string)(nil), (*dcrjson.GetRawMempoolVerboseResult)(nil)}, + "getrawtransaction": []interface{}{(*string)(nil), (*dcrjson.TxRawResult)(nil)}, + "gettxout": []interface{}{(*dcrjson.GetTxOutResult)(nil)}, + "getwork": []interface{}{(*dcrjson.GetWorkResult)(nil), (*bool)(nil)}, + "missedtickets": []interface{}{(*dcrjson.MissedTicketsResult)(nil)}, "node": nil, "help": []interface{}{(*string)(nil), (*string)(nil)}, "ping": nil, - "searchrawtransactions": []interface{}{(*string)(nil), (*[]btcjson.TxRawResult)(nil)}, + "rebroadcastmissed": nil, + "rebroadcastwinners": nil, + "searchrawtransactions": []interface{}{(*string)(nil), (*[]dcrjson.TxRawResult)(nil)}, "sendrawtransaction": []interface{}{(*string)(nil)}, "setgenerate": nil, "stop": []interface{}{(*string)(nil)}, "submitblock": []interface{}{nil, (*string)(nil)}, - "validateaddress": []interface{}{(*btcjson.ValidateAddressChainResult)(nil)}, + "ticketsforaddress": []interface{}{(*dcrjson.TicketsForAddressResult)(nil)}, + "validateaddress": []interface{}{(*dcrjson.ValidateAddressChainResult)(nil)}, "verifychain": []interface{}{(*bool)(nil)}, "verifymessage": []interface{}{(*bool)(nil)}, // Websocket commands. - "notifyblocks": nil, - "stopnotifyblocks": nil, - "notifynewtransactions": nil, - "stopnotifynewtransactions": nil, - "notifyreceived": nil, - "stopnotifyreceived": nil, - "notifyspent": nil, - "stopnotifyspent": nil, - "rescan": nil, + "session": []interface{}{(*dcrjson.SessionResult)(nil)}, + "notifywinningtickets": nil, + "notifyspentandmissedtickets": nil, + "notifynewtickets": nil, + "notifystakedifficulty": nil, + "notifyblocks": nil, + "notifynewtransactions": nil, + "notifyreceived": nil, + "notifyspent": nil, + "rescan": nil, + "stopnotifyblocks": nil, + "stopnotifynewtransactions": nil, + "stopnotifyreceived": nil, + "stopnotifyspent": nil, } // helpCacher provides a concurrent safe type that provides help and usage for @@ -600,7 +739,7 @@ func (c *helpCacher) rpcMethodHelp(method string) (string, error) { } // Generate, cache, and return the help. - help, err := btcjson.GenerateHelp(method, helpDescsEnUS, resultTypes...) + help, err := dcrjson.GenerateHelp(method, helpDescsEnUS, resultTypes...) if err != nil { return "", err } @@ -623,7 +762,7 @@ func (c *helpCacher) rpcUsage(includeWebsockets bool) (string, error) { // Generate a list of one-line usage for every command. usageTexts := make([]string, 0, len(rpcHandlers)) for k := range rpcHandlers { - usage, err := btcjson.MethodUsageText(k) + usage, err := dcrjson.MethodUsageText(k) if err != nil { return "", err } @@ -633,7 +772,7 @@ func (c *helpCacher) rpcUsage(includeWebsockets bool) (string, error) { // Include websockets commands if requested. 
if includeWebsockets { for k := range wsHandlers { - usage, err := btcjson.MethodUsageText(k) + usage, err := dcrjson.MethodUsageText(k) if err != nil { return "", err } diff --git a/rpcserverhelp_test.go b/rpcserverhelp_test.go index 45974313..fb71da12 100644 --- a/rpcserverhelp_test.go +++ b/rpcserverhelp_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. diff --git a/rpcwebsocket.go b/rpcwebsocket.go index 5b2ec323..2f47e429 100644 --- a/rpcwebsocket.go +++ b/rpcwebsocket.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -14,17 +15,22 @@ import ( "errors" "fmt" "io" + "strconv" "sync" "time" - "github.com/btcsuite/btcd/btcjson" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" "github.com/btcsuite/fastsha256" "github.com/btcsuite/golangcrypto/ripemd160" "github.com/btcsuite/websocket" + + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/dcrjson" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) const ( @@ -49,16 +55,21 @@ type wsCommandHandler func(*wsClient, interface{}) (interface{}, error) // causes a dependency loop. var wsHandlers map[string]wsCommandHandler var wsHandlersBeforeInit = map[string]wsCommandHandler{ - "help": handleWebsocketHelp, - "notifyblocks": handleNotifyBlocks, - "notifynewtransactions": handleNotifyNewTransactions, - "notifyreceived": handleNotifyReceived, - "notifyspent": handleNotifySpent, - "stopnotifyblocks": handleStopNotifyBlocks, - "stopnotifynewtransactions": handleStopNotifyNewTransactions, - "stopnotifyspent": handleStopNotifySpent, - "stopnotifyreceived": handleStopNotifyReceived, - "rescan": handleRescan, + "notifyblocks": handleNotifyBlocks, + "notifywinningtickets": handleWinningTickets, + "notifyspentandmissedtickets": handleSpentAndMissedTickets, + "notifynewtickets": handleNewTickets, + "notifystakedifficulty": handleStakeDifficulty, + "notifynewtransactions": handleNotifyNewTransactions, + "notifyreceived": handleNotifyReceived, + "notifyspent": handleNotifySpent, + "session": handleSession, + "help": handleWebsocketHelp, + "rescan": handleRescan, + "stopnotifyblocks": handleStopNotifyBlocks, + "stopnotifynewtransactions": handleStopNotifyNewTransactions, + "stopnotifyspent": handleStopNotifySpent, + "stopnotifyreceived": handleStopNotifyReceived, } // wsAsyncHandlers holds the websocket commands which should be run @@ -94,7 +105,12 @@ func (s *rpcServer) WebsocketHandler(conn *websocket.Conn, remoteAddr string, // Create a new websocket client to handle the new websocket connection // and wait for it to shutdown. Once it has shutdown (and hence // disconnected), remove it and any notifications it registered for. 
- client := newWebsocketClient(s, conn, remoteAddr, authenticated, isAdmin) + client, err := newWebsocketClient(s, conn, remoteAddr, authenticated, isAdmin) + if err != nil { + rpcsLog.Errorf("Failed to serve client %s: %v", remoteAddr, err) + conn.Close() + return + } s.ntfnMgr.AddClient(client) client.Start() client.WaitForShutdown() @@ -188,7 +204,7 @@ func (m *wsNotificationManager) queueHandler() { // NotifyBlockConnected passes a block newly-connected to the best chain // to the notification manager for block and transaction notification // processing. -func (m *wsNotificationManager) NotifyBlockConnected(block *btcutil.Block) { +func (m *wsNotificationManager) NotifyBlockConnected(block *dcrutil.Block) { // As NotifyBlockConnected will be called by the block manager // and the RPC server may no longer be running, use a select // statement to unblock enqueueing the notification once the RPC @@ -201,7 +217,7 @@ func (m *wsNotificationManager) NotifyBlockConnected(block *btcutil.Block) { // NotifyBlockDisconnected passes a block disconnected from the best chain // to the notification manager for block notification processing. -func (m *wsNotificationManager) NotifyBlockDisconnected(block *btcutil.Block) { +func (m *wsNotificationManager) NotifyBlockDisconnected(block *dcrutil.Block) { // As NotifyBlockDisconnected will be called by the block manager // and the RPC server may no longer be running, use a select // statement to unblock enqueueing the notification once the RPC @@ -212,11 +228,81 @@ func (m *wsNotificationManager) NotifyBlockDisconnected(block *btcutil.Block) { } } +// NotifyReorganization passes a blockchain reorganization notification for +// reorganization notification processing. +func (m *wsNotificationManager) NotifyReorganization(rd *blockchain.ReorganizationNtfnsData) { + // As NotifyReorganization will be called by the block manager + // and the RPC server may no longer be running, use a select + // statement to unblock enqueueing the notification once the RPC + // server has begun shutting down. + select { + case m.queueNotification <- (*notificationReorganization)(rd): + case <-m.quit: + } +} + +// NotifyWinningTickets passes newly winning tickets for an incoming block +// to the notification manager for further processing. +func (m *wsNotificationManager) NotifyWinningTickets( + wtnd *WinningTicketsNtfnData) { + // As NotifyWinningTickets will be called by the block manager + // and the RPC server may no longer be running, use a select + // statement to unblock enqueueing the notification once the RPC + // server has begun shutting down. + select { + case m.queueNotification <- (*notificationWinningTickets)(wtnd): + case <-m.quit: + } +} + +// NotifySpentAndMissedTickets passes ticket spend and missing data for an +// incoming block from the best chain to the notification manager for block +// notification processing. +func (m *wsNotificationManager) NotifySpentAndMissedTickets( + tnd *blockchain.TicketNotificationsData) { + // As NotifySpentAndMissedTickets will be called by the block manager + // and the RPC server may no longer be running, use a select + // statement to unblock enqueueing the notification once the RPC + // server has begun shutting down. + select { + case m.queueNotification <- (*notificationSpentAndMissedTickets)(tnd): + case <-m.quit: + } +} + +// NotifyNewTickets passes a new ticket data for an incoming block from the best +// chain to the notification manager for block notification processing. 
+func (m *wsNotificationManager) NotifyNewTickets(
+	tnd *blockchain.TicketNotificationsData) {
+	// As NotifyNewTickets will be called by the block manager
+	// and the RPC server may no longer be running, use a select
+	// statement to unblock enqueueing the notification once the RPC
+	// server has begun shutting down.
+	select {
+	case m.queueNotification <- (*notificationNewTickets)(tnd):
+	case <-m.quit:
+	}
+}
+
+// NotifyStakeDifficulty passes the new stake difficulty for an incoming block
+// from the best chain to the notification manager for stake difficulty
+// notification processing.
+func (m *wsNotificationManager) NotifyStakeDifficulty(
+	stnd *StakeDifficultyNtfnData) {
+	// As NotifyStakeDifficulty will be called by the block manager
+	// and the RPC server may no longer be running, use a select
+	// statement to unblock enqueueing the notification once the RPC
+	// server has begun shutting down.
+	select {
+	case m.queueNotification <- (*notificationStakeDifficulty)(stnd):
+	case <-m.quit:
+	}
+}
+
 // NotifyMempoolTx passes a transaction accepted by mempool to the
 // notification manager for transaction notification processing.  If
 // isNew is true, the tx is is a new transaction, rather than one
 // added to the mempool during a reorg.
-func (m *wsNotificationManager) NotifyMempoolTx(tx *btcutil.Tx, isNew bool) {
+func (m *wsNotificationManager) NotifyMempoolTx(tx *dcrutil.Tx, isNew bool) {
 	n := &notificationTxAcceptedByMempool{
 		isNew: isNew,
 		tx:    tx,
@@ -232,12 +318,34 @@ func (m *wsNotificationManager) NotifyMempoolTx(tx *btcutil.Tx, isNew bool) {
 	}
 }
 
+// WinningTicketsNtfnData is the data that is used to generate
+// winning ticket notifications (which indicate a block and
+// the tickets eligible to vote on it).
+type WinningTicketsNtfnData struct {
+	BlockHash   chainhash.Hash
+	BlockHeight int64
+	Tickets     []chainhash.Hash
+}
+
+// StakeDifficultyNtfnData is the data that is used to generate
+// stake difficulty notifications.
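A quick sketch of how these data types are meant to be filled in may help. The block manager side is not part of this hunk, so block, winners, and ntfnMgr below are assumed names; the field usage follows the struct definition above and the NotifyWinningTickets queuing method.

```go
// Hand the lottery results for a newly connected block to the notification
// manager; it queues a winningtickets notification for registered clients.
wtnd := &WinningTicketsNtfnData{
	BlockHash:   *block.Sha(),   // hash of the block the tickets will vote on
	BlockHeight: block.Height(), // height of that block
	Tickets:     winners,        // []chainhash.Hash chosen by the ticket lottery
}
ntfnMgr.NotifyWinningTickets(wtnd)
```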
+type StakeDifficultyNtfnData struct { + BlockHash chainhash.Hash + BlockHeight int64 + StakeDifficulty int64 +} + // Notification types -type notificationBlockConnected btcutil.Block -type notificationBlockDisconnected btcutil.Block +type notificationBlockConnected dcrutil.Block +type notificationBlockDisconnected dcrutil.Block +type notificationReorganization blockchain.ReorganizationNtfnsData +type notificationWinningTickets WinningTicketsNtfnData +type notificationSpentAndMissedTickets blockchain.TicketNotificationsData +type notificationNewTickets blockchain.TicketNotificationsData +type notificationStakeDifficulty StakeDifficultyNtfnData type notificationTxAcceptedByMempool struct { isNew bool - tx *btcutil.Tx + tx *dcrutil.Tx } // Notification control requests @@ -245,6 +353,14 @@ type notificationRegisterClient wsClient type notificationUnregisterClient wsClient type notificationRegisterBlocks wsClient type notificationUnregisterBlocks wsClient +type notificationRegisterWinningTickets wsClient +type notificationUnregisterWinningTickets wsClient +type notificationRegisterSpentAndMissedTickets wsClient +type notificationUnregisterSpentAndMissedTickets wsClient +type notificationRegisterNewTickets wsClient +type notificationUnregisterNewTickets wsClient +type notificationRegisterStakeDifficulty wsClient +type notificationUnregisterStakeDifficulty wsClient type notificationRegisterNewMempoolTxs wsClient type notificationUnregisterNewMempoolTxs wsClient type notificationRegisterSpent struct { @@ -278,6 +394,10 @@ func (m *wsNotificationManager) notificationHandler() { // Where possible, the quit channel is used as the unique id for a client // since it is quite a bit more efficient than using the entire struct. blockNotifications := make(map[chan struct{}]*wsClient) + winningTicketNotifications := make(map[chan struct{}]*wsClient) + ticketSMNotifications := make(map[chan struct{}]*wsClient) + ticketNewNotifications := make(map[chan struct{}]*wsClient) + stakeDifficultyNotifications := make(map[chan struct{}]*wsClient) txNotifications := make(map[chan struct{}]*wsClient) watchedOutPoints := make(map[wire.OutPoint]map[chan struct{}]*wsClient) watchedAddrs := make(map[string]map[chan struct{}]*wsClient) @@ -292,14 +412,36 @@ out: } switch n := n.(type) { case *notificationBlockConnected: - block := (*btcutil.Block)(n) + block := (*dcrutil.Block)(n) + + // If the block was voted for by the stakeholders, announce the + // transactions to the notifications watcher. + msgblock := block.MsgBlock() + votebits := msgblock.Header.VoteBits // Skip iterating through all txs if no // tx notification requests exist. if len(watchedOutPoints) != 0 || len(watchedAddrs) != 0 { - for _, tx := range block.Transactions() { - m.notifyForTx(watchedOutPoints, - watchedAddrs, tx, block) + if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) { + prevblock, err := m.server.server.db.FetchBlockBySha( + &msgblock.Header.PrevBlock) + if err != nil { + rpcsLog.Error("Previous block could not be loaded "+ + "from database!", err) + break // Correct behaviour? This should never happen + } + + for _, tx := range prevblock.Transactions() { + m.notifyForTx(watchedOutPoints, + watchedAddrs, + tx, + prevblock) + } + } + + // Stake tx are included regardless of voting. 
+ for _, tx := range block.STransactions() { + m.notifyForTx(watchedOutPoints, watchedAddrs, tx, block) } } @@ -310,7 +452,27 @@ out: case *notificationBlockDisconnected: m.notifyBlockDisconnected(blockNotifications, - (*btcutil.Block)(n)) + (*dcrutil.Block)(n)) + + case *notificationReorganization: + m.notifyReorganization(blockNotifications, + (*blockchain.ReorganizationNtfnsData)(n)) + + case *notificationWinningTickets: + m.notifyWinningTickets(winningTicketNotifications, + (*WinningTicketsNtfnData)(n)) + + case *notificationSpentAndMissedTickets: + m.notifySpentAndMissedTickets(ticketSMNotifications, + (*blockchain.TicketNotificationsData)(n)) + + case *notificationNewTickets: + m.notifyNewTickets(ticketNewNotifications, + (*blockchain.TicketNotificationsData)(n)) + + case *notificationStakeDifficulty: + m.notifyStakeDifficulty(stakeDifficultyNotifications, + (*StakeDifficultyNtfnData)(n)) case *notificationTxAcceptedByMempool: if n.isNew && len(txNotifications) != 0 { @@ -326,6 +488,38 @@ out: wsc := (*wsClient)(n) delete(blockNotifications, wsc.quit) + case *notificationRegisterWinningTickets: + wsc := (*wsClient)(n) + winningTicketNotifications[wsc.quit] = wsc + + case *notificationUnregisterWinningTickets: + wsc := (*wsClient)(n) + delete(winningTicketNotifications, wsc.quit) + + case *notificationRegisterSpentAndMissedTickets: + wsc := (*wsClient)(n) + ticketSMNotifications[wsc.quit] = wsc + + case *notificationUnregisterSpentAndMissedTickets: + wsc := (*wsClient)(n) + delete(ticketSMNotifications, wsc.quit) + + case *notificationRegisterNewTickets: + wsc := (*wsClient)(n) + ticketNewNotifications[wsc.quit] = wsc + + case *notificationUnregisterNewTickets: + wsc := (*wsClient)(n) + delete(ticketNewNotifications, wsc.quit) + + case *notificationRegisterStakeDifficulty: + wsc := (*wsClient)(n) + stakeDifficultyNotifications[wsc.quit] = wsc + + case *notificationUnregisterStakeDifficulty: + wsc := (*wsClient)(n) + delete(stakeDifficultyNotifications, wsc.quit) + case *notificationRegisterClient: wsc := (*wsClient)(n) clients[wsc.quit] = wsc @@ -407,12 +601,13 @@ func (m *wsNotificationManager) UnregisterBlockUpdates(wsc *wsClient) { // notifyBlockConnected notifies websocket clients that have registered for // block updates when a block is connected to the main chain. func (*wsNotificationManager) notifyBlockConnected(clients map[chan struct{}]*wsClient, - block *btcutil.Block) { + block *dcrutil.Block) { // Notify interested websocket clients about the connected block. - ntfn := btcjson.NewBlockConnectedNtfn(block.Sha().String(), - int32(block.Height()), block.MsgBlock().Header.Timestamp.Unix()) - marshalledJSON, err := btcjson.MarshalCmd(nil, ntfn) + ntfn := dcrjson.NewBlockConnectedNtfn(block.Sha().String(), + int32(block.Height()), block.MsgBlock().Header.Timestamp.Unix(), + block.MsgBlock().Header.VoteBits) + marshalledJSON, err := dcrjson.MarshalCmd(nil, ntfn) if err != nil { rpcsLog.Error("Failed to marshal block connected notification: "+ "%v", err) @@ -426,7 +621,7 @@ func (*wsNotificationManager) notifyBlockConnected(clients map[chan struct{}]*ws // notifyBlockDisconnected notifies websocket clients that have registered for // block updates when a block is disconnected from the main chain (due to a // reorganize). 
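The vote-bits gate in the block-connected handling above is worth calling out: regular transactions are taken from the previous block and only announced when stakeholders voted that block's regular tree valid, while stake transactions are always announced. A minimal illustration of the flag test, assuming BlockValid is the low-order vote bit, follows.

```go
package main

import (
	"fmt"

	"github.com/decred/dcrutil"
)

func main() {
	voteBits := uint16(0x0001) // hypothetical header vote bits
	if dcrutil.IsFlagSet16(voteBits, dcrutil.BlockValid) {
		fmt.Println("previous block approved; notify its regular transactions")
	} else {
		fmt.Println("previous block disapproved; skip its regular transactions")
	}
}
```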
-func (*wsNotificationManager) notifyBlockDisconnected(clients map[chan struct{}]*wsClient, block *btcutil.Block) {
+func (*wsNotificationManager) notifyBlockDisconnected(clients map[chan struct{}]*wsClient, block *dcrutil.Block) {
 	// Skip notification creation if no clients have requested block
 	// connected/disconnected notifications.
 	if len(clients) == 0 {
@@ -434,9 +629,10 @@ func (*wsNotificationManager) notifyBlockDisconnected(clients map[chan struct{}]
 	}
 
 	// Notify interested websocket clients about the disconnected block.
-	ntfn := btcjson.NewBlockDisconnectedNtfn(block.Sha().String(),
-		int32(block.Height()), block.MsgBlock().Header.Timestamp.Unix())
-	marshalledJSON, err := btcjson.MarshalCmd(nil, ntfn)
+	ntfn := dcrjson.NewBlockDisconnectedNtfn(block.Sha().String(),
+		int32(block.Height()), block.MsgBlock().Header.Timestamp.Unix(),
+		block.MsgBlock().Header.VoteBits)
+	marshalledJSON, err := dcrjson.MarshalCmd(nil, ntfn)
 	if err != nil {
 		rpcsLog.Error("Failed to marshal block disconnected "+
 			"notification: %v", err)
@@ -447,6 +643,191 @@ func (*wsNotificationManager) notifyBlockDisconnected(clients map[chan struct{}]
 	}
 }
 
+// notifyReorganization notifies websocket clients that have registered for
+// block updates when the blockchain is beginning a reorganization.
+func (m *wsNotificationManager) notifyReorganization(clients map[chan struct{}]*wsClient, rd *blockchain.ReorganizationNtfnsData) {
+	// Skip notification creation if no clients have requested block
+	// connected/disconnected notifications.
+	if len(clients) == 0 {
+		return
+	}
+
+	// Notify interested websocket clients about the chain reorganization.
+	ntfn := dcrjson.NewReorganizationNtfn(rd.OldHash.String(),
+		int32(rd.OldHeight),
+		rd.NewHash.String(),
+		int32(rd.NewHeight))
+	marshalledJSON, err := dcrjson.MarshalCmd(nil, ntfn)
+	if err != nil {
+		rpcsLog.Error("Failed to marshal reorganization "+
+			"notification: %v", err)
+		return
+	}
+	for _, wsc := range clients {
+		wsc.QueueNotification(marshalledJSON)
+	}
+}
+
+// RegisterWinningTickets requests winning tickets update notifications
+// to the passed websocket client.
+func (m *wsNotificationManager) RegisterWinningTickets(wsc *wsClient) {
+	m.queueNotification <- (*notificationRegisterWinningTickets)(wsc)
+}
+
+// UnregisterWinningTickets removes winning ticket notifications for
+// the passed websocket client.
+func (m *wsNotificationManager) UnregisterWinningTickets(wsc *wsClient) {
+	m.queueNotification <- (*notificationUnregisterWinningTickets)(wsc)
+}
+
+// notifyWinningTickets notifies websocket clients that have registered for
+// winning ticket updates.
+func (*wsNotificationManager) notifyWinningTickets(
+	clients map[chan struct{}]*wsClient, wtnd *WinningTicketsNtfnData) {
+
+	// Create a ticket map to export as JSON.
+	ticketMap := make(map[string]string)
+	for i, ticket := range wtnd.Tickets {
+		ticketMap[strconv.Itoa(i)] = ticket.String()
+	}
+
+	// Notify interested websocket clients about the winning tickets.
+	ntfn := dcrjson.NewWinningTicketsNtfn(wtnd.BlockHash.String(),
+		int32(wtnd.BlockHeight),
+		ticketMap)
+
+	marshalledJSON, err := dcrjson.MarshalCmd(nil, ntfn)
+	if err != nil {
+		rpcsLog.Error("Failed to marshal winning tickets notification: "+
+			"%v", err)
+		return
+	}
+
+	for _, wsc := range clients {
+		wsc.QueueNotification(marshalledJSON)
+	}
+}
+
+// RegisterSpentAndMissedTickets requests spent/missed tickets update notifications
+// to the passed websocket client.
+func (m *wsNotificationManager) RegisterSpentAndMissedTickets(wsc *wsClient) {
+	m.queueNotification <- (*notificationRegisterSpentAndMissedTickets)(wsc)
+}
+
+// UnregisterSpentAndMissedTickets removes spent/missed ticket notifications for
+// the passed websocket client.
+func (m *wsNotificationManager) UnregisterSpentAndMissedTickets(wsc *wsClient) {
+	m.queueNotification <- (*notificationUnregisterSpentAndMissedTickets)(wsc)
+}
+
+// notifySpentAndMissedTickets notifies websocket clients that have registered for
+// spent and missed ticket updates.
+func (*wsNotificationManager) notifySpentAndMissedTickets(
+	clients map[chan struct{}]*wsClient, tnd *blockchain.TicketNotificationsData) {
+
+	// Create a ticket map to export as JSON.
+	ticketMap := make(map[string]string)
+	for _, ticket := range tnd.TicketMap {
+		if ticket.Missed {
+			ticketMap[ticket.SStxHash.String()] = "missed"
+		} else {
+			ticketMap[ticket.SStxHash.String()] = "spent"
+		}
+	}
+
+	// Notify interested websocket clients about the spent and missed tickets.
+	ntfn := dcrjson.NewSpentAndMissedTicketsNtfn(tnd.Hash.String(),
+		int32(tnd.Height),
+		tnd.StakeDifficulty,
+		ticketMap)
+
+	marshalledJSON, err := dcrjson.MarshalCmd(nil, ntfn)
+	if err != nil {
+		rpcsLog.Error("Failed to marshal spent and missed tickets notification: "+
+			"%v", err)
+		return
+	}
+
+	for _, wsc := range clients {
+		wsc.QueueNotification(marshalledJSON)
+	}
+}
+
+// RegisterNewTickets requests new (maturing) ticket update notifications
+// to the passed websocket client.
+func (m *wsNotificationManager) RegisterNewTickets(wsc *wsClient) {
+	m.queueNotification <- (*notificationRegisterNewTickets)(wsc)
+}
+
+// UnregisterNewTickets removes new ticket notifications for
+// the passed websocket client.
+func (m *wsNotificationManager) UnregisterNewTickets(wsc *wsClient) {
+	m.queueNotification <- (*notificationUnregisterNewTickets)(wsc)
+}
+
+// RegisterStakeDifficulty requests stake difficulty notifications
+// to the passed websocket client.
+func (m *wsNotificationManager) RegisterStakeDifficulty(wsc *wsClient) {
+	m.queueNotification <- (*notificationRegisterStakeDifficulty)(wsc)
+}
+
+// UnregisterStakeDifficulty removes stake difficulty notifications for
+// the passed websocket client.
+func (m *wsNotificationManager) UnregisterStakeDifficulty(wsc *wsClient) {
+	m.queueNotification <- (*notificationUnregisterStakeDifficulty)(wsc)
+}
+
+// notifyNewTickets notifies websocket clients that have registered for
+// maturing ticket updates.
+func (*wsNotificationManager) notifyNewTickets(clients map[chan struct{}]*wsClient,
+	tnd *blockchain.TicketNotificationsData) {
+
+	// Create a list of ticket hashes to export as JSON.
+	tickets := make([]string, 0)
+	for h := range tnd.TicketMap {
+		tickets = append(tickets, h.String())
+	}
+
+	// Notify interested websocket clients about the new tickets.
+	ntfn := dcrjson.NewNewTicketsNtfn(tnd.Hash.String(),
+		int32(tnd.Height),
+		tnd.StakeDifficulty,
+		tickets)
+
+	marshalledJSON, err := dcrjson.MarshalCmd(nil, ntfn)
+	if err != nil {
+		rpcsLog.Error("Failed to marshal new tickets notification: "+
+			"%v", err)
+		return
+	}
+	for _, wsc := range clients {
+		wsc.QueueNotification(marshalledJSON)
+	}
+}
+
+// notifyStakeDifficulty notifies websocket clients that have registered for
+// stake difficulty updates.
+func (*wsNotificationManager) notifyStakeDifficulty(
+	clients map[chan struct{}]*wsClient,
+	sdnd *StakeDifficultyNtfnData) {
+
+	// Notify interested websocket clients about the new stake difficulty.
+ ntfn := dcrjson.NewStakeDifficultyNtfn(sdnd.BlockHash.String(), + int32(sdnd.BlockHeight), + sdnd.StakeDifficulty) + + marshalledJSON, err := dcrjson.MarshalCmd(nil, ntfn) + if err != nil { + rpcsLog.Error("Failed to marshal stake difficulty notification: "+ + "%v", err) + return + } + + for _, wsc := range clients { + wsc.QueueNotification(marshalledJSON) + } +} + // RegisterNewMempoolTxsUpdates requests notifications to the passed websocket // client when new transactions are added to the memory pool. func (m *wsNotificationManager) RegisterNewMempoolTxsUpdates(wsc *wsClient) { @@ -461,7 +842,7 @@ func (m *wsNotificationManager) UnregisterNewMempoolTxsUpdates(wsc *wsClient) { // notifyForNewTx notifies websocket clients that have registered for updates // when a new transaction is added to the memory pool. -func (m *wsNotificationManager) notifyForNewTx(clients map[chan struct{}]*wsClient, tx *btcutil.Tx) { +func (m *wsNotificationManager) notifyForNewTx(clients map[chan struct{}]*wsClient, tx *dcrutil.Tx) { txShaStr := tx.Sha().String() mtx := tx.MsgTx() @@ -470,14 +851,14 @@ func (m *wsNotificationManager) notifyForNewTx(clients map[chan struct{}]*wsClie amount += txOut.Value } - ntfn := btcjson.NewTxAcceptedNtfn(txShaStr, btcutil.Amount(amount).ToBTC()) - marshalledJSON, err := btcjson.MarshalCmd(nil, ntfn) + ntfn := dcrjson.NewTxAcceptedNtfn(txShaStr, dcrutil.Amount(amount).ToCoin()) + marshalledJSON, err := dcrjson.MarshalCmd(nil, ntfn) if err != nil { rpcsLog.Errorf("Failed to marshal tx notification: %s", err.Error()) return } - var verboseNtfn *btcjson.TxAcceptedVerboseNtfn + var verboseNtfn *dcrjson.TxAcceptedVerboseNtfn var marshalledJSONVerbose []byte for _, wsc := range clients { if wsc.verboseTxUpdates { @@ -488,13 +869,13 @@ func (m *wsNotificationManager) notifyForNewTx(clients map[chan struct{}]*wsClie net := m.server.server.chainParams rawTx, err := createTxRawResult(net, txShaStr, mtx, nil, - 0, nil) + 0, nil, int64(wire.NullBlockHeight), wire.NullBlockIndex) if err != nil { return } - verboseNtfn = btcjson.NewTxAcceptedVerboseNtfn(*rawTx) - marshalledJSONVerbose, err = btcjson.MarshalCmd(nil, + verboseNtfn = dcrjson.NewTxAcceptedVerboseNtfn(*rawTx) + marshalledJSONVerbose, err = dcrjson.MarshalCmd(nil, verboseNtfn) if err != nil { rpcsLog.Errorf("Failed to marshal verbose tx "+ @@ -578,41 +959,45 @@ func (*wsNotificationManager) removeSpentRequest(ops map[wire.OutPoint]map[chan } // txHexString returns the serialized transaction encoded in hexadecimal. -func txHexString(tx *btcutil.Tx) string { +func txHexString(tx *dcrutil.Tx) string { buf := bytes.NewBuffer(make([]byte, 0, tx.MsgTx().SerializeSize())) // Ignore Serialize's error, as writing to a bytes.buffer cannot fail. tx.MsgTx().Serialize(buf) return hex.EncodeToString(buf.Bytes()) } -// blockDetails creates a BlockDetails struct to include in btcws notifications +// blockDetails creates a BlockDetails struct to include in dcrws notifications // from a block and a transaction's block index. 
-func blockDetails(block *btcutil.Block, txIndex int) *btcjson.BlockDetails { +func blockDetails(block *dcrutil.Block, txTree int8, txIndex int) *dcrjson.BlockDetails { if block == nil { return nil } - return &btcjson.BlockDetails{ - Height: int32(block.Height()), - Hash: block.Sha().String(), - Index: txIndex, - Time: block.MsgBlock().Header.Timestamp.Unix(), + return &dcrjson.BlockDetails{ + Height: int32(block.Height()), + Hash: block.Sha().String(), + Index: txIndex, + Time: block.MsgBlock().Header.Timestamp.Unix(), + Tree: txTree, + VoteBits: block.MsgBlock().Header.VoteBits, } } // newRedeemingTxNotification returns a new marshalled redeemingtx notification // with the passed parameters. -func newRedeemingTxNotification(txHex string, index int, block *btcutil.Block) ([]byte, error) { +func newRedeemingTxNotification(txHex string, tree int8, index int, block *dcrutil.Block) ([]byte, error) { // Create and marshal the notification. - ntfn := btcjson.NewRedeemingTxNtfn(txHex, blockDetails(block, index)) - return btcjson.MarshalCmd(nil, ntfn) + ntfn := dcrjson.NewRedeemingTxNtfn(txHex, blockDetails(block, tree, index)) + return dcrjson.MarshalCmd(nil, ntfn) } // notifyForTxOuts examines each transaction output, notifying interested // websocket clients of the transaction if an output spends to a watched // address. A spent notification request is automatically registered for // the client for each matching output. -func (m *wsNotificationManager) notifyForTxOuts(ops map[wire.OutPoint]map[chan struct{}]*wsClient, - addrs map[string]map[chan struct{}]*wsClient, tx *btcutil.Tx, block *btcutil.Block) { +func (m *wsNotificationManager) notifyForTxOuts( + ops map[wire.OutPoint]map[chan struct{}]*wsClient, + addrs map[string]map[chan struct{}]*wsClient, tx *dcrutil.Tx, + block *dcrutil.Block) { // Nothing to do if nobody is listening for address notifications. if len(addrs) == 0 { @@ -622,7 +1007,7 @@ func (m *wsNotificationManager) notifyForTxOuts(ops map[wire.OutPoint]map[chan s txHex := "" wscNotified := make(map[chan struct{}]struct{}) for i, txOut := range tx.MsgTx().TxOut { - _, txAddrs, _, err := txscript.ExtractPkScriptAddrs( + _, txAddrs, _, err := txscript.ExtractPkScriptAddrs(txOut.Version, txOut.PkScript, m.server.server.chainParams) if err != nil { continue @@ -637,16 +1022,20 @@ func (m *wsNotificationManager) notifyForTxOuts(ops map[wire.OutPoint]map[chan s if txHex == "" { txHex = txHexString(tx) } - ntfn := btcjson.NewRecvTxNtfn(txHex, blockDetails(block, - tx.Index())) + ntfn := dcrjson.NewRecvTxNtfn(txHex, blockDetails(block, + tx.Tree(), tx.Index())) - marshalledJSON, err := btcjson.MarshalCmd(nil, ntfn) + marshalledJSON, err := dcrjson.MarshalCmd(nil, ntfn) if err != nil { - rpcsLog.Errorf("Failed to marshal processedtx notification: %v", err) + rpcsLog.Errorf("Failed to marshal processedtx notification: %v", + err) continue } - op := []*wire.OutPoint{wire.NewOutPoint(tx.Sha(), uint32(i))} + op := []*wire.OutPoint{wire.NewOutPoint( + tx.Sha(), + uint32(i), + tx.Tree())} for wscQuit, wsc := range cmap { m.addSpentRequests(ops, wsc, op) @@ -662,8 +1051,10 @@ func (m *wsNotificationManager) notifyForTxOuts(ops map[wire.OutPoint]map[chan s // notifyForTx examines the inputs and outputs of the passed transaction, // notifying websocket clients of outputs spending to a watched address // and inputs spending a watched outpoint. 
-func (m *wsNotificationManager) notifyForTx(ops map[wire.OutPoint]map[chan struct{}]*wsClient, - addrs map[string]map[chan struct{}]*wsClient, tx *btcutil.Tx, block *btcutil.Block) { +func (m *wsNotificationManager) notifyForTx( + ops map[wire.OutPoint]map[chan struct{}]*wsClient, + addrs map[string]map[chan struct{}]*wsClient, + tx *dcrutil.Tx, block *dcrutil.Block) { if len(ops) != 0 { m.notifyForTxIns(ops, tx, block) @@ -677,8 +1068,9 @@ func (m *wsNotificationManager) notifyForTx(ops map[wire.OutPoint]map[chan struc // interested websocket clients a redeemingtx notification if any inputs // spend a watched output. If block is non-nil, any matching spent // requests are removed. -func (m *wsNotificationManager) notifyForTxIns(ops map[wire.OutPoint]map[chan struct{}]*wsClient, - tx *btcutil.Tx, block *btcutil.Block) { +func (m *wsNotificationManager) notifyForTxIns( + ops map[wire.OutPoint]map[chan struct{}]*wsClient, tx *dcrutil.Tx, + block *dcrutil.Block) { // Nothing to do if nobody is watching outpoints. if len(ops) == 0 { @@ -693,16 +1085,14 @@ func (m *wsNotificationManager) notifyForTxIns(ops map[wire.OutPoint]map[chan st if txHex == "" { txHex = txHexString(tx) } - marshalledJSON, err := newRedeemingTxNotification(txHex, tx.Index(), block) + marshalledJSON, err := newRedeemingTxNotification(txHex, tx.Tree(), + tx.Index(), block) if err != nil { - rpcsLog.Warnf("Failed to marshal redeemingtx notification: %v", err) + rpcsLog.Warnf("Failed to marshal redeemingtx notification: %v", + err) continue } for wscQuit, wsc := range cmap { - if block != nil { - m.removeSpentRequest(ops, wsc, prevOut) - } - if _, ok := wscNotified[wscQuit]; !ok { wscNotified[wscQuit] = struct{}{} wsc.QueueNotification(marshalledJSON) @@ -714,7 +1104,8 @@ func (m *wsNotificationManager) notifyForTxIns(ops map[wire.OutPoint]map[chan st // RegisterTxOutAddressRequests requests notifications to the passed websocket // client when a transaction output spends to the passed address. -func (m *wsNotificationManager) RegisterTxOutAddressRequests(wsc *wsClient, addrs []string) { +func (m *wsNotificationManager) RegisterTxOutAddressRequests(wsc *wsClient, + addrs []string) { m.queueNotification <- ¬ificationRegisterAddr{ wsc: wsc, addrs: addrs, @@ -724,7 +1115,8 @@ func (m *wsNotificationManager) RegisterTxOutAddressRequests(wsc *wsClient, addr // addAddrRequests adds the websocket client wsc to the address to client set // addrMap so wsc will be notified for any mempool or block transaction outputs // spending to any of the addresses in addrs. -func (*wsNotificationManager) addAddrRequests(addrMap map[string]map[chan struct{}]*wsClient, +func (*wsNotificationManager) addAddrRequests( + addrMap map[string]map[chan struct{}]*wsClient, wsc *wsClient, addrs []string) { for _, addr := range addrs { @@ -745,7 +1137,8 @@ func (*wsNotificationManager) addAddrRequests(addrMap map[string]map[chan struct // UnregisterTxOutAddressRequest removes a request from the passed websocket // client to be notified when a transaction spends to the passed address. 
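The address and outpoint indexes used by these notification paths all share the same shape: an outer map keyed by the watched item, with an inner set of clients keyed by each client's quit channel. A simplified sketch of that pattern with a stand-in client type (not the real wsClient) and a placeholder address string:

```go
package main

import "fmt"

// client is a stand-in for wsClient; only the quit channel matters here.
type client struct {
	quit chan struct{}
}

// addAddrRequest registers c for notifications on addr.
func addAddrRequest(addrMap map[string]map[chan struct{}]*client, c *client, addr string) {
	cmap, ok := addrMap[addr]
	if !ok {
		cmap = make(map[chan struct{}]*client)
		addrMap[addr] = cmap
	}
	cmap[c.quit] = c
}

// removeAddrRequest unregisters c from addr, dropping the address entry
// entirely once no clients remain.
func removeAddrRequest(addrMap map[string]map[chan struct{}]*client, c *client, addr string) {
	cmap, ok := addrMap[addr]
	if !ok {
		return
	}
	delete(cmap, c.quit)
	if len(cmap) == 0 {
		delete(addrMap, addr)
	}
}

func main() {
	addrMap := make(map[string]map[chan struct{}]*client)
	c := &client{quit: make(chan struct{})}

	addAddrRequest(addrMap, c, "DsExampleAddr") // placeholder address
	fmt.Println("watchers:", len(addrMap["DsExampleAddr"]))

	removeAddrRequest(addrMap, c, "DsExampleAddr")
	fmt.Println("entries left:", len(addrMap))
}
```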
-func (m *wsNotificationManager) UnregisterTxOutAddressRequest(wsc *wsClient, addr string) { +func (m *wsNotificationManager) UnregisterTxOutAddressRequest(wsc *wsClient, + addr string) { m.queueNotification <- ¬ificationUnregisterAddr{ wsc: wsc, addr: addr, @@ -755,7 +1148,8 @@ func (m *wsNotificationManager) UnregisterTxOutAddressRequest(wsc *wsClient, add // removeAddrRequest removes the websocket client wsc from the address to // client set addrs so it will no longer receive notification updates for // any transaction outputs send to addr. -func (*wsNotificationManager) removeAddrRequest(addrs map[string]map[chan struct{}]*wsClient, +func (*wsNotificationManager) removeAddrRequest( + addrs map[string]map[chan struct{}]*wsClient, wsc *wsClient, addr string) { // Remove the request tracking from the client. @@ -869,6 +1263,11 @@ type wsClient struct { // false means its access is only to the limited set of RPC calls. isAdmin bool + // sessionID is a random ID generated for each client when connected. + // These IDs may be queried by a client using the session RPC. A change + // to the session ID indicates that the client reconnected. + sessionID uint64 + // verboseTxUpdates specifies whether a client has requested verbose // information about all new transactions. verboseTxUpdates bool @@ -901,7 +1300,7 @@ func (c *wsClient) handleMessage(msg []byte) { if !c.authenticated { // Disconnect immediately if the provided command fails to // parse when the client is not already authenticated. - var request btcjson.Request + var request dcrjson.Request if err := json.Unmarshal(msg, &request); err != nil { c.Disconnect() return @@ -914,7 +1313,7 @@ func (c *wsClient) handleMessage(msg []byte) { // Disconnect immediately if the first command is not // authenticate when not already authenticated. - authCmd, ok := parsedCmd.cmd.(*btcjson.AuthenticateCmd) + authCmd, ok := parsedCmd.cmd.(*dcrjson.AuthenticateCmd) if !ok { rpcsLog.Warnf("Unauthenticated websocket message " + "received") @@ -948,10 +1347,10 @@ func (c *wsClient) handleMessage(msg []byte) { } // Attempt to parse the raw message into a JSON-RPC request. - var request btcjson.Request + var request dcrjson.Request if err := json.Unmarshal(msg, &request); err != nil { - jsonErr := &btcjson.RPCError{ - Code: btcjson.ErrRPCParse.Code, + jsonErr := &dcrjson.RPCError{ + Code: dcrjson.ErrRPCParse.Code, Message: "Failed to parse request: " + err.Error(), } @@ -974,8 +1373,8 @@ func (c *wsClient) handleMessage(msg []byte) { // Check if the user is limited and disconnect client if unauthorized if !c.isAdmin { if _, ok := rpcLimited[request.Method]; !ok { - jsonErr := &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidParams.Code, + jsonErr := &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParams.Code, Message: "limited user not authorized for this method", } // Marshal and send response. @@ -1007,7 +1406,7 @@ func (c *wsClient) handleMessage(msg []byte) { // Disconnect if already authenticated and another authenticate command // is received. - if _, ok := cmd.cmd.(*btcjson.AuthenticateCmd); ok { + if _, ok := cmd.cmd.(*dcrjson.AuthenticateCmd); ok { rpcsLog.Warnf("Websocket client %s is already authenticated", c.addr) c.Disconnect() @@ -1383,13 +1782,19 @@ func (c *wsClient) WaitForShutdown() { // incoming and outgoing messages in separate goroutines complete with queueing // and asynchrous handling for long-running operations. 
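handleMessage above first unmarshals the raw websocket message into a dcrjson.Request before authenticating and dispatching it. A stdlib-only sketch of that first step, using a local stand-in struct with the conventional JSON-RPC field names (an assumption for illustration, not the real dcrjson.Request definition):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// request is a stand-in for dcrjson.Request using the usual JSON-RPC fields.
type request struct {
	Jsonrpc string            `json:"jsonrpc"`
	Method  string            `json:"method"`
	Params  []json.RawMessage `json:"params"`
	ID      interface{}       `json:"id"`
}

func main() {
	msg := []byte(`{"jsonrpc":"1.0","method":"notifyblocks","params":[],"id":1}`)

	var req request
	if err := json.Unmarshal(msg, &req); err != nil {
		// A parse failure at this point is what triggers the
		// ErrRPCParse path in handleMessage.
		fmt.Println("parse error:", err)
		return
	}
	fmt.Printf("method=%q id=%v params=%d\n", req.Method, req.ID, len(req.Params))
}
```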
func newWebsocketClient(server *rpcServer, conn *websocket.Conn, - remoteAddr string, authenticated bool, isAdmin bool) *wsClient { + remoteAddr string, authenticated bool, isAdmin bool) (*wsClient, error) { - return &wsClient{ + sessionID, err := wire.RandomUint64() + if err != nil { + return nil, err + } + + client := &wsClient{ conn: conn, addr: remoteAddr, authenticated: authenticated, isAdmin: isAdmin, + sessionID: sessionID, server: server, addrRequests: make(map[string]struct{}), spentRequests: make(map[wire.OutPoint]struct{}), @@ -1398,13 +1803,14 @@ func newWebsocketClient(server *rpcServer, conn *websocket.Conn, sendChan: make(chan wsResponse, websocketSendBufferSize), quit: make(chan struct{}), } + return client, nil } // handleWebsocketHelp implements the help command for websocket connections. func handleWebsocketHelp(wsc *wsClient, icmd interface{}) (interface{}, error) { - cmd, ok := icmd.(*btcjson.HelpCmd) + cmd, ok := icmd.(*dcrjson.HelpCmd) if !ok { - return nil, btcjson.ErrRPCInternal + return nil, dcrjson.ErrRPCInternal } // Provide a usage overview of all commands when no specific command @@ -1432,8 +1838,8 @@ func handleWebsocketHelp(wsc *wsClient, icmd interface{}) (interface{}, error) { } } if !valid { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidParameter, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidParameter, Message: "Unknown command: " + command, } } @@ -1454,6 +1860,44 @@ func handleNotifyBlocks(wsc *wsClient, icmd interface{}) (interface{}, error) { return nil, nil } +// handleSession implements the session command extension for websocket +// connections. +func handleSession(wsc *wsClient, icmd interface{}) (interface{}, error) { + return &dcrjson.SessionResult{SessionID: wsc.sessionID}, nil +} + +// handleWinningTickets implements the notifywinningtickets command +// extension for websocket connections. +func handleWinningTickets(wsc *wsClient, icmd interface{}) (interface{}, + error) { + wsc.server.ntfnMgr.RegisterWinningTickets(wsc) + return nil, nil +} + +// handleSpentAndMissedTickets implements the notifyspentandmissedtickets command +// extension for websocket connections. +func handleSpentAndMissedTickets(wsc *wsClient, icmd interface{}) (interface{}, + error) { + wsc.server.ntfnMgr.RegisterSpentAndMissedTickets(wsc) + return nil, nil +} + +// handleNewTickets implements the notifynewtickets command extension for +// websocket connections. +func handleNewTickets(wsc *wsClient, icmd interface{}) (interface{}, + error) { + wsc.server.ntfnMgr.RegisterNewTickets(wsc) + return nil, nil +} + +// handleStakeDifficulty implements the notifystakedifficulty command extension +// for websocket connections. +func handleStakeDifficulty(wsc *wsClient, icmd interface{}) (interface{}, + error) { + wsc.server.ntfnMgr.RegisterStakeDifficulty(wsc) + return nil, nil +} + // handleStopNotifyBlocks implements the stopnotifyblocks command extension for // websocket connections. func handleStopNotifyBlocks(wsc *wsClient, icmd interface{}) (interface{}, error) { @@ -1464,11 +1908,10 @@ func handleStopNotifyBlocks(wsc *wsClient, icmd interface{}) (interface{}, error // handleNotifySpent implements the notifyspent command extension for // websocket connections. 
func handleNotifySpent(wsc *wsClient, icmd interface{}) (interface{}, error) { - cmd, ok := icmd.(*btcjson.NotifySpentCmd) + cmd, ok := icmd.(*dcrjson.NotifySpentCmd) if !ok { - return nil, btcjson.ErrRPCInternal + return nil, dcrjson.ErrRPCInternal } - outpoints, err := deserializeOutpoints(cmd.OutPoints) if err != nil { return nil, err @@ -1481,9 +1924,9 @@ func handleNotifySpent(wsc *wsClient, icmd interface{}) (interface{}, error) { // handleNotifyNewTransations implements the notifynewtransactions command // extension for websocket connections. func handleNotifyNewTransactions(wsc *wsClient, icmd interface{}) (interface{}, error) { - cmd, ok := icmd.(*btcjson.NotifyNewTransactionsCmd) + cmd, ok := icmd.(*dcrjson.NotifyNewTransactionsCmd) if !ok { - return nil, btcjson.ErrRPCInternal + return nil, dcrjson.ErrRPCInternal } wsc.verboseTxUpdates = cmd.Verbose != nil && *cmd.Verbose @@ -1501,9 +1944,9 @@ func handleStopNotifyNewTransactions(wsc *wsClient, icmd interface{}) (interface // handleNotifyReceived implements the notifyreceived command extension for // websocket connections. func handleNotifyReceived(wsc *wsClient, icmd interface{}) (interface{}, error) { - cmd, ok := icmd.(*btcjson.NotifyReceivedCmd) + cmd, ok := icmd.(*dcrjson.NotifyReceivedCmd) if !ok { - return nil, btcjson.ErrRPCInternal + return nil, dcrjson.ErrRPCInternal } // Decode addresses to validate input, but the strings slice is used @@ -1520,9 +1963,9 @@ func handleNotifyReceived(wsc *wsClient, icmd interface{}) (interface{}, error) // handleStopNotifySpent implements the stopnotifyspent command extension for // websocket connections. func handleStopNotifySpent(wsc *wsClient, icmd interface{}) (interface{}, error) { - cmd, ok := icmd.(*btcjson.StopNotifySpentCmd) + cmd, ok := icmd.(*dcrjson.StopNotifySpentCmd) if !ok { - return nil, btcjson.ErrRPCInternal + return nil, dcrjson.ErrRPCInternal } outpoints, err := deserializeOutpoints(cmd.OutPoints) @@ -1540,9 +1983,9 @@ func handleStopNotifySpent(wsc *wsClient, icmd interface{}) (interface{}, error) // handleStopNotifyReceived implements the stopnotifyreceived command extension // for websocket connections. func handleStopNotifyReceived(wsc *wsClient, icmd interface{}) (interface{}, error) { - cmd, ok := icmd.(*btcjson.StopNotifyReceivedCmd) + cmd, ok := icmd.(*dcrjson.StopNotifyReceivedCmd) if !ok { - return nil, btcjson.ErrRPCInternal + return nil, dcrjson.ErrRPCInternal } // Decode addresses to validate input, but the strings slice is used @@ -1565,10 +2008,10 @@ func handleStopNotifyReceived(wsc *wsClient, icmd interface{}) (interface{}, err // properly, the function returns an error. Otherwise, nil is returned. func checkAddressValidity(addrs []string) error { for _, addr := range addrs { - _, err := btcutil.DecodeAddress(addr, activeNetParams.Params) + _, err := dcrutil.DecodeAddress(addr, activeNetParams.Params) if err != nil { - return &btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidAddressOrKey, + return &dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidAddressOrKey, Message: fmt.Sprintf("Invalid address or key: %v", addr), } @@ -1578,15 +2021,16 @@ func checkAddressValidity(addrs []string) error { } // deserializeOutpoints deserializes each serialized outpoint. 
-func deserializeOutpoints(serializedOuts []btcjson.OutPoint) ([]*wire.OutPoint, error) { +func deserializeOutpoints(serializedOuts []dcrjson.OutPoint) ([]*wire.OutPoint, error) { outpoints := make([]*wire.OutPoint, 0, len(serializedOuts)) for i := range serializedOuts { - blockHash, err := wire.NewShaHashFromStr(serializedOuts[i].Hash) + blockHash, err := chainhash.NewHashFromStr(serializedOuts[i].Hash) if err != nil { return nil, rpcDecodeHexError(serializedOuts[i].Hash) } index := serializedOuts[i].Index - outpoints = append(outpoints, wire.NewOutPoint(blockHash, index)) + tree := serializedOuts[i].Tree + outpoints = append(outpoints, wire.NewOutPoint(blockHash, index, tree)) } return outpoints, nil @@ -1615,15 +2059,33 @@ func (r *rescanKeys) unspentSlice() []*wire.OutPoint { // ErrRescanReorg defines the error that is returned when an unrecoverable // reorganize is detected during a rescan. -var ErrRescanReorg = btcjson.RPCError{ - Code: btcjson.ErrRPCDatabase, +var ErrRescanReorg = dcrjson.RPCError{ + Code: dcrjson.ErrRPCDatabase, Message: "Reorganize", } +// Decred - TODO: This function needs to scan addresses/pks in both tx trees; right +// now it only looks at the regular tx tree // rescanBlock rescans all transactions in a single block. This is a helper // function for handleRescan. -func rescanBlock(wsc *wsClient, lookups *rescanKeys, blk *btcutil.Block) { - for _, tx := range blk.Transactions() { +func rescanBlock(wsc *wsClient, lookups *rescanKeys, blk *dcrutil.Block, + parent *dcrutil.Block) { + txTreeRegularValid := dcrutil.IsFlagSet16(blk.MsgBlock().Header.VoteBits, + dcrutil.BlockValid) + + // No need to rescan tx from genesis block. + if parent == nil { + return + } + + allTransactions := make([]*dcrutil.Tx, 0) + + if txTreeRegularValid { + allTransactions = append(allTransactions, parent.Transactions()...) + } + allTransactions = append(allTransactions, blk.STransactions()...) + + for _, tx := range allTransactions { // Hexadecimal representation of this tx. Only created if // needed, and reused for later notifications if already made. var txHex string @@ -1635,7 +2097,15 @@ func rescanBlock(wsc *wsClient, lookups *rescanKeys, blk *btcutil.Block) { spentNotified := false recvNotified := false - for _, txin := range tx.MsgTx().TxIn { + // Get the stake tx type. + txType := stake.DetermineTxType(tx) + + for i, txin := range tx.MsgTx().TxIn { + // Skip stakebase. 
+ if txType == stake.TxTypeSSGen && i == 0 { + continue + } + if _, ok := lookups.unspent[txin.PreviousOutPoint]; ok { delete(lookups.unspent, txin.PreviousOutPoint) @@ -1646,9 +2116,20 @@ func rescanBlock(wsc *wsClient, lookups *rescanKeys, blk *btcutil.Block) { if txHex == "" { txHex = txHexString(tx) } - marshalledJSON, err := newRedeemingTxNotification(txHex, tx.Index(), blk) + + var marshalledJSON []byte + var err error + if tx.Tree() == dcrutil.TxTreeRegular { + marshalledJSON, err = newRedeemingTxNotification(txHex, + tx.Tree(), tx.Index(), parent) + } else if tx.Tree() == dcrutil.TxTreeStake { + marshalledJSON, err = newRedeemingTxNotification(txHex, + tx.Tree(), tx.Index(), blk) + } + if err != nil { - rpcsLog.Errorf("Failed to marshal redeemingtx notification: %v", err) + rpcsLog.Errorf("Failed to marshal redeemingtx "+ + "notification: %v", err) continue } @@ -1663,22 +2144,22 @@ func rescanBlock(wsc *wsClient, lookups *rescanKeys, blk *btcutil.Block) { } for txOutIdx, txout := range tx.MsgTx().TxOut { - _, addrs, _, _ := txscript.ExtractPkScriptAddrs( + _, addrs, _, _ := txscript.ExtractPkScriptAddrs(txout.Version, txout.PkScript, wsc.server.server.chainParams) for _, addr := range addrs { switch a := addr.(type) { - case *btcutil.AddressPubKeyHash: + case *dcrutil.AddressPubKeyHash: if _, ok := lookups.pubKeyHashes[*a.Hash160()]; !ok { continue } - case *btcutil.AddressScriptHash: + case *dcrutil.AddressScriptHash: if _, ok := lookups.scriptHashes[*a.Hash160()]; !ok { continue } - case *btcutil.AddressPubKey: + case *dcrutil.AddressSecpPubKey: found := false switch sa := a.ScriptAddress(); len(sa) { case 33: // Compressed @@ -1705,7 +2186,8 @@ func rescanBlock(wsc *wsClient, lookups *rescanKeys, blk *btcutil.Block) { // a rescanned P2PKH address, include it as well. if !found { pkh := a.AddressPubKeyHash() - if _, ok := lookups.pubKeyHashes[*pkh.Hash160()]; !ok { + if _, + ok := lookups.pubKeyHashes[*pkh.Hash160()]; !ok { continue } } @@ -1720,9 +2202,19 @@ func rescanBlock(wsc *wsClient, lookups *rescanKeys, blk *btcutil.Block) { } } - outpoint := wire.OutPoint{ - Hash: *tx.Sha(), - Index: uint32(txOutIdx), + var outpoint wire.OutPoint + if tx.Tree() == dcrutil.TxTreeRegular { + outpoint = wire.OutPoint{ + Hash: *tx.Sha(), + Index: uint32(txOutIdx), + Tree: dcrutil.TxTreeRegular, // decred + } + } else if tx.Tree() == dcrutil.TxTreeStake { + outpoint = wire.OutPoint{ + Hash: *tx.Sha(), + Index: uint32(txOutIdx), + Tree: dcrutil.TxTreeStake, // decred + } } lookups.unspent[outpoint] = struct{}{} @@ -1733,12 +2225,20 @@ func rescanBlock(wsc *wsClient, lookups *rescanKeys, blk *btcutil.Block) { if txHex == "" { txHex = txHexString(tx) } - ntfn := btcjson.NewRecvTxNtfn(txHex, - blockDetails(blk, tx.Index())) - marshalledJSON, err := btcjson.MarshalCmd(nil, ntfn) + var ntfn *dcrjson.RecvTxNtfn + if tx.Tree() == dcrutil.TxTreeRegular { + ntfn = dcrjson.NewRecvTxNtfn(txHex, blockDetails( + parent, tx.Tree(), tx.Index())) + } else if tx.Tree() == dcrutil.TxTreeStake { + ntfn = dcrjson.NewRecvTxNtfn(txHex, blockDetails( + blk, tx.Tree(), tx.Index())) + } + + marshalledJSON, err := dcrjson.MarshalCmd(nil, ntfn) if err != nil { - rpcsLog.Errorf("Failed to marshal recvtx notification: %v", err) + rpcsLog.Errorf("Failed to marshal recvtx "+ + "notification: %v", err) return } @@ -1760,13 +2260,13 @@ func rescanBlock(wsc *wsClient, lookups *rescanKeys, blk *btcutil.Block) { // range of blocks. 
If this condition does not hold true, the JSON-RPC error // for an unrecoverable reorganize is returned. func recoverFromReorg(db database.Db, minBlock, maxBlock int64, - lastBlock *wire.ShaHash) ([]wire.ShaHash, error) { + lastBlock *dcrutil.Block) ([]chainhash.Hash, error) { hashList, err := db.FetchHeightRange(minBlock, maxBlock) if err != nil { rpcsLog.Errorf("Error looking up block range: %v", err) - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCDatabase, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCDatabase, Message: "Database error: " + err.Error(), } } @@ -1777,8 +2277,8 @@ func recoverFromReorg(db database.Db, minBlock, maxBlock int64, if err != nil { rpcsLog.Errorf("Error looking up possibly reorged block: %v", err) - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCDatabase, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCDatabase, Message: "Database error: " + err.Error(), } } @@ -1790,17 +2290,192 @@ func recoverFromReorg(db database.Db, minBlock, maxBlock int64, } // descendantBlock returns the appropiate JSON-RPC error if a current block -// fetched during a reorganize is not a direct child of the parent block hash. -func descendantBlock(prevHash *wire.ShaHash, curBlock *btcutil.Block) error { - curHash := &curBlock.MsgBlock().Header.PrevBlock - if !prevHash.IsEqual(curHash) { +// 'cur' fetched during a reorganize is not a direct child of the parent block +// 'prev'. +func descendantBlock(prev, cur *dcrutil.Block) error { + if prev == nil || cur == nil { + return fmt.Errorf("descendantBlock passed nil block pointer") + } + curSha := &cur.MsgBlock().Header.PrevBlock + prevSha := prev.Sha() + if !prevSha.IsEqual(curSha) { rpcsLog.Errorf("Stopping rescan for reorged block %v "+ - "(replaced by block %v)", prevHash, curHash) + "(replaced by block %v)", prevSha, curSha) return &ErrRescanReorg } return nil } +// scanMempool scans the tx mempool for all requested outpoints/addresses in +// lookups, then issues websocket notifications for relevant transactions. +func scanMempool(wsc *wsClient, lookups *rescanKeys) { + // TODO use optimized structures within mempool, such as outpoints + // and addrindex, to do the work more efficiently. This is very + // expensive to do if the mempool is large. cj + mp := wsc.server.server.txMemPool + mp.RLock() + defer mp.RUnlock() + + for _, txDesc := range mp.pool { + tx := txDesc.Tx + + // Hexadecimal representation of this tx. Only created if + // needed, and reused for later notifications if already made. + var txHex string + + // All inputs and outputs must be iterated through to correctly + // modify the unspent map, however, just a single notification + // for any matching transaction inputs or outputs should be + // created and sent. + spentNotified := false + recvNotified := false + + // Get the stake tx type. + txType := txDesc.Type + + for i, txin := range tx.MsgTx().TxIn { + // Skip stakebase. + if txType == stake.TxTypeSSGen && i == 0 { + continue + } + + if _, ok := lookups.unspent[txin.PreviousOutPoint]; ok { + delete(lookups.unspent, txin.PreviousOutPoint) + + if spentNotified { + continue + } + + if txHex == "" { + txHex = txHexString(tx) + } + + var marshalledJSON []byte + var err error + marshalledJSON, err = newRedeemingTxNotification(txHex, tx.Tree(), + tx.Index(), nil) + if err != nil { + rpcsLog.Errorf("Failed to marshal redeemingtx "+ + "notification: %v", err) + continue + } + + err = wsc.QueueNotification(marshalledJSON) + // Stop the rescan early if the websocket client + // disconnected. 
+ if err == ErrClientQuit { + return + } + spentNotified = true + } + } + + for txOutIdx, txout := range tx.MsgTx().TxOut { + _, addrs, _, _ := txscript.ExtractPkScriptAddrs(txout.Version, + txout.PkScript, wsc.server.server.chainParams) + + for _, addr := range addrs { + switch a := addr.(type) { + case *dcrutil.AddressPubKeyHash: + if _, ok := lookups.pubKeyHashes[*a.Hash160()]; !ok { + continue + } + + case *dcrutil.AddressScriptHash: + if _, ok := lookups.scriptHashes[*a.Hash160()]; !ok { + continue + } + + case *dcrutil.AddressSecpPubKey: + found := false + switch sa := a.ScriptAddress(); len(sa) { + case 33: // Compressed + var key [33]byte + copy(key[:], sa) + if _, ok := lookups.compressedPubkeys[key]; ok { + found = true + } + + case 65: // Uncompressed + var key [65]byte + copy(key[:], sa) + if _, ok := lookups.uncompressedPubkeys[key]; ok { + found = true + } + + default: + rpcsLog.Warnf("Skipping rescanned pubkey of unknown "+ + "serialized length %d", len(sa)) + continue + } + + // If the transaction output pays to the pubkey of + // a rescanned P2PKH address, include it as well. + if !found { + pkh := a.AddressPubKeyHash() + if _, + ok := lookups.pubKeyHashes[*pkh.Hash160()]; !ok { + continue + } + } + + default: + // A new address type must have been added. Encode as a + // payment address string and check the fallback map. + addrStr := addr.EncodeAddress() + _, ok := lookups.fallbacks[addrStr] + if !ok { + continue + } + } + + var outpoint wire.OutPoint + if tx.Tree() == dcrutil.TxTreeRegular { + outpoint = wire.OutPoint{ + Hash: *tx.Sha(), + Index: uint32(txOutIdx), + Tree: dcrutil.TxTreeRegular, // decred + } + } else if tx.Tree() == dcrutil.TxTreeStake { + outpoint = wire.OutPoint{ + Hash: *tx.Sha(), + Index: uint32(txOutIdx), + Tree: dcrutil.TxTreeStake, // decred + } + } + lookups.unspent[outpoint] = struct{}{} + + if recvNotified { + continue + } + + if txHex == "" { + txHex = txHexString(tx) + } + + var ntfn *dcrjson.RecvTxNtfn + ntfn = dcrjson.NewRecvTxNtfn(txHex, blockDetails(nil, tx.Tree(), + tx.Index())) + + marshalledJSON, err := dcrjson.MarshalCmd(nil, ntfn) + if err != nil { + rpcsLog.Errorf("Failed to marshal recvtx "+ + "notification: %v", err) + return + } + + err = wsc.QueueNotification(marshalledJSON) + // Stop the rescan early if the websocket client + // disconnected. + if err == ErrClientQuit { + return + } + recvNotified = true + } + } + } +} + // handleRescan implements the rescan command extension for websocket // connections. // @@ -1812,19 +2487,21 @@ func descendantBlock(prevHash *wire.ShaHash, curBlock *btcutil.Block) error { // the chain (perhaps from a rescanprogress notification) to resume their // rescan. 
func handleRescan(wsc *wsClient, icmd interface{}) (interface{}, error) { - cmd, ok := icmd.(*btcjson.RescanCmd) + cmd, ok := icmd.(*dcrjson.RescanCmd) if !ok { - return nil, btcjson.ErrRPCInternal + return nil, dcrjson.ErrRPCInternal } outpoints := make([]*wire.OutPoint, 0, len(cmd.OutPoints)) for i := range cmd.OutPoints { - blockHash, err := wire.NewShaHashFromStr(cmd.OutPoints[i].Hash) + blockHash, err := chainhash.NewHashFromStr(cmd.OutPoints[i].Hash) if err != nil { return nil, rpcDecodeHexError(cmd.OutPoints[i].Hash) } index := cmd.OutPoints[i].Index - outpoints = append(outpoints, wire.NewOutPoint(blockHash, index)) + tree := cmd.OutPoints[i].Tree + outpoints = append(outpoints, wire.NewOutPoint(blockHash, index, + tree)) // Decred TODO } numAddrs := len(cmd.Addresses) @@ -1846,23 +2523,23 @@ func handleRescan(wsc *wsClient, icmd interface{}) (interface{}, error) { var compressedPubkey [33]byte var uncompressedPubkey [65]byte for _, addrStr := range cmd.Addresses { - addr, err := btcutil.DecodeAddress(addrStr, activeNetParams.Params) + addr, err := dcrutil.DecodeAddress(addrStr, activeNetParams.Params) if err != nil { - jsonErr := btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidAddressOrKey, + jsonErr := dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidAddressOrKey, Message: "Rescan address " + addrStr + ": " + err.Error(), } return nil, &jsonErr } switch a := addr.(type) { - case *btcutil.AddressPubKeyHash: + case *dcrutil.AddressPubKeyHash: lookups.pubKeyHashes[*a.Hash160()] = struct{}{} - case *btcutil.AddressScriptHash: + case *dcrutil.AddressScriptHash: lookups.scriptHashes[*a.Hash160()] = struct{}{} - case *btcutil.AddressPubKey: + case *dcrutil.AddressSecpPubKey: pubkeyBytes := a.ScriptAddress() switch len(pubkeyBytes) { case 33: // Compressed @@ -1874,8 +2551,8 @@ func handleRescan(wsc *wsClient, icmd interface{}) (interface{}, error) { lookups.uncompressedPubkeys[uncompressedPubkey] = struct{}{} default: - jsonErr := btcjson.RPCError{ - Code: btcjson.ErrRPCInvalidAddressOrKey, + jsonErr := dcrjson.RPCError{ + Code: dcrjson.ErrRPCInvalidAddressOrKey, Message: "Pubkey " + addrStr + " is of unknown length", } return nil, &jsonErr @@ -1894,42 +2571,36 @@ func handleRescan(wsc *wsClient, icmd interface{}) (interface{}, error) { db := wsc.server.server.db - minBlockSha, err := wire.NewShaHashFromStr(cmd.BeginBlock) + minBlockSha, err := chainhash.NewHashFromStr(cmd.BeginBlock) if err != nil { return nil, rpcDecodeHexError(cmd.BeginBlock) } minBlock, err := db.FetchBlockHeightBySha(minBlockSha) if err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCBlockNotFound, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCBlockNotFound, Message: "Error getting block: " + err.Error(), } } maxBlock := database.AllShas if cmd.EndBlock != nil { - maxBlockSha, err := wire.NewShaHashFromStr(*cmd.EndBlock) + maxBlockSha, err := chainhash.NewHashFromStr(*cmd.EndBlock) if err != nil { return nil, rpcDecodeHexError(*cmd.EndBlock) } maxBlock, err = db.FetchBlockHeightBySha(maxBlockSha) if err != nil { - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCBlockNotFound, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCBlockNotFound, Message: "Error getting block: " + err.Error(), } } } - // lastBlock and lastBlockHash track the previously-rescanned block. - // They equal nil when no previous blocks have been rescanned. 
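Decred outpoints carry a tree byte in addition to the hash and index, which is why the deserialization above now passes three arguments to wire.NewOutPoint. A short sketch of building regular- and stake-tree outpoints from a hash string, using the same calls that appear in the patch (the zero hash is a placeholder, not a real transaction hash):

```go
package main

import (
	"fmt"

	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/wire"
	"github.com/decred/dcrutil"
)

func main() {
	// Placeholder hash; a real caller would use an actual transaction hash.
	hash, err := chainhash.NewHashFromStr(
		"0000000000000000000000000000000000000000000000000000000000000000")
	if err != nil {
		fmt.Println("bad hash:", err)
		return
	}

	// Same outpoint index, but on the two different transaction trees.
	regular := wire.NewOutPoint(hash, 0, dcrutil.TxTreeRegular)
	stake := wire.NewOutPoint(hash, 0, dcrutil.TxTreeStake)

	fmt.Println("regular tree:", regular.Tree, "stake tree:", stake.Tree)
}
```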
- var lastBlock *btcutil.Block - var lastBlockHash *wire.ShaHash - - // A ticker is created to wait at least 10 seconds before notifying the - // websocket client of the current progress completed by the rescan. - ticker := time.NewTicker(10 * time.Second) - defer ticker.Stop() + // lastBlock tracks the previously-rescanned block. + // It equals nil when no previous blocks have been rescanned. + var lastBlock *dcrutil.Block // FetchHeightRange may not return a complete list of block shas for // the given range, so fetch range as many times as necessary. @@ -1938,11 +2609,12 @@ fetchRange: hashList, err := db.FetchHeightRange(minBlock, maxBlock) if err != nil { rpcsLog.Errorf("Error looking up block range: %v", err) - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCDatabase, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCDatabase, Message: "Database error: " + err.Error(), } } + if len(hashList) == 0 { // The rescan is finished if no blocks hashes for this // range were successfully fetched and a stop block @@ -1966,7 +2638,9 @@ fetchRange: pauseGuard := wsc.server.server.blockManager.Pause() curHash, _, err := db.NewestSha() again := true - if err == nil && (lastBlockHash == nil || *lastBlockHash == *curHash) { + lastBlockHash := lastBlock.Sha() + if err == nil && (lastBlockHash == nil || + *lastBlockHash == *curHash) { again = false n := wsc.server.ntfnMgr n.RegisterSpentRequests(wsc, lookups.unspentSlice()) @@ -1976,8 +2650,8 @@ fetchRange: if err != nil { rpcsLog.Errorf("Error fetching best block "+ "hash: %v", err) - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCDatabase, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCDatabase, Message: "Database error: " + err.Error(), } @@ -1997,8 +2671,8 @@ fetchRange: if err != database.ErrBlockShaMissing { rpcsLog.Errorf("Error looking up "+ "block: %v", err) - return nil, &btcjson.RPCError{ - Code: btcjson.ErrRPCDatabase, + return nil, &dcrjson.RPCError{ + Code: dcrjson.ErrRPCDatabase, Message: "Database error: " + err.Error(), } @@ -2025,7 +2699,7 @@ fetchRange: // reevaluated for the new hashList. minBlock += int64(i) hashList, err = recoverFromReorg(db, minBlock, - maxBlock, lastBlockHash) + maxBlock, lastBlock) if err != nil { return nil, err } @@ -2034,10 +2708,57 @@ fetchRange: } goto loopHashList } - if i == 0 && lastBlockHash != nil { + + if i == 0 && lastBlock != nil { // Ensure the new hashList is on the same fork // as the last block from the old hashList. - jsonErr := descendantBlock(lastBlockHash, blk) + jsonErr := descendantBlock(lastBlock, blk) + if jsonErr != nil { + return nil, jsonErr + } + } + + // Fetch the parent too, using the same code as + // described above. + var parent *dcrutil.Block + + // No need to get a parent for the genesis block. 
+ if !hashList[i].IsEqual(activeNetParams.GenesisHash) { + parent, err = db.FetchBlockBySha( + &blk.MsgBlock().Header.PrevBlock) + } else { + parent = nil + err = nil + } + if err != nil { + if err != database.ErrBlockShaMissing { + rpcsLog.Errorf("Error looking up "+ + "block: %v", err) + return nil, err + } + + if maxBlock != database.AllShas { + rpcsLog.Errorf("Stopping rescan for "+ + "reorged block %v", + cmd.EndBlock) + return nil, &ErrRescanReorg + } + + minBlock += int64(i) + hashList, err = recoverFromReorg(db, minBlock, + maxBlock, lastBlock) + if err != nil { + return nil, err + } + if len(hashList) == 0 { + break fetchRange + } + goto loopHashList + } + if i == 0 && parent != nil { + // Ensure the new hashList is on the same fork + // as the last block from the old hashList. + jsonErr := descendantBlock(parent, blk) if jsonErr != nil { return nil, jsonErr } @@ -2051,52 +2772,51 @@ fetchRange: "for disconnected client", blk.Height()) return nil, nil default: - rescanBlock(wsc, &lookups, blk) + rescanBlock(wsc, &lookups, blk, parent) lastBlock = blk - lastBlockHash = blk.Sha() } // Periodically notify the client of the progress // completed. Continue with next block if no progress // notification is needed yet. - select { - case <-ticker.C: // fallthrough - default: - continue - } + if blk.Height()%100 == 0 { + n := dcrjson.NewRescanProgressNtfn(hashList[i].String(), + int32(blk.Height()), + blk.MsgBlock().Header.Timestamp.Unix()) + mn, err := dcrjson.MarshalCmd(nil, n) + if err != nil { + rpcsLog.Errorf("Failed to marshal rescan "+ + "progress notification: %v", err) + continue + } - n := btcjson.NewRescanProgressNtfn(hashList[i].String(), - int32(blk.Height()), - blk.MsgBlock().Header.Timestamp.Unix()) - mn, err := btcjson.MarshalCmd(nil, n) - if err != nil { - rpcsLog.Errorf("Failed to marshal rescan "+ - "progress notification: %v", err) - continue - } - - if err = wsc.QueueNotification(mn); err == ErrClientQuit { - // Finished if the client disconnected. - rpcsLog.Debugf("Stopped rescan at height %v "+ - "for disconnected client", blk.Height()) - return nil, nil + if err = wsc.QueueNotification(mn); err == ErrClientQuit { + // Finished if the client disconnected. + rpcsLog.Debugf("Stopped rescan at height %v "+ + "for disconnected client", blk.Height()) + return nil, nil + } } } minBlock += int64(len(hashList)) } - // Notify websocket client of the finished rescan. Due to how btcd + // Scan the mempool for addresses. + scanMempool(wsc, &lookups) + + // Notify websocket client of the finished rescan. Due to how dcrd // asynchronously queues notifications to not block calling code, // there is no guarantee that any of the notifications created during // rescan (such as rescanprogress, recvtx and redeemingtx) will be // received before the rescan RPC returns. Therefore, another method // is needed to safely inform clients that all rescan notifications have // been sent. 
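The rescan walks a block pair rather than a single block: stake transactions always come from the current block, while regular transactions come from the parent and are only considered when the current header's vote bits mark the parent's regular tree valid. A condensed sketch of that selection, mirroring the rescanBlock logic above; the transactions are plain labels here, where the real code works on *dcrutil.Tx:

```go
package main

import (
	"fmt"

	"github.com/decred/dcrutil"
)

// selectRescanTxs mirrors the tree selection in rescanBlock: stake txs come
// from the current block, regular txs come from the parent, and the parent's
// regular tree only counts when the vote bits approve it.
func selectRescanTxs(voteBits uint16, parentRegular, currentStake []string) []string {
	var all []string
	if dcrutil.IsFlagSet16(voteBits, dcrutil.BlockValid) {
		all = append(all, parentRegular...)
	}
	all = append(all, currentStake...)
	return all
}

func main() {
	parentRegular := []string{"regular-tx-1", "regular-tx-2"}
	currentStake := []string{"ticket-purchase", "vote"}

	// Vote bits with BlockValid set: the parent's regular txs are included.
	fmt.Println(selectRescanTxs(uint16(dcrutil.BlockValid), parentRegular, currentStake))

	// Vote bits without BlockValid: only the stake tree is scanned.
	fmt.Println(selectRescanTxs(0, parentRegular, currentStake))
}
```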
- n := btcjson.NewRescanFinishedNtfn(lastBlockHash.String(), + lastBlockHash := lastBlock.Sha() + n := dcrjson.NewRescanFinishedNtfn(lastBlockHash.String(), int32(lastBlock.Height()), lastBlock.MsgBlock().Header.Timestamp.Unix()) - if mn, err := btcjson.MarshalCmd(nil, n); err != nil { + if mn, err := dcrjson.MarshalCmd(nil, n); err != nil { rpcsLog.Errorf("Failed to marshal rescan finished "+ "notification: %v", err) } else { diff --git a/sample-btcd.conf b/sample-dcrd.conf similarity index 75% rename from sample-btcd.conf rename to sample-dcrd.conf index 515a1fc9..f06576dd 100644 --- a/sample-btcd.conf +++ b/sample-dcrd.conf @@ -6,12 +6,12 @@ ; The directory to store data such as the block chain and peer addresses. The ; block chain takes several GB, so this location must have a lot of free space. -; The default is ~/.btcd/data on POSIX OSes, $LOCALAPPDATA/Btcd/data on Windows, -; ~/Library/Application Support/Btcd/data on Mac OS, and $home/btcd/data on +; The default is ~/.dcrd/data on POSIX OSes, $LOCALAPPDATA/Dcrd/data on Windows, +; ~/Library/Application Support/Dcrd/data on Mac OS, and $homed/dcrd/data on ; Plan9. Environment variables are expanded so they may be used. NOTE: Windows ; environment variables are typically %VARIABLE%, but they must be accessed with ; $VARIABLE here. Also, ~ is expanded to $LOCALAPPDATA on Windows. -; datadir=~/.btcd/data +; datadir=~/.dcrd/data ; ------------------------------------------------------------------------------ @@ -29,9 +29,9 @@ ; proxypass= ; The SOCKS5 proxy above is assumed to be Tor (https://www.torproject.org). -; If the proxy is not tor the following may be used to prevent using tor -; specific SOCKS queries to lookup addresses (this increases anonymity when tor -; is used by preventing your IP being leaked via DNS). +; If the proxy is not tor the the following my be used to prevent using +; tor specific SOCKS queries to lookup addresses (this increases anonymity when +; tor is used by preventing your IP being leaked via DNS). ; noonion=1 ; Use an alternative proxy to connect to .onion addresses. The proxy is assumed @@ -52,7 +52,7 @@ ; upnp=1 ; Specify the external IP addresses your node is listening on. One address per -; line. btcd will not contact 3rd-party sites to obtain external ip addresses. +; line. dcrd will not contact 3rd-party sites to obtain external ip addresses. ; This means if you are behind NAT, your node will not be able to advertise a ; reachable address unless you specify it here or enable the 'upnp' option (and ; have a supported device). @@ -64,7 +64,7 @@ ; ; Only one of the following two options, 'addpeer' and 'connect', may be ; specified. Both allow you to specify peers that you want to stay connected -; with, but the behavior is slightly different. By default, btcd will query DNS +; with, but the behavior is slightly different. By default, dcrd will query DNS ; to find peers to connect to, so unless you have a specific reason such as ; those described below, you probably won't need to modify anything here. ; @@ -86,9 +86,9 @@ ; You may specify each IP address with or without a port. The default port will ; be added automatically if one is not specified here. ; addpeer=192.168.1.1 -; addpeer=10.0.0.2:8333 +; addpeer=10.0.0.2:9108 ; addpeer=fe80::1 -; addpeer=[fe80::2]:8333 +; addpeer=[fe80::2]:9108 ; Add persistent peers that you ONLY want to connect to as desired. One peer ; per line. You may specify each IP address with or without a port. 
The @@ -96,19 +96,19 @@ ; NOTE: Specifying this option has other side effects as described above in ; the 'addpeer' versus 'connect' summary section. ; connect=192.168.1.1 -; connect=10.0.0.2:8333 +; connect=10.0.0.2:9108 ; connect=fe80::1 -; connect=[fe80::2]:8333 +; connect=[fe80::2]:9108 ; Maximum number of inbound and outbound peers. -; maxpeers=125 +; maxpeers=8 ; How long to ban misbehaving peers. Valid time units are {s, m, h}. ; Minimum 1s. ; banduration=24h ; banduration=11h30m15s -; Disable DNS seeding for peers. By default, when btcd starts, it will use +; Disable DNS seeding for peers. By default, when dcrd starts, it will use ; DNS to query for available peers to connect with. ; nodnsseed=1 @@ -122,16 +122,16 @@ ; listen=0.0.0.0 ; All ipv6 interfaces on default port: ; listen=:: -; All interfaces on port 8333: -; listen=:8333 -; All ipv4 interfaces on port 8333: -; listen=0.0.0.0:8333 -; All ipv6 interfaces on port 8333: -; listen=[::]:8333 -; Only ipv4 localhost on port 8333: -; listen=127.0.0.1:8333 -; Only ipv6 localhost on port 8333: -; listen=[::1]:8333 +; All interfaces on port 9108: +; listen=:9108 +; All ipv4 interfaces on port 9108: +; listen=0.0.0.0:9108 +; All ipv6 interfaces on port 9108: +; listen=[::]:9108 +; Only ipv4 localhost on port 9108: +; listen=127.0.0.1:9108 +; Only ipv6 localhost on port 9108: +; listen=[::1]:9108 ; Only ipv4 localhost on non-standard port 8336: ; listen=127.0.0.1:8336 ; All interfaces on non-standard port 8336: @@ -147,42 +147,37 @@ ; ------------------------------------------------------------------------------ ; RPC server options - The following options control the built-in RPC server -; which is used to control and query information from a running btcd process. +; which is used to control and query information from a running dcrd process. ; -; NOTE: The RPC server is disabled by default if rpcuser AND rpcpass, or -; rpclimituser AND rpclimitpass, are not specified. +; NOTE: The RPC server is disabled by default if no rpcuser or rpcpass is +; specified. ; ------------------------------------------------------------------------------ -; Secure the RPC API by specifying the username and password. You can also -; specify a limited username and password. You must specify at least one -; full set of credentials - limited or admin - or the RPC server will -; be disabled. -; rpcuser=whatever_admin_username_you_want +; Secure the RPC API by specifying the username and password. You must specify +; both or the RPC server will be disabled. +; rpcuser=whatever_username_you_want ; rpcpass= -; rpclimituser=whatever_limited_username_you_want -; rpclimitpass= ; Specify the interfaces for the RPC server listen on. One listen address per ; line. NOTE: The default port is modified by some options such as 'testnet', ; so it is recommended to not specify a port and allow a proper default to be -; chosen unless you have a specific reason to do otherwise. By default, the -; RPC server will only listen on localhost for IPv4 and IPv6. -; All interfaces on default port: +; chosen unless you have a specific reason to do otherwise. 
+; All interfaces on default port (this is the default): ; rpclisten= ; All ipv4 interfaces on default port: ; rpclisten=0.0.0.0 ; All ipv6 interfaces on default port: ; rpclisten=:: -; All interfaces on port 8334: -; rpclisten=:8334 -; All ipv4 interfaces on port 8334: -; rpclisten=0.0.0.0:8334 -; All ipv6 interfaces on port 8334: -; rpclisten=[::]:8334 -; Only ipv4 localhost on port 8334: -; rpclisten=127.0.0.1:8334 -; Only ipv6 localhost on port 8334: -; rpclisten=[::1]:8334 +; All interfaces on port 9109: +; rpclisten=:9109 +; All ipv4 interfaces on port 9109: +; rpclisten=0.0.0.0:9109 +; All ipv6 interfaces on port 9109: +; rpclisten=[::]:9109 +; Only ipv4 localhost on port 9109: +; rpclisten=127.0.0.1:9109 +; Only ipv6 localhost on port 9109: +; rpclisten=[::1]:9109 ; Only ipv4 localhost on non-standard port 8337: ; rpclisten=127.0.0.1:8337 ; All interfaces on non-standard port 8337: @@ -203,25 +198,6 @@ ; server without having to remove credentials from the config file. ; norpc=1 -; Use the following setting to disable TLS for the RPC server. NOTE: This -; option only works if the RPC server is bound to localhost interfaces (which is -; the default). -; notls=1 - -; ------------------------------------------------------------------------------ -; Mempool Settings - The following options -; ------------------------------------------------------------------------------ -; Limit orphan transaction pool to 1000 transactions. -; maxorphantx=1000 - -; ------------------------------------------------------------------------------ -; Optional Transaction Indexes -; ------------------------------------------------------------------------------ - -; Build and maintain a full address-based transaction index. -; addrindex=1 -; Delete the entire address index on start up, then exit. -; dropaddrindex=0 ; ------------------------------------------------------------------------------ ; Coin Generation (Mining) Settings - The following options control the @@ -238,9 +214,9 @@ ; Add addresses to pay mined blocks to for CPU mining and the block templates ; generated for the getwork RPC as desired. One address per line. -; miningaddr=1yourbitcoinaddress -; miningaddr=1yourbitcoinaddress2 -; miningaddr=1yourbitcoinaddress3 +; miningaddr=youraddress +; miningaddr=youraddress2 +; miningaddr=youraddress3 ; Specify the minimum block size in bytes to create. By default, only ; transactions which have enough fees or a high enough priority will be included @@ -250,7 +226,7 @@ ; blockminsize=0 ; Specify the maximum block size in bytes to create. This value will be limited -; to the consensus limit if it is larger than that value. +; to the consensus limit if it is larger than this value. ; blockmaxsize=750000 ; Specify the size in bytes of the high-priority/low-fee area when creating a @@ -269,7 +245,7 @@ ; Debug logging level. ; Valid levels are {trace, debug, info, warn, error, critical} ; You may also specify =,=,... to set -; log level for individual subsystems. Use btcd --debuglevel=show to list +; log level for individual subsystems. Use dcrd --debuglevel=show to list ; available subsystems. ; debuglevel=info diff --git a/server.go b/server.go index 345d64ea..94990edc 100644 --- a/server.go +++ b/server.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
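Pulling the renamed defaults together, a minimal hypothetical dcrd.conf drawing on the sample file above; the credentials and mining address are placeholders, and only options shown in the sample are used:

```
; Hypothetical minimal dcrd.conf; values mirror the sample file above.

; Peer-to-peer listener on the Decred default port.
listen=0.0.0.0:9108

; RPC credentials (placeholders) and the default RPC port on localhost.
rpcuser=myrpcuser
rpcpass=myrpcpass
rpclisten=127.0.0.1:9109

; Optional: pay mined blocks to this address (placeholder).
; miningaddr=youraddress

debuglevel=info
```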
@@ -18,13 +19,15 @@ import ( "sync/atomic" "time" - "github.com/btcsuite/btcd/addrmgr" - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/btcjson" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/addrmgr" + "github.com/decred/dcrd/blockchain" + "github.com/decred/dcrd/blockchain/stake" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/database" + "github.com/decred/dcrd/dcrjson" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) const ( @@ -44,10 +47,10 @@ const ( connectionRetryInterval = time.Second * 10 // defaultMaxOutbound is the default number of max outbound peers. - defaultMaxOutbound = 8 + defaultMaxOutbound = 3 ) -// broadcastMsg provides the ability to house a bitcoin message to be broadcast +// broadcastMsg provides the ability to house a decred message to be broadcast // to all connected peers except specified excluded peers. type broadcastMsg struct { message wire.Message @@ -76,13 +79,13 @@ type relayMsg struct { // updates, peer heights will be kept up to date, allowing for fresh data when // selecting sync peer candidacy. type updatePeerHeightsMsg struct { - newSha *wire.ShaHash + newSha *chainhash.Hash newHeight int32 originPeer *peer } -// server provides a bitcoin server for handling communications to and from -// bitcoin peers. +// server provides a decred server for handling communications to and from +// decred peers. type server struct { nonce uint64 listeners []net.Listener @@ -113,6 +116,7 @@ type server struct { nat NAT db database.Db timeSource blockchain.MedianTimeSource + tmdb *stake.TicketDB } type peerState struct { @@ -355,7 +359,7 @@ func (s *server) handleRelayInvMsg(state *peerState, msg relayMsg) { // Don't relay the transaction if there is a bloom // filter loaded and the transaction doesn't match it. if p.filter.IsLoaded() { - tx, ok := msg.data.(*btcutil.Tx) + tx, ok := msg.data.(*dcrutil.Tx) if !ok { peerLog.Warnf("Underlying data for tx" + " inv relay is not a transaction") @@ -400,7 +404,7 @@ type getConnCountMsg struct { } type getPeerInfoMsg struct { - reply chan []*btcjson.GetPeerInfoResult + reply chan []*dcrjson.GetPeerInfoResult } type getAddedNodesMsg struct { @@ -438,7 +442,7 @@ func (s *server) handleQuery(querymsg interface{}, state *peerState) { case getPeerInfoMsg: syncPeer := s.blockManager.SyncPeer() - infos := make([]*btcjson.GetPeerInfoResult, 0, len(state.peers)) + infos := make([]*dcrjson.GetPeerInfoResult, 0, len(state.peers)) state.forAllPeers(func(p *peer) { if !p.Connected() { return @@ -449,7 +453,7 @@ func (s *server) handleQuery(querymsg interface{}, state *peerState) { // and we don't really care if they are raced to get the new // version. 
p.StatsMtx.Lock() - info := &btcjson.GetPeerInfoResult{ + info := &dcrjson.GetPeerInfoResult{ ID: p.id, Addr: p.addr, Services: fmt.Sprintf("%08d", p.services), @@ -771,8 +775,7 @@ out: // only allow recent nodes (10mins) after we failed 30 // times - if time.Now().After(addr.LastAttempt().Add(10*time.Minute)) && - tries < 30 { + if tries < 30 && time.Now().Sub(addr.LastAttempt()) < 10*time.Minute { continue } @@ -800,7 +803,7 @@ out: } } - if cfg.AddrIndex { + if !cfg.NoAddrIndex { s.addrIndexer.Stop() } s.blockManager.Stop() @@ -843,7 +846,7 @@ func (s *server) ConnectedCount() int32 { return <-replyChan } -// AddedNodeInfo returns an array of btcjson.GetAddedNodeInfoResult structures +// AddedNodeInfo returns an array of dcrjson.GetAddedNodeInfoResult structures // describing the persistent (added) nodes. func (s *server) AddedNodeInfo() []*peer { replyChan := make(chan []*peer) @@ -853,8 +856,8 @@ func (s *server) AddedNodeInfo() []*peer { // PeerInfo returns an array of PeerInfo structures describing all connected // peers. -func (s *server) PeerInfo() []*btcjson.GetPeerInfoResult { - replyChan := make(chan []*btcjson.GetPeerInfoResult) +func (s *server) PeerInfo() []*dcrjson.GetPeerInfoResult { + replyChan := make(chan []*dcrjson.GetPeerInfoResult) s.query <- getPeerInfoMsg{reply: replyChan} @@ -957,7 +960,7 @@ func (s *server) NetTotals() (uint64, uint64) { // the latest connected main chain block, or a recognized orphan. These height // updates allow us to dynamically refresh peer heights, ensuring sync peer // selection has access to the latest block heights for each peer. -func (s *server) UpdatePeerHeights(latestBlkSha *wire.ShaHash, latestHeight int32, updateSource *peer) { +func (s *server) UpdatePeerHeights(latestBlkSha *chainhash.Hash, latestHeight int32, updateSource *peer) { s.peerHeightsUpdate <- updatePeerHeightsMsg{ newSha: latestBlkSha, newHeight: latestHeight, @@ -1064,7 +1067,7 @@ func (s *server) Start() { s.cpuMiner.Start() } - if cfg.AddrIndex { + if !cfg.NoAddrIndex { s.addrIndexer.Start() } } @@ -1206,7 +1209,7 @@ out: // listen port? // XXX this assumes timeout is in seconds. listenPort, err := s.nat.AddPortMapping("tcp", int(lport), int(lport), - "btcd listen port", 20*60) + "dcrd listen port", 20*60) if err != nil { srvrLog.Warnf("can't add UPnP port mapping: %v", err) } @@ -1244,16 +1247,20 @@ out: s.wg.Done() } -// newServer returns a new btcd server configured to listen on addr for the -// bitcoin network type specified by chainParams. Use start to begin accepting +// newServer returns a new dcrd server configured to listen on addr for the +// decred network type specified by chainParams. Use start to begin accepting // connections from peers. 
-func newServer(listenAddrs []string, db database.Db, chainParams *chaincfg.Params) (*server, error) { +func newServer(listenAddrs []string, + database database.Db, + tmdb *stake.TicketDB, + chainParams *chaincfg.Params) (*server, error) { + nonce, err := wire.RandomUint64() if err != nil { return nil, err } - amgr := addrmgr.New(cfg.DataDir, btcdLookup) + amgr := addrmgr.New(cfg.DataDir, dcrdLookup) var listeners []net.Listener var nat NAT @@ -1395,7 +1402,8 @@ func newServer(listenAddrs []string, db database.Db, chainParams *chaincfg.Param modifyRebroadcastInv: make(chan interface{}), peerHeightsUpdate: make(chan updatePeerHeightsMsg), nat: nat, - db: db, + db: database, + tmdb: tmdb, timeSource: blockchain.NewMedianTime(), } bm, err := newBlockManager(&s) @@ -1406,7 +1414,7 @@ func newServer(listenAddrs []string, db database.Db, chainParams *chaincfg.Param s.txMemPool = newTxMemPool(&s) s.cpuMiner = newCPUMiner(&s) - if cfg.AddrIndex { + if !cfg.NoAddrIndex { ai, err := newAddrIndexer(&s) if err != nil { return nil, err diff --git a/service_windows.go b/service_windows.go index cb4212bc..3dee2891 100644 --- a/service_windows.go +++ b/service_windows.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -16,55 +17,55 @@ import ( ) const ( - // svcName is the name of btcd service. - svcName = "btcdsvc" + // svcName is the name of dcrd service. + svcName = "dcrdsvc" // svcDisplayName is the service name that will be shown in the windows // services list. Not the svcName is the "real" name which is used // to control the service. This is only for display purposes. - svcDisplayName = "Btcd Service" + svcDisplayName = "Dcrd Service" // svcDesc is the description of the service. - svcDesc = "Downloads and stays synchronized with the bitcoin block " + + svcDesc = "Downloads and stays synchronized with the decred block " + "chain and provides chain services to applications." ) // elog is used to send messages to the Windows event log. var elog *eventlog.Log -// logServiceStartOfDay logs information about btcd when the main server has +// logServiceStartOfDay logs information about dcrd when the main server has // been started to the Windows event log. func logServiceStartOfDay(srvr *server) { var message string message += fmt.Sprintf("Version %s\n", version()) - message += fmt.Sprintf("Configuration directory: %s\n", btcdHomeDir) + message += fmt.Sprintf("Configuration directory: %s\n", dcrdHomeDir) message += fmt.Sprintf("Configuration file: %s\n", cfg.ConfigFile) message += fmt.Sprintf("Data directory: %s\n", cfg.DataDir) elog.Info(1, message) } -// btcdService houses the main service handler which handles all service -// updates and launching btcdMain. -type btcdService struct{} +// dcrdService houses the main service handler which handles all service +// updates and launching dcrdMain. +type dcrdService struct{} // Execute is the main entry point the winsvc package calls when receiving // information from the Windows service control manager. It launches the -// long-running btcdMain (which is the real meat of btcd), handles service +// long-running dcrdMain (which is the real meat of dcrd), handles service // change requests, and notifies the service control manager of changes. 
-func (s *btcdService) Execute(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (bool, uint32) { +func (s *dcrdService) Execute(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (bool, uint32) { // Service start is pending. const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown changes <- svc.Status{State: svc.StartPending} - // Start btcdMain in a separate goroutine so the service can start + // Start dcrdMain in a separate goroutine so the service can start // quickly. Shutdown (along with a potential error) is reported via // doneChan. serverChan is notified with the main server instance once // it is started so it can be gracefully stopped. doneChan := make(chan error) serverChan := make(chan *server) go func() { - err := btcdMain(serverChan) + err := dcrdMain(serverChan) doneChan <- err }() @@ -89,7 +90,7 @@ loop: // already setup or just break out and allow // the service to exit immediately if it's not // setup yet. Note that calling Stop will cause - // btcdMain to exit in the goroutine above which + // dcrdMain to exit in the goroutine above which // will in turn send a signal (and a potential // error) to doneChan. if mainServer != nil { @@ -120,7 +121,7 @@ loop: return false, 0 } -// installService attempts to install the btcd service. Typically this should +// installService attempts to install the dcrd service. Typically this should // be done by the msi installer, but it is provided here since it can be useful // for development. func installService() error { @@ -174,7 +175,7 @@ func installService() error { return nil } -// removeService attempts to uninstall the btcd service. Typically this should +// removeService attempts to uninstall the dcrd service. Typically this should // be done by the msi uninstaller, but it is provided here since it can be // useful for development. Not the eventlog entry is intentionally not removed // since it would invalidate any existing event log messages. @@ -202,7 +203,7 @@ func removeService() error { return nil } -// startService attempts to start the btcd service. +// startService attempts to start the dcrd service. func startService() error { // Connect to the windows service manager. serviceManager, err := mgr.Connect() @@ -311,7 +312,7 @@ func serviceMain() (bool, error) { } defer elog.Close() - err = svc.Run(svcName, &btcdService{}) + err = svc.Run(svcName, &dcrdService{}) if err != nil { elog.Error(1, fmt.Sprintf("Service start failed: %v", err)) return true, err diff --git a/signal.go b/signal.go index 0c527b27..fd4eaa5d 100644 --- a/signal.go +++ b/signal.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -35,13 +36,13 @@ func mainInterruptHandler() { case <-interruptChannel: // Ignore more than one shutdown signal. if isShutdown { - btcdLog.Infof("Received SIGINT (Ctrl+C). " + + dcrdLog.Infof("Received SIGINT (Ctrl+C). " + "Already shutting down...") continue } isShutdown = true - btcdLog.Infof("Received SIGINT (Ctrl+C). Shutting down...") + dcrdLog.Infof("Received SIGINT (Ctrl+C). Shutting down...") // Run handlers in LIFO order. 
for i := range interruptCallbacks { diff --git a/txscript/README.md b/txscript/README.md index 70c986c8..c55f4958 100644 --- a/txscript/README.md +++ b/txscript/README.md @@ -1,78 +1,54 @@ txscript ======== -[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)] -(https://travis-ci.org/btcsuite/btcd) - -Package txscript implements the bitcoin transaction script language. There is +Package txscript implements the decred transaction script language. There is a comprehensive test suite. Package txscript is licensed under the liberal ISC license. This package has intentionally been designed so it can be used as a standalone -package for any projects needing to use or validate bitcoin transaction scripts. +package for any projects needing to use or validate decred transaction scripts. -## Bitcoin Scripts +## Decred Scripts -Bitcoin provides a stack-based, FORTH-like langauge for the scripts in -the bitcoin transactions. This language is not turing complete -although it is still fairly powerful. A description of the language -can be found at https://en.bitcoin.it/wiki/Script +Decred provides a stack-based, FORTH-like language for the scripts in +the decred transactions. This language is not Turing complete +although it is still fairly powerful. ## Documentation -[![GoDoc](https://godoc.org/github.com/btcsuite/btcd/txscript?status.png)] -(http://godoc.org/github.com/btcsuite/btcd/txscript) +[![GoDoc](https://godoc.org/github.com/decred/dcrd/txscript?status.png)] (http://godoc.org/github.com/decred/dcrd/txscript) Full `go doc` style documentation for the project can be viewed online without installing this package by using the GoDoc site -[here](http://godoc.org/github.com/btcsuite/btcd/txscript). +[here](http://godoc.org/github.com/decred/dcrd/txscript). You can also view the documentation locally once the package is installed with the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to -http://localhost:6060/pkg/github.com/btcsuite/btcd/txscript +http://localhost:6060/pkg/github.com/decred/dcrd/txscript ## Installation ```bash -$ go get github.com/btcsuite/btcd/txscript +$ go get github.com/decred/dcrd/txscript ``` ## Examples * [Standard Pay-to-pubkey-hash Script] - (http://godoc.org/github.com/btcsuite/btcd/txscript#example-PayToAddrScript) - Demonstrates creating a script which pays to a bitcoin address. It also + (http://godoc.org/github.com/decred/dcrd/txscript#example-PayToAddrScript) + Demonstrates creating a script which pays to a decred address. It also prints the created script hex and uses the DisasmString function to display the disassembled script. * [Extracting Details from Standard Scripts] - (http://godoc.org/github.com/btcsuite/btcd/txscript#example-ExtractPkScriptAddrs) + (http://godoc.org/github.com/decred/dcrd/txscript#example-ExtractPkScriptAddrs) Demonstrates extracting information from a standard public key script. * [Manually Signing a Transaction Output] - (http://godoc.org/github.com/btcsuite/btcd/txscript#example-SignTxOutput) + (http://godoc.org/github.com/decred/dcrd/txscript#example-SignTxOutput) Demonstrates manually creating and signing a redeem transaction. -## GPG Verification Key - -All official release tags are signed by Conformal so users can ensure the code -has not been tampered with and is coming from the btcsuite developers.
To -verify the signature perform the following: - -- Download the public key from the Conformal website at - https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt - -- Import the public key into your GPG keyring: - ```bash - gpg --import GIT-GPG-KEY-conformal.txt - ``` - -- Verify the release tag with the following command where `TAG_NAME` is a - placeholder for the specific tag: - ```bash - git tag -v TAG_NAME - ``` - ## License Package txscript is licensed under the liberal ISC License. diff --git a/txscript/consensus.go b/txscript/consensus.go new file mode 100644 index 00000000..6ce2e7de --- /dev/null +++ b/txscript/consensus.go @@ -0,0 +1,15 @@ +// Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +const ( + // LockTimeThreshold is the number below which a lock time is + // interpreted to be a block number. Since an average of one block + // is generated per 10 minutes, this allows blocks for about 9,512 + // years. However, if the field is interpreted as a timestamp, given + // the lock time is a uint32, the max is sometime around 2106. + LockTimeThreshold uint32 = 5e8 // Tue Nov 5 00:53:20 1985 UTC +) diff --git a/txscript/data/script_invalid.json b/txscript/data/script_invalid.json index 5879b1c2..1f6fa1bd 100644 --- a/txscript/data/script_invalid.json +++ b/txscript/data/script_invalid.json @@ -1,7 +1,7 @@ [ ["Format is: [scriptSig, scriptPubKey, flags, ... comments]"], ["It is evaluated as if there was a crediting coinbase transaction with two 0"], -["pushes as scriptSig, and one output of 0 satoshi and given scriptPubKey,"], +["pushes as scriptSig, and one output of 0 atoms and given scriptPubKey,"], ["followed by a spending transaction which spends this output as only input (and"], ["correct prevout hash), using the given scriptSig. 
All nLockTimes are 0, all"], ["nSequences are max."], @@ -120,26 +120,9 @@ ["NOP", "2SWAP 1", "P2SH,STRICTENC"], ["1", "2 3 2SWAP 1", "P2SH,STRICTENC"], -["'a' 'b'", "CAT", "P2SH,STRICTENC", "CAT disabled"], -["'a' 'b' 0", "IF CAT ELSE 1 ENDIF", "P2SH,STRICTENC", "CAT disabled"], -["'abc' 1 1", "SUBSTR", "P2SH,STRICTENC", "SUBSTR disabled"], -["'abc' 1 1 0", "IF SUBSTR ELSE 1 ENDIF", "P2SH,STRICTENC", "SUBSTR disabled"], -["'abc' 2 0", "IF LEFT ELSE 1 ENDIF", "P2SH,STRICTENC", "LEFT disabled"], -["'abc' 2 0", "IF RIGHT ELSE 1 ENDIF", "P2SH,STRICTENC", "RIGHT disabled"], - ["NOP", "SIZE 1", "P2SH,STRICTENC"], ["'abc'", "IF INVERT ELSE 1 ENDIF", "P2SH,STRICTENC", "INVERT disabled"], -["1 2 0 IF AND ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC", "AND disabled"], -["1 2 0 IF OR ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC", "OR disabled"], -["1 2 0 IF XOR ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC", "XOR disabled"], -["2 0 IF 2MUL ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC", "2MUL disabled"], -["2 0 IF 2DIV ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC", "2DIV disabled"], -["2 2 0 IF MUL ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC", "MUL disabled"], -["2 2 0 IF DIV ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC", "DIV disabled"], -["2 2 0 IF MOD ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC", "MOD disabled"], -["2 2 0 IF LSHIFT ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC", "LSHIFT disabled"], -["2 2 0 IF RSHIFT ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC", "RSHIFT disabled"], ["", "EQUAL NOT", "P2SH,STRICTENC", "EQUAL must error when there are no stack items"], ["0", "EQUAL NOT", "P2SH,STRICTENC", "EQUAL must error when there are not 2 stack items"], @@ -152,16 +135,11 @@ ["2147483647 DUP ADD", "4294967294 NUMEQUAL", "P2SH,STRICTENC", "NUMEQUAL must be in numeric range"], ["'abcdef' NOT", "0 EQUAL", "P2SH,STRICTENC", "NOT is an arithmetic operand"], -["2 DUP MUL", "4 EQUAL", "P2SH,STRICTENC", "disabled"], -["2 DUP DIV", "1 EQUAL", "P2SH,STRICTENC", "disabled"], ["2 2MUL", "4 EQUAL", "P2SH,STRICTENC", "disabled"], ["2 2DIV", "1 EQUAL", "P2SH,STRICTENC", "disabled"], -["7 3 MOD", "1 EQUAL", "P2SH,STRICTENC", "disabled"], -["2 2 LSHIFT", "8 EQUAL", "P2SH,STRICTENC", "disabled"], -["2 1 RSHIFT", "1 EQUAL", "P2SH,STRICTENC", "disabled"], -["1","NOP1 NOP2 NOP3 NOP4 NOP5 NOP6 NOP7 NOP8 NOP9 NOP10 2 EQUAL", "P2SH,STRICTENC"], -["'NOP_1_to_10' NOP1 NOP2 NOP3 NOP4 NOP5 NOP6 NOP7 NOP8 NOP9 NOP10","'NOP_1_to_11' EQUAL", "P2SH,STRICTENC"], +["1","NOP1 NOP2 NOP3 NOP4 NOP5 NOP6 NOP7 NOP8 NOP9 NOP10 SSTX SSGEN SSRTX 2 EQUAL", "P2SH,STRICTENC"], +["'NOP_1_to_10' NOP1 NOP2 NOP3 NOP4 NOP5 NOP6 NOP7 NOP8 NOP9 NOP10 SSTX SSGEN SSRTX","'NOP_1_to_11' EQUAL", "P2SH,STRICTENC"], ["Ensure 100% coverage of discouraged NOPS"], ["1", "NOP1", "P2SH,DISCOURAGE_UPGRADABLE_NOPS"], @@ -181,7 +159,7 @@ "P2SH,DISCOURAGE_UPGRADABLE_NOPS", "Discouraged NOP10 in redeemScript"], ["0x50","1", "P2SH,STRICTENC", "opcode 0x50 is reserved"], -["1", "IF 0xba ELSE 1 ENDIF", "P2SH,STRICTENC", "opcodes above NOP10 invalid if executed"], +["1", "IF 0xba ELSE 1 ENDIF", "P2SH,STRICTENC", "opcodes above SSRTX invalid if executed"], ["1", "IF 0xbb ELSE 1 ENDIF", "P2SH,STRICTENC"], ["1", "IF 0xbc ELSE 1 ENDIF", "P2SH,STRICTENC"], ["1", "IF 0xbd ELSE 1 ENDIF", "P2SH,STRICTENC"], @@ -261,31 +239,31 @@ ["NOP", "HASH256", "P2SH,STRICTENC"], ["NOP", 
-"'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'", +"'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'", "P2SH,STRICTENC", -">520 byte push"], +">2048 byte push"], ["0", -"IF 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' ENDIF 1", +"IF 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' ENDIF 1", "P2SH,STRICTENC", -">520 byte push in non-executed IF branch"], +">2048 byte push in non-executed IF branch"], ["1", -"0x61616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", +"0x61616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", "P2SH,STRICTENC", -">201 opcodes executed. 0x61 is NOP"], +">255 opcodes executed. 
0x61 is NOP"], ["0", -"IF 0x6161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161 ENDIF 1", +"IF 0x61616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161 ENDIF 1", "P2SH,STRICTENC", -">201 opcodes including non-executed IF branch. 0x61 is NOP"], -["1 2 3 4 5 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", +">255 opcodes including non-executed IF branch. 0x61 is NOP"], +["1 2 3 4 5 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", "1 2 3 4 5 6 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", "P2SH,STRICTENC", -">1,000 stack size (0x6f is 3DUP)"], -["1 2 3 4 5 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", -"1 TOALTSTACK 2 TOALTSTACK 3 4 5 6 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", +">1,024 stack size (0x6f is 3DUP)"], +["1 2 3 4 5 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", +"1 TOALTSTACK 2 TOALTSTACK 3 4 5 6 
0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", "P2SH,STRICTENC", -">1,000 stack+altstack size"], +">1,024 stack+altstack size"], ["NOP", -"0 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 2DUP 0x616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", +"0 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 2DUP 0x616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", 
"P2SH,STRICTENC", "10,001-byte scriptPubKey"], @@ -295,9 +273,6 @@ ["1","VERIF", "P2SH,STRICTENC", "OP_VERIF is reserved"], ["1","VERNOTIF", "P2SH,STRICTENC", "OP_VERNOTIF is reserved"], ["1","RESERVED", "P2SH,STRICTENC", "OP_RESERVED is reserved"], -["1","RESERVED1", "P2SH,STRICTENC", "OP_RESERVED1 is reserved"], -["1","RESERVED2", "P2SH,STRICTENC", "OP_RESERVED2 is reserved"], -["1","0xba", "P2SH,STRICTENC", "0xba == OP_NOP10 + 1"], ["2147483648", "1ADD 1", "P2SH,STRICTENC", "We cannot do math on 5-byte integers"], ["2147483648", "NEGATE 1", "P2SH,STRICTENC", "We cannot do math on 5-byte integers"], @@ -378,21 +353,21 @@ ["", "1 CHECKMULTISIG NOT", "STRICTENC", "CHECKMULTISIG must error when there are not enough pubkeys on the stack"], ["", "-1 0 CHECKMULTISIG NOT", "STRICTENC", "CHECKMULTISIG must error when the specified number of signatures is negative"], ["", "1 'pk1' 1 CHECKMULTISIG NOT", "STRICTENC", "CHECKMULTISIG must error when there are not enough signatures on the stack"], -["", "'dummy' 'sig1' 1 'pk1' 1 CHECKMULTISIG IF 1 ENDIF", "", "CHECKMULTISIG must push false to stack when signature is invalid when NOT in strict enc mode"], +["", "'sig1' 1 'pk1' 1 CHECKMULTISIG IF 1 ENDIF", "", "CHECKMULTISIG must push false to stack when signature is invalid when NOT in strict enc mode"], ["", -"0 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 
0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG", +"0 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 
0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG", "P2SH,STRICTENC", -"202 CHECKMULTISIGS, fails due to 201 op limit"], +"202 CHECKMULTISIGS, fails due to 255 op limit"], ["1", "0 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 
0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 
0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY", "P2SH,STRICTENC"], ["", -"NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG", +"NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG ", "P2SH,STRICTENC", -"Fails due to 201 sig op limit"], +"Fails due to 255 sig op limit"], ["1", "NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 
'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY", @@ -599,12 +574,6 @@ "DERSIG", "P2PK NOT with bad sig with too much R padding" ], -[ - "0x47 0x30440220005ece1335e7f657a1a1f476a7fb5bd90964e8a022489f890614a04acfb734c002206c12b8294a6513c7710e8c82d3c23d75cdbfe83200eb7efb495701958501a5d601", - "0x21 0x03363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff4640 CHECKSIG NOT", - "", - "P2PK NOT with too much R padding but no DERSIG" -], [ "0x47 0x30440220005ece1335e7f657a1a1f476a7fb5bd90964e8a022489f890614a04acfb734c002206c12b8294a6513c7710e8c82d3c23d75cdbfe83200eb7efb495701958501a5d601", "0x21 0x03363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff4640 CHECKSIG NOT", @@ -617,12 +586,6 @@ "DERSIG", "BIP66 example 1, with DERSIG" ], -[ - "0x47 0x304402208e43c0b91f7c1e5bc58e41c8185f8a6086e111b0090187968a86f2822462d3c902200a58f4076b1133b18ff1dc83ee51676e44c60cc608d9534e0df5ace0424fc0be01", - "0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 CHECKSIG NOT", - "", - "BIP66 example 2, without DERSIG" -], [ "0x47 0x304402208e43c0b91f7c1e5bc58e41c8185f8a6086e111b0090187968a86f2822462d3c902200a58f4076b1133b18ff1dc83ee51676e44c60cc608d9534e0df5ace0424fc0be01", "0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 CHECKSIG NOT", @@ -665,18 +628,6 @@ "DERSIG", "BIP66 example 7, with DERSIG" ], -[ - "0 0x47 0x30440220b119d67d389315308d1745f734a51ff3ec72e06081e84e236fdf9dc2f5d2a64802204b04e3bc38674c4422ea317231d642b56dc09d214a1ecbbf16ecca01ed996e2201 0x47 0x3044022079ea80afd538d9ada421b5101febeb6bc874e01dde5bca108c1d0479aec339a4022004576db8f66130d1df686ccf00935703689d69cf539438da1edab208b0d63c4801", - "2 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 0x21 0x03363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff4640 2 CHECKMULTISIG NOT", - "", - "BIP66 example 8, without DERSIG" -], -[ - "0 0x47 0x30440220b119d67d389315308d1745f734a51ff3ec72e06081e84e236fdf9dc2f5d2a64802204b04e3bc38674c4422ea317231d642b56dc09d214a1ecbbf16ecca01ed996e2201 0x47 0x3044022079ea80afd538d9ada421b5101febeb6bc874e01dde5bca108c1d0479aec339a4022004576db8f66130d1df686ccf00935703689d69cf539438da1edab208b0d63c4801", - "2 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 0x21 0x03363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff4640 2 CHECKMULTISIG NOT", - "DERSIG", - "BIP66 example 8, with DERSIG" -], [ "0 0 0x47 0x3044022081aa9d436f2154e8b6d600516db03d78de71df685b585a9807ead4210bd883490220534bb6bdf318a419ac0749660b60e78d17d515558ef369bf872eff405b676b2e01", "2 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 0x21 0x03363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff4640 2 CHECKMULTISIG", @@ -725,12 +676,6 @@ "STRICTENC", "P2PK with hybrid pubkey" ], -[ - "0x47 0x30440220035d554e3153c14950c9993f41c496607a8e24093db0595be7bf875cf64fcf1f02204731c8c4e5daf15e706cec19cdd8f2c5b1d05490e11dab8465ed426569b6e92101", - "0x41 0x0679be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8 CHECKSIG NOT", - "", - "P2PK NOT with hybrid pubkey but no 
STRICTENC" -], [ "0x47 0x30440220035d554e3153c14950c9993f41c496607a8e24093db0595be7bf875cf64fcf1f02204731c8c4e5daf15e706cec19cdd8f2c5b1d05490e11dab8465ed426569b6e92101", "0x41 0x0679be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8 CHECKSIG NOT", @@ -761,18 +706,6 @@ "STRICTENC", "P2PK NOT with invalid sig and undefined hashtype" ], -[ - "1 0x47 0x3044022051254b9fb476a52d85530792b578f86fea70ec1ffb4393e661bcccb23d8d63d3022076505f94a403c86097841944e044c70c2045ce90e36de51f7e9d3828db98a07501 0x47 0x304402200a358f750934b3feb822f1966bfcd8bbec9eeaa3a8ca941e11ee5960e181fa01022050bf6b5a8e7750f70354ae041cb68a7bade67ec6c3ab19eb359638974410626e01 0x47 0x304402200955d031fff71d8653221e85e36c3c85533d2312fc3045314b19650b7ae2f81002202a6bb8505e36201909d0921f01abff390ae6b7ff97bbf959f98aedeb0a56730901", - "3 0x21 0x0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 0x21 0x03363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff4640 3 CHECKMULTISIG", - "NULLDUMMY", - "3-of-3 with nonzero dummy" -], -[ - "1 0x47 0x304402201bb2edab700a5d020236df174fefed78087697143731f659bea59642c759c16d022061f42cdbae5bcd3e8790f20bf76687443436e94a634321c16a72aa54cbc7c2ea01 0x47 0x304402204bb4a64f2a6e5c7fb2f07fef85ee56fde5e6da234c6a984262307a20e99842d702206f8303aaba5e625d223897e2ffd3f88ef1bcffef55f38dc3768e5f2e94c923f901 0x47 0x3044022040c2809b71fffb155ec8b82fe7a27f666bd97f941207be4e14ade85a1249dd4d02204d56c85ec525dd18e29a0533d5ddf61b6b1bb32980c2f63edf951aebf7a27bfe01", - "3 0x21 0x0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 0x21 0x03363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff4640 3 CHECKMULTISIG NOT", - "NULLDUMMY", - "3-of-3 NOT with invalid sig with nonzero dummy" -], [ "0 0x47 0x304402200abeb4bd07f84222f474aed558cfbdfc0b4e96cde3c2935ba7098b1ff0bd74c302204a04c1ca67b2a20abee210cf9a21023edccbbf8024b988812634233115c6b73901 DUP", "2 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 2 CHECKMULTISIG", diff --git a/txscript/data/script_valid.json b/txscript/data/script_valid.json index a4e15fae..7f4a2a6e 100644 --- a/txscript/data/script_valid.json +++ b/txscript/data/script_valid.json @@ -1,7 +1,7 @@ [ ["Format is: [scriptSig, scriptPubKey, flags, ... comments]"], ["It is evaluated as if there was a crediting coinbase transaction with two 0"], -["pushes as scriptSig, and one output of 0 satoshi and given scriptPubKey,"], +["pushes as scriptSig, and one output of 0 atoms and given scriptPubKey,"], ["followed by a spending transaction which spends this output as only input (and"], ["correct prevout hash), using the given scriptSig. 
All nLockTimes are 0, all"], ["nSequences are max."], @@ -29,6 +29,31 @@ ["0x4d 0x0100 0x08","8 EQUAL", "P2SH,STRICTENC", "0x4d is OP_PUSHDATA2"], ["0x4e 0x01000000 0x09","9 EQUAL", "P2SH,STRICTENC", "0x4e is OP_PUSHDATA4"], +["'a' 'b'", "CAT", "P2SH,STRICTENC"], +["'a' 'b' 0", "IF CAT ELSE 1 ENDIF", "P2SH,STRICTENC"], + +["'abc' 2 1", "SUBSTR", "P2SH,STRICTENC"], +["'abc' 1 1 0", "IF SUBSTR ELSE 1 ENDIF", "P2SH,STRICTENC"], +["'abc' 2 0", "IF LEFT ELSE 1 ENDIF", "P2SH,STRICTENC"], +["'abc' 2 0", "IF RIGHT ELSE 1 ENDIF", "P2SH,STRICTENC"], + +["1 2 0 IF AND ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC"], +["1 2 0 IF OR ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC"], +["1 2 0 IF XOR ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC"], +["2 0 IF 2MUL ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC"], +["2 0 IF 2DIV ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC"], +["2 2 0 IF MUL ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC"], +["2 2 0 IF DIV ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC"], +["2 2 0 IF MOD ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC"], +["2 2 0 IF LSHIFT ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC"], +["2 2 0 IF RSHIFT ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC"], + +["2 DUP MUL", "4 EQUAL", "P2SH,STRICTENC"], +["2 DUP DIV", "1 EQUAL", "P2SH,STRICTENC"], +["7 3 MOD", "1 EQUAL", "P2SH,STRICTENC"], +["2 2 LSHIFT", "8 EQUAL", "P2SH,STRICTENC"], +["2 1 RSHIFT", "1 EQUAL", "P2SH,STRICTENC"], + ["0x4c 0x00","0 EQUAL", "P2SH,STRICTENC"], ["0x4d 0x0000","0 EQUAL", "P2SH,STRICTENC"], ["0x4e 0x00000000","0 EQUAL", "P2SH,STRICTENC"], @@ -37,7 +62,7 @@ ["0x51", "0x5f ADD 0x60 EQUAL", "P2SH,STRICTENC", "0x51 through 0x60 push 1 through 16 onto stack"], ["1","NOP", "P2SH,STRICTENC"], ["0", "IF VER ELSE 1 ENDIF", "P2SH,STRICTENC", "VER non-functional (ok if not executed)"], -["0", "IF RESERVED RESERVED1 RESERVED2 ELSE 1 ENDIF", "P2SH,STRICTENC", "RESERVED ok in un-executed IF"], +["0", "IF RESERVED ELSE 1 ENDIF", "P2SH,STRICTENC", "RESERVED ok in un-executed IF"], ["1", "DUP IF ENDIF", "P2SH,STRICTENC"], ["1", "IF 1 ENDIF", "P2SH,STRICTENC"], @@ -219,17 +244,17 @@ ["''", "SHA1 0x14 0xda39a3ee5e6b4b0d3255bfef95601890afd80709 EQUAL", "P2SH,STRICTENC"], ["'a'", "SHA1 0x14 0x86f7e437faa5a7fce15d1ddcb9eaeaea377667b8 EQUAL", "P2SH,STRICTENC"], ["'abcdefghijklmnopqrstuvwxyz'", "SHA1 0x14 0x32d10c7b8cf96570ca04ce37f2a19d84240d3a89 EQUAL", "P2SH,STRICTENC"], -["''", "SHA256 0x20 0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 EQUAL", "P2SH,STRICTENC"], -["'a'", "SHA256 0x20 0xca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb EQUAL", "P2SH,STRICTENC"], -["'abcdefghijklmnopqrstuvwxyz'", "SHA256 0x20 0x71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73 EQUAL", "P2SH,STRICTENC"], +["''", "SHA256 0x20 0x716f6e863f744b9ac22c97ec7b76ea5f5908bc5b2f67c61510bfc4751384ea7a EQUAL", "P2SH,STRICTENC"], +["'a'", "SHA256 0x20 0x43234ff894a9c0590d0246cfc574eb781a80958b01d7a2fa1ac73c673ba5e311 EQUAL", "P2SH,STRICTENC"], +["'abcdefghijklmnopqrstuvwxyz'", "SHA256 0x20 0x6c648655a21f704a0bc72eb367b24144c9e8a1b07efc34165b561b6c33514427 EQUAL", "P2SH,STRICTENC"], ["''", "DUP HASH160 SWAP SHA256 RIPEMD160 EQUAL", "P2SH,STRICTENC"], ["''", "DUP HASH256 SWAP SHA256 SHA256 EQUAL", "P2SH,STRICTENC"], -["''", "NOP HASH160 0x14 0xb472a266d0bd89c13706a4132ccfb16f7c3b9fcb EQUAL", "P2SH,STRICTENC"], -["'a'", "HASH160 NOP 0x14 0x994355199e516ff76c4fa4aab39337b9d84cf12b EQUAL", "P2SH,STRICTENC"], -["'abcdefghijklmnopqrstuvwxyz'", "HASH160 0x4c 0x14 0xc286a1af0947f58d1ad787385b1c2c4a976f9e71 EQUAL", "P2SH,STRICTENC"], -["''", "HASH256 0x20 
0x5df6e0e2761359d30a8275058e299fcc0381534545f55cf43e41983f5d4c9456 EQUAL", "P2SH,STRICTENC"], -["'a'", "HASH256 0x20 0xbf5d3affb73efd2ec6c36ad3112dd933efed63c4e1cbffcfa88e2759c144f2d8 EQUAL", "P2SH,STRICTENC"], -["'abcdefghijklmnopqrstuvwxyz'", "HASH256 0x4c 0x20 0xca139bc10c2f660da42666f72e89a225936fc60f193c161124a672050c434671 EQUAL", "P2SH,STRICTENC"], +["''", "NOP HASH160 0x14 0x413320bee32a3bdb92b145d337316739d54a9287 EQUAL", "P2SH,STRICTENC"], +["'a'", "HASH160 NOP 0x14 0x8704ae49993a1b72b8a8b28decc7f9e58be752ce EQUAL", "P2SH,STRICTENC"], +["'abcdefghijklmnopqrstuvwxyz'", "HASH160 0x4c 0x14 0x64d140650ed333a070a8ae05fac65032aebdfe5f EQUAL", "P2SH,STRICTENC"], +["''", "HASH256 0x20 0xd8ee5f957b78a961fb729098b4efb56440a14e05e3c55890f5edbc626380aaa6 EQUAL", "P2SH,STRICTENC"], +["'a'", "HASH256 0x20 0x8a298a038c1a85591aa7abda75a8a393b742ee3f6b759ff15a3b5a8edff78532 EQUAL", "P2SH,STRICTENC"], +["'abcdefghijklmnopqrstuvwxyz'", "HASH256 0x4c 0x20 0x513d6e9cb7f369fa60933bc48818da2cd0c0a079ebdd29a0cce382d4625dcb39 EQUAL", "P2SH,STRICTENC"], ["1","NOP1 NOP2 NOP3 NOP4 NOP5 NOP6 NOP7 NOP8 NOP9 NOP10 1 EQUAL", "P2SH,STRICTENC"], @@ -312,21 +337,21 @@ ["0", "IF 0xff ELSE 1 ENDIF", "P2SH,STRICTENC"], ["NOP", -"'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'", +"'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'", "P2SH,STRICTENC", -"520 byte push"], +"2048 byte push"], ["1", -"0x616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", +"0x616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", "P2SH,STRICTENC", -"201 opcodes executed. 0x61 is NOP"], -["1 2 3 4 5 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", +"255 opcodes executed. 0x61 is NOP"], +["1 2 3 4 5 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", "1 2 3 4 5 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", "P2SH,STRICTENC", -"1,000 stack size (0x6f is 3DUP)"], -["1 TOALTSTACK 2 TOALTSTACK 3 4 5 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", +"1,024 stack size (0x6f is 3DUP)"], +["1 TOALTSTACK 2 TOALTSTACK 3 4 5 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", "1 2 3 4 5 6 7 
0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", "P2SH,STRICTENC", -"1,000 stack size (altstack cleared between scriptSig/scriptPubKey)"], +"1,024 stack size (altstack cleared between scriptSig/scriptPubKey)"], ["'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", "'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 2DUP 0x616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", "P2SH,STRICTENC", @@ -439,7 +464,7 @@ ["0", "SHA256", "P2SH,STRICTENC"], ["0", "HASH160", "P2SH,STRICTENC"], ["0", "HASH256", "P2SH,STRICTENC"], -["NOP", "CODESEPARATOR 1", "P2SH,STRICTENC"], +["NOP", "1", "P2SH,STRICTENC"], ["NOP", "NOP1 1", "P2SH,STRICTENC"], ["NOP", "NOP2 1", "P2SH,STRICTENC"], @@ -452,76 +477,76 @@ ["NOP", "NOP9 1", "P2SH,STRICTENC"], ["NOP", "NOP10 1", "P2SH,STRICTENC"], -["", "0 0 0 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC", "CHECKMULTISIG is allowed to have zero keys and/or sigs"], -["", "0 0 0 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 0 1 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC", "Zero sigs means no sigs are checked"], -["", "0 0 0 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 0 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC", "CHECKMULTISIG is allowed to have zero keys and/or sigs"], +["", "0 0 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 0 1 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC", "Zero sigs means no sigs are checked"], +["", "0 0 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 0 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC", "CHECKMULTISIG is allowed to have zero keys and/or sigs"], -["", "0 0 0 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 0 1 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC", "Zero sigs means no sigs are checked"], -["", "0 0 0 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 0 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC", "CHECKMULTISIG is allowed to have zero keys and/or sigs"], +["", "0 0 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 0 1 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC", "Zero sigs means no sigs are checked"], +["", "0 0 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 2 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC", "Test from up to 20 pubkeys, all not checked"], -["", "0 0 'a' 'b' 'c' 3 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 
'a' 'b' 'c' 'd' 4 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 5 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 6 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 7 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 8 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 9 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 10 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 11 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 12 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 13 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 14 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 15 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 16 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 17 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 18 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 19 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 2 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 3 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 4 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 5 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 6 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 7 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 8 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 9 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 10 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 11 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 12 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 13 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 14 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 15 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 
'm' 'n' 'o' 'p' 16 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 17 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 18 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 19 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], -["", "0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 2 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC", "Test from up to 20 pubkeys, all not checked"], +["", "0 'a' 'b' 'c' 3 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 4 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 5 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 6 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 7 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 8 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 9 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 10 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 11 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 12 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 13 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 14 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 15 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 16 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 17 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 18 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 19 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 2 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 3 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 4 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 5 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 6 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 7 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 8 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 9 CHECKMULTISIGVERIFY 
DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 10 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 11 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 12 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 13 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 14 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 15 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 16 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 17 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 18 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 19 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], +["", "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", "P2SH,STRICTENC"], ["", -"0 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 
CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG", +"0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 
CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG", "P2SH,STRICTENC", "nOpCount is incremented by the number of keys evaluated in addition to the usual one op per op. In this case we have zero keys, so we can execute 201 CHECKMULTISIGS"], ["1", -"0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 
CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY 0 0 0 CHECKMULTISIGVERIFY", +"0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 
0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 
CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY", "P2SH,STRICTENC"], ["", -"NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG", +"NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG", "P2SH,STRICTENC", "Even though there are no signatures being checked nOpCount is incremented by the number of keys."], ["1", -"NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 
'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY", +"NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY", "P2SH,STRICTENC"], -["0 0x01 1", "HASH160 0x14 0xda1745e9b549bd0bfa1a569971c77eba30cd5a4b EQUAL", "P2SH,STRICTENC", "Very basic P2SH"], -["0x4c 0 0x01 1", "HASH160 0x14 0xda1745e9b549bd0bfa1a569971c77eba30cd5a4b EQUAL", "P2SH,STRICTENC"], +["0 0x01 1", "HASH160 0x14 0xf5a8302ee8695bf836258b8f2b57b38a0be14e47 EQUAL", "P2SH,STRICTENC", "Very basic P2SH"], +["0x4c 0 0x01 1", "HASH160 0x14 0xf5a8302ee8695bf836258b8f2b57b38a0be14e47 EQUAL", "P2SH,STRICTENC"], ["0x40 0x42424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242", "0x4d 0x4000 0x42424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242 EQUAL", @@ -701,211 +726,5 @@ ["0x17 0x3014021077777777777777777777777777777777020001", "0 CHECKSIG NOT", "", "Zero-length S is correctly encoded for DERSIG"], ["0x27 0x302402107777777777777777777777777777777702108777777777777777777777777777777701", "0 CHECKSIG NOT", "", "Negative S is correctly encoded"], -["Automatically generated test cases"], -[ - "0x47 0x304402200a5c6163f07b8d3b013c4d1d6dba25e780b39658d79ba37af7057a3b7f15ffa102201fd9b4eaa9943f734928b99a83592c2e7bf342ea2680f6a2bb705167966b742001", - "0x41 0x0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8 CHECKSIG", - "", - "P2PK" -], -[ - "0x47 0x304402206e05a6fe23c59196ffe176c9ddc31e73a9885638f9d1328d47c0c703863b8876022076feb53811aa5b04e0e79f938eb19906cc5e67548bc555a8e8b8b0fc603d840c01 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508", - "DUP HASH160 0x14 0x1018853670f9f3b0582c5b9ee8ce93764ac32b93 EQUALVERIFY CHECKSIG", - "", - "P2PKH" -], -[ - "0x47 
0x304402204710a85181663b32d25c70ec2bbd14adff5ddfff6cb50d09e155ef5f541fc86c0220056b0cc949be9386ecc5f6c2ac0493269031dbb185781db90171b54ac127790281", - "0x41 0x048282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f5150811f8a8098557dfe45e8256e830b60ace62d613ac2f7b17bed31b6eaff6e26caf CHECKSIG", - "", - "P2PK anyonecanpay" -], -[ - "0x47 0x3044022003fef42ed6c7be8917441218f525a60e2431be978e28b7aca4d7a532cc413ae8022067a1f82c74e8d69291b90d148778405c6257bbcfc2353cc38a3e1f22bf44254601 0x23 0x210279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798ac", - "HASH160 0x14 0x23b0ad3477f2178bc0b3eed26e4e6316f4e83aa1 EQUAL", - "P2SH", - "P2SH(P2PK)" -], -[ - "0x47 0x304402204e2eb034be7b089534ac9e798cf6a2c79f38bcb34d1b179efd6f2de0841735db022071461beb056b5a7be1819da6a3e3ce3662831ecc298419ca101eb6887b5dd6a401 0x19 0x76a9147cf9c846cd4882efec4bf07e44ebdad495c94f4b88ac", - "HASH160 0x14 0x2df519943d5acc0ef5222091f9dfe3543f489a82 EQUAL", - "", - "P2SH(P2PKH), bad sig but no VERIFY_P2SH" -], -[ - "0 0x47 0x3044022051254b9fb476a52d85530792b578f86fea70ec1ffb4393e661bcccb23d8d63d3022076505f94a403c86097841944e044c70c2045ce90e36de51f7e9d3828db98a07501 0x47 0x304402200a358f750934b3feb822f1966bfcd8bbec9eeaa3a8ca941e11ee5960e181fa01022050bf6b5a8e7750f70354ae041cb68a7bade67ec6c3ab19eb359638974410626e01 0x47 0x304402200955d031fff71d8653221e85e36c3c85533d2312fc3045314b19650b7ae2f81002202a6bb8505e36201909d0921f01abff390ae6b7ff97bbf959f98aedeb0a56730901", - "3 0x21 0x0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 0x21 0x03363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff4640 3 CHECKMULTISIG", - "", - "3-of-3" -], -[ - "0 0x47 0x304402205b7d2c2f177ae76cfbbf14d589c113b0b35db753d305d5562dd0b61cbf366cfb02202e56f93c4f08a27f986cd424ffc48a462c3202c4902104d4d0ff98ed28f4bf8001 0x47 0x30440220563e5b3b1fc11662a84bc5ea2a32cc3819703254060ba30d639a1aaf2d5068ad0220601c1f47ddc76d93284dd9ed68f7c9974c4a0ea7cbe8a247d6bc3878567a5fca01 0x4c69 0x52210279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f8179821038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f515082103363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff464053ae", - "HASH160 0x14 0xc9e4a896d149702d0d1695434feddd52e24ad78d EQUAL", - "P2SH", - "P2SH(2-of-3)" -], -[ - "0x47 0x304402200060558477337b9022e70534f1fea71a318caf836812465a2509931c5e7c4987022078ec32bd50ac9e03a349ba953dfd9fe1c8d2dd8bdb1d38ddca844d3d5c78c11801", - "0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 CHECKSIG", - "", - "P2PK with too much R padding but no DERSIG" -], -[ - "0x48 0x304502202de8c03fc525285c9c535631019a5f2af7c6454fa9eb392a3756a4917c420edd02210046130bf2baf7cfc065067c8b9e33a066d9c15edcea9feb0ca2d233e3597925b401", - "0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 CHECKSIG", - "", - "P2PK with too much S padding but no DERSIG" -], -[ - "0x47 0x30440220d7a0417c3f6d1a15094d1cf2a3378ca0503eb8a57630953a9e2987e21ddd0a6502207a6266d686c99090920249991d3d42065b6d43eb70187b219c0db82e4f94d1a201", - "0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 CHECKSIG", - "", - "P2PK with too little R padding but no DERSIG" -], -[ - "0x47 0x30440220005ece1335e7f757a1a1f476a7fb5bd90964e8a022489f890614a04acfb734c002206c12b8294a6513c7710e8c82d3c23d75cdbfe83200eb7efb495701958501a5d601", - "0x21 0x03363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff4640 CHECKSIG NOT", - "", - "P2PK NOT with bad sig with 
too much R padding but no DERSIG" -], -[ - "0x47 0x30440220d7a0417c3f6d1a15094d1cf2a3378ca0503eb8a57630953a9e2987e21ddd0a6502207a6266d686c99090920249991d3d42065b6d43eb70187b219c0db82e4f94d1a201", - "0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 CHECKSIG", - "", - "BIP66 example 1, without DERSIG" -], -[ - "0", - "0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 CHECKSIG NOT", - "", - "BIP66 example 4, without DERSIG" -], -[ - "0", - "0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 CHECKSIG NOT", - "DERSIG", - "BIP66 example 4, with DERSIG" -], -[ - "1", - "0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 CHECKSIG NOT", - "", - "BIP66 example 6, without DERSIG" -], -[ - "0 0x47 0x30440220cae00b1444babfbf6071b0ba8707f6bd373da3df494d6e74119b0430c5db810502205d5231b8c5939c8ff0c82242656d6e06edb073d42af336c99fe8837c36ea39d501 0x47 0x3044022027c2714269ca5aeecc4d70edc88ba5ee0e3da4986e9216028f489ab4f1b8efce022022bd545b4951215267e4c5ceabd4c5350331b2e4a0b6494c56f361fa5a57a1a201", - "2 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 0x21 0x03363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff4640 2 CHECKMULTISIG", - "", - "BIP66 example 7, without DERSIG" -], -[ - "0 0 0x47 0x30440220da6f441dc3b4b2c84cfa8db0cd5b34ed92c9e01686de5a800d40498b70c0dcac02207c2cf91b0c32b860c4cd4994be36cfb84caf8bb7c3a8e4d96a31b2022c5299c501", - "2 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 0x21 0x03363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff4640 2 CHECKMULTISIG NOT", - "", - "BIP66 example 10, without DERSIG" -], -[ - "0 0x47 0x30440220b119d67d389315308d1745f734a51ff3ec72e06081e84e236fdf9dc2f5d2a64802204b04e3bc38674c4422ea317231d642b56dc09d214a1ecbbf16ecca01ed996e2201 0", - "2 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 0x21 0x03363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff4640 2 CHECKMULTISIG NOT", - "", - "BIP66 example 12, without DERSIG" -], -[ - "0 0x47 0x30440220b119d67d389315308d1745f734a51ff3ec72e06081e84e236fdf9dc2f5d2a64802204b04e3bc38674c4422ea317231d642b56dc09d214a1ecbbf16ecca01ed996e2201 0", - "2 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 0x21 0x03363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff4640 2 CHECKMULTISIG NOT", - "DERSIG", - "BIP66 example 12, with DERSIG" -], -[ - "0x48 0x304402203e4516da7253cf068effec6b95c41221c0cf3a8e6ccb8cbf1725b562e9afde2c022054e1c258c2981cdfba5df1f46661fb6541c44f77ca0092f3600331abfffb12510101", - "0x21 0x03363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff4640 CHECKSIG", - "", - "P2PK with multi-byte hashtype, without DERSIG" -], -[ - "0x48 0x304502203e4516da7253cf068effec6b95c41221c0cf3a8e6ccb8cbf1725b562e9afde2c022100ab1e3da73d67e32045a20e0b999e049978ea8d6ee5480d485fcf2ce0d03b2ef001", - "0x21 0x03363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff4640 CHECKSIG", - "", - "P2PK with high S but no LOW_S" -], -[ - "0x47 0x3044022057292e2d4dfe775becdd0a9e6547997c728cdf35390f6a017da56d654d374e4902206b643be2fc53763b4e284845bfea2c597d2dc7759941dce937636c9d341b71ed01", - "0x41 0x0679be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8 CHECKSIG", - "", - "P2PK with hybrid pubkey but no STRICTENC" -], -[ - "0x47 
0x30440220035d554e3153c04950c9993f41c496607a8e24093db0595be7bf875cf64fcf1f02204731c8c4e5daf15e706cec19cdd8f2c5b1d05490e11dab8465ed426569b6e92101", - "0x41 0x0679be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8 CHECKSIG NOT", - "", - "P2PK NOT with invalid hybrid pubkey but no STRICTENC" -], -[ - "0 0x47 0x304402202e79441ad1baf5a07fb86bae3753184f6717d9692680947ea8b6e8b777c69af1022079a262e13d868bb5a0964fefe3ba26942e1b0669af1afb55ef3344bc9d4fc4c401", - "1 0x41 0x0679be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 2 CHECKMULTISIG", - "", - "1-of-2 with the second 1 hybrid pubkey and no STRICTENC" -], -[ - "0 0x47 0x304402202e79441ad1baf5a07fb86bae3753184f6717d9692680947ea8b6e8b777c69af1022079a262e13d868bb5a0964fefe3ba26942e1b0669af1afb55ef3344bc9d4fc4c401", - "1 0x41 0x0679be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 2 CHECKMULTISIG", - "STRICTENC", - "1-of-2 with the second 1 hybrid pubkey" -], -[ - "0x47 0x304402206177d513ec2cda444c021a1f4f656fc4c72ba108ae063e157eb86dc3575784940220666fc66702815d0e5413bb9b1df22aed44f5f1efb8b99d41dd5dc9a5be6d205205", - "0x41 0x048282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f5150811f8a8098557dfe45e8256e830b60ace62d613ac2f7b17bed31b6eaff6e26caf CHECKSIG", - "", - "P2PK with undefined hashtype but no STRICTENC" -], -[ - "0x47 0x304402207409b5b320296e5e2136a7b281a7f803028ca4ca44e2b83eebd46932677725de02202d4eea1c8d3c98e6f42614f54764e6e5e6542e213eb4d079737e9a8b6e9812ec05", - "0x41 0x048282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f5150811f8a8098557dfe45e8256e830b60ace62d613ac2f7b17bed31b6eaff6e26caf CHECKSIG NOT", - "", - "P2PK NOT with invalid sig and undefined hashtype but no STRICTENC" -], -[ - "1 0x47 0x3044022051254b9fb476a52d85530792b578f86fea70ec1ffb4393e661bcccb23d8d63d3022076505f94a403c86097841944e044c70c2045ce90e36de51f7e9d3828db98a07501 0x47 0x304402200a358f750934b3feb822f1966bfcd8bbec9eeaa3a8ca941e11ee5960e181fa01022050bf6b5a8e7750f70354ae041cb68a7bade67ec6c3ab19eb359638974410626e01 0x47 0x304402200955d031fff71d8653221e85e36c3c85533d2312fc3045314b19650b7ae2f81002202a6bb8505e36201909d0921f01abff390ae6b7ff97bbf959f98aedeb0a56730901", - "3 0x21 0x0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 0x21 0x03363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff4640 3 CHECKMULTISIG", - "", - "3-of-3 with nonzero dummy but no NULLDUMMY" -], -[ - "1 0x47 0x304402201bb2edab700a5d020236df174fefed78087697143731f659bea59642c759c16d022061f42cdbae5bcd3e8790f20bf76687443436e94a634321c16a72aa54cbc7c2ea01 0x47 0x304402204bb4a64f2a6e5c7fb2f07fef85ee56fde5e6da234c6a984262307a20e99842d702206f8303aaba5e625d223897e2ffd3f88ef1bcffef55f38dc3768e5f2e94c923f901 0x47 0x3044022040c2809b71fffb155ec8b82fe7a27f666bd97f941207be4e14ade85a1249dd4d02204d56c85ec525dd18e29a0533d5ddf61b6b1bb32980c2f63edf951aebf7a27bfe01", - "3 0x21 0x0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 0x21 0x03363d90d447b00c9c99ceac05b6262ee053441c7e55552ffe526bad8f83ff4640 3 CHECKMULTISIG NOT", - "", - "3-of-3 NOT with 
invalid sig and nonzero dummy but no NULLDUMMY" -], -[ - "0 0x47 0x304402200abeb4bd07f84222f474aed558cfbdfc0b4e96cde3c2935ba7098b1ff0bd74c302204a04c1ca67b2a20abee210cf9a21023edccbbf8024b988812634233115c6b73901 DUP", - "2 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 2 CHECKMULTISIG", - "", - "2-of-2 with two identical keys and sigs pushed using OP_DUP but no SIGPUSHONLY" -], -[ - "0 0x47 0x304402200abeb4bd07f84222f474aed558cfbdfc0b4e96cde3c2935ba7098b1ff0bd74c302204a04c1ca67b2a20abee210cf9a21023edccbbf8024b988812634233115c6b73901 0x47 0x304402200abeb4bd07f84222f474aed558cfbdfc0b4e96cde3c2935ba7098b1ff0bd74c302204a04c1ca67b2a20abee210cf9a21023edccbbf8024b988812634233115c6b73901", - "2 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 0x21 0x038282263212c609d9ea2a6e3e172de238d8c39cabd5ac1ca10646e23fd5f51508 2 CHECKMULTISIG", - "SIGPUSHONLY", - "2-of-2 with two identical keys and sigs pushed" -], -[ - "11 0x47 0x304402200a5c6163f07b8d3b013c4d1d6dba25e780b39658d79ba37af7057a3b7f15ffa102201fd9b4eaa9943f734928b99a83592c2e7bf342ea2680f6a2bb705167966b742001", - "0x41 0x0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8 CHECKSIG", - "P2SH", - "P2PK with unnecessary input but no CLEANSTACK" -], -[ - "11 0x47 0x304402202f7505132be14872581f35d74b759212d9da40482653f1ffa3116c3294a4a51702206adbf347a2240ca41c66522b1a22a41693610b76a8e7770645dc721d1635854f01 0x43 0x410479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8ac", - "HASH160 0x14 0x31edc23bdafda4639e669f89ad6b2318dd79d032 EQUAL", - "P2SH", - "P2SH with unnecessary input but no CLEANSTACK" -], -[ - "0x47 0x304402202f7505132be14872581f35d74b759212d9da40482653f1ffa3116c3294a4a51702206adbf347a2240ca41c66522b1a22a41693610b76a8e7770645dc721d1635854f01 0x43 0x410479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8ac", - "HASH160 0x14 0x31edc23bdafda4639e669f89ad6b2318dd79d032 EQUAL", - "CLEANSTACK,P2SH", - "P2SH with CLEANSTACK" -], - ["The End"] ] diff --git a/txscript/data/tx_invalid.json b/txscript/data/tx_invalid.json index fa133340..56d65fe5 100644 --- a/txscript/data/tx_invalid.json +++ b/txscript/data/tx_invalid.json @@ -31,14 +31,14 @@ ["Tests for CheckTransaction()"], ["No inputs"], -["Skipped because this is not checked by btcscript, this is a problem for chain."], +["Skipped because this is not checked by dcrscript, this is a problem for chain."], ["No outputs"], [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "HASH160 0x14 0x05ab9e14d983742513f0f451e105ffb4198d1dd4 EQUAL"]], "01000000010001000000000000000000000000000000000000000000000000000000000000000000006d483045022100f16703104aab4e4088317c862daec83440242411b039d14280e03dd33b487ab802201318a7be236672c5c56083eb7a5a195bc57a40af7923ff8545016cd3b571e2a601232103c40e5d339df3f30bf753e7e04450ae4ef76c9e45587d1d993bdc4cd06f0651c7acffffffff0000000000", "P2SH"], ["Negative output"], -["Removed because btcscript doesn't do tx sanity checking."], +["Removed because dcrscript doesn't do tx sanity checking."], ["MAX_MONEY + 1 output"], [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "HASH160 0x14 0x32afac281462b822adbec5094b8d4d337dd5bd6a EQUAL"]], @@ -49,18 +49,18 @@ 
"01000000010001000000000000000000000000000000000000000000000000000000000000000000006d483045022027deccc14aa6668e78a8c9da3484fbcd4f9dcc9bb7d1b85146314b21b9ae4d86022100d0b43dece8cfb07348de0ca8bc5b86276fa88f7f2138381128b7c36ab2e42264012321029bb13463ddd5d2cc05da6e84e37536cb9525703cfd8f43afdb414988987a92f6acffffffff020040075af075070001510001000000000000015100000000", "P2SH"], ["Duplicate inputs"], -["Removed because btcscript doesn't check input duplication, btcchain does"], +["Removed because dcrscript doesn't check input duplication, chain does"], ["Coinbase of size 1"], ["Note the input is just required to make the tester happy"], -["Removed because btcscript doesn't handle coinbase checking, btcchain does"], +["Removed because dcrscript doesn't handle coinbase checking, chain does"], ["Coinbase of size 101"], ["Note the input is just required to make the tester happy"], -["Removed because btcscript doesn't handle coinbase checking, btcchain does"], +["Removed because dcrscript doesn't handle coinbase checking, chain does"], ["Null txin"], -["Removed because btcscript doesn't do tx sanity checking."], +["Removed because dcrscript doesn't do tx sanity checking."], ["Same as the transactions in valid with one input SIGHASH_ALL and one SIGHASH_ANYONECANPAY, but we set the _ANYONECANPAY sequence number, invalidating the SIGHASH_ALL signature"], [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x21 0x035e7f0d4d0841bcd56c39337ed086b1a633ee770c1ffdd94ac552a95ac2ce0efc CHECKSIG"], @@ -79,26 +79,6 @@ "0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba260000000004847304402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01ffffffff0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271ad504b88ac00000000", "P2SH"], -["CHECKMULTISIG SCRIPT_VERIFY_NULLDUMMY tests:"], - -["The following is a tweaked form of 23b397edccd3740a74adb603c9756370fafcde9bcc4483eb271ecad09a94dd63"], -["It is an OP_CHECKMULTISIG with the dummy value set to something other than an empty string"], -[[["60a20bd93aa49ab4b28d514ec10b06e1829ce6818ec06cd3aabd013ebcdc4bb1", 0, "1 0x41 0x04cc71eb30d653c0c3163990c47b976f3fb3f37cccdcbedb169a1dfef58bbfbfaff7d8a473e7e2e6d317b87bafe8bde97e3cf8f065dec022b51d11fcdd0d348ac4 0x41 0x0461cbdcc5409fb4b4d42b51d33381354d80e550078cb532a34bfa2fcfdeb7d76519aecc62770f5b0e4ef8551946d8a540911abe3e7854a26f39f58b25c15342af 2 OP_CHECKMULTISIG"]], -"0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba260000000004a010047304402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01ffffffff0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271ad504b88ac00000000", "P2SH,NULLDUMMY"], - -["As above, but using a OP_1"], -[[["60a20bd93aa49ab4b28d514ec10b06e1829ce6818ec06cd3aabd013ebcdc4bb1", 0, "1 0x41 0x04cc71eb30d653c0c3163990c47b976f3fb3f37cccdcbedb169a1dfef58bbfbfaff7d8a473e7e2e6d317b87bafe8bde97e3cf8f065dec022b51d11fcdd0d348ac4 0x41 0x0461cbdcc5409fb4b4d42b51d33381354d80e550078cb532a34bfa2fcfdeb7d76519aecc62770f5b0e4ef8551946d8a540911abe3e7854a26f39f58b25c15342af 2 OP_CHECKMULTISIG"]], 
-"0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba26000000000495147304402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01ffffffff0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271ad504b88ac00000000", "P2SH,NULLDUMMY"], - -["As above, but using a OP_1NEGATE"], -[[["60a20bd93aa49ab4b28d514ec10b06e1829ce6818ec06cd3aabd013ebcdc4bb1", 0, "1 0x41 0x04cc71eb30d653c0c3163990c47b976f3fb3f37cccdcbedb169a1dfef58bbfbfaff7d8a473e7e2e6d317b87bafe8bde97e3cf8f065dec022b51d11fcdd0d348ac4 0x41 0x0461cbdcc5409fb4b4d42b51d33381354d80e550078cb532a34bfa2fcfdeb7d76519aecc62770f5b0e4ef8551946d8a540911abe3e7854a26f39f58b25c15342af 2 OP_CHECKMULTISIG"]], -"0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba26000000000494f47304402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01ffffffff0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271ad504b88ac00000000", "P2SH,NULLDUMMY"], - -["As above, but with the dummy byte missing"], -[[["60a20bd93aa49ab4b28d514ec10b06e1829ce6818ec06cd3aabd013ebcdc4bb1", 0, "1 0x41 0x04cc71eb30d653c0c3163990c47b976f3fb3f37cccdcbedb169a1dfef58bbfbfaff7d8a473e7e2e6d317b87bafe8bde97e3cf8f065dec022b51d11fcdd0d348ac4 0x41 0x0461cbdcc5409fb4b4d42b51d33381354d80e550078cb532a34bfa2fcfdeb7d76519aecc62770f5b0e4ef8551946d8a540911abe3e7854a26f39f58b25c15342af 2 OP_CHECKMULTISIG"]], -"0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba260000000004847304402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01ffffffff0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271ad504b88ac00000000", "P2SH,NULLDUMMY"], - - ["Empty stack when we try to run CHECKSIG"], [[["ad503f72c18df5801ee64d76090afe4c607fb2b822e9b7b63c5826c50e22fc3b", 0, "0x21 0x027c3a97665bf283a102a587a62a30a0c102d4d3b141015e2cae6f64e2543113e5 CHECKSIG NOT"]], "01000000013bfc220ec526583cb6b7e922b8b27f604cfe0a09764de61e80f58dc1723f50ad0000000000ffffffff0101000000000000002321027c3a97665bf283a102a587a62a30a0c102d4d3b141015e2cae6f64e2543113e5ac00000000", "P2SH"], @@ -114,6 +94,78 @@ [[["a955032f4d6b0c9bfe8cad8f00a8933790b9c1dc28c82e0f48e75b35da0e4944", 0, "IF CODESEPARATOR ENDIF 0x21 0x0378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71 CHECKSIGVERIFY CODESEPARATOR 1"]], "010000000144490eda355be7480f2ec828dcc1b9903793a8008fad8cfe9b0c6b4d2f0355a9000000004a483045022100fa4a74ba9fd59c59f46c3960cf90cbe0d2b743c471d24a3d5d6db6002af5eebb02204d70ec490fd0f7055a7c45f86514336e3a7f03503dacecabb247fc23f15c83510100ffffffff010000000000000000016a00000000", "P2SH"], +["CHECKLOCKTIMEVERIFY tests"], + +["By-height locks, with argument just beyond tx nLockTime"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "1 NOP2 1"]], +"010000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", "P2SH,CHECKLOCKTIMEVERIFY"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "499999999 NOP2 1"]], +"0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000fe64cd1d", "P2SH,CHECKLOCKTIMEVERIFY"], + +["By-time locks, with argument just beyond tx nLockTime (but within numerical boundaries)"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, 
"500000001 NOP2 1"]], +"01000000010001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000065cd1d", "P2SH,CHECKLOCKTIMEVERIFY"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "4294967295 NOP2 1"]], +"0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000feffffff", "P2SH,CHECKLOCKTIMEVERIFY"], + +["Argument missing"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "NOP2 1"]], +"010000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", "P2SH,CHECKLOCKTIMEVERIFY"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "1"]], +"010000000100010000000000000000000000000000000000000000000000000000000000000000000001b1010000000100000000000000000000000000", "P2SH,CHECKLOCKTIMEVERIFY"], + +["Argument negative with by-blockheight nLockTime=0"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "-1 NOP2 1"]], +"010000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", "P2SH,CHECKLOCKTIMEVERIFY"], + +["Argument negative with by-blocktime nLockTime=500,000,000"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "-1 NOP2 1"]], +"01000000010001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000065cd1d", "P2SH,CHECKLOCKTIMEVERIFY"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "1"]], +"010000000100010000000000000000000000000000000000000000000000000000000000000000000004005194b1010000000100000000000000000002000000", "P2SH,CHECKLOCKTIMEVERIFY"], + +["Input locked"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0 NOP2 1"]], +"010000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff0100000000000000000000000000", "P2SH,CHECKLOCKTIMEVERIFY"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0"]], +"01000000010001000000000000000000000000000000000000000000000000000000000000000000000251b1ffffffff0100000000000000000002000000", "P2SH,CHECKLOCKTIMEVERIFY"], + +["Another input being unlocked isn't sufficient; the CHECKLOCKTIMEVERIFY-using input must be unlocked"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0 NOP2 1"] , + ["0000000000000000000000000000000000000000000000000000000000000200", 1, "1"]], +"010000000200010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00020000000000000000000000000000000000000000000000000000000000000100000000000000000100000000000000000000000000", "P2SH,CHECKLOCKTIMEVERIFY"], + +["Argument/tx height/time mismatch, both versions"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0 NOP2 1"]], +"01000000010001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000065cd1d", "P2SH,CHECKLOCKTIMEVERIFY"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0"]], +"01000000010001000000000000000000000000000000000000000000000000000000000000000000000251b100000000010000000000000000000065cd1d", "P2SH,CHECKLOCKTIMEVERIFY"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "499999999 NOP2 1"]], 
+"01000000010001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000065cd1d", "P2SH,CHECKLOCKTIMEVERIFY"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "500000000 NOP2 1"]], +"010000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", "P2SH,CHECKLOCKTIMEVERIFY"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "500000000 NOP2 1"]], +"0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ff64cd1d", "P2SH,CHECKLOCKTIMEVERIFY"], + +["Argument 2^32 with nLockTime=2^32-1"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "4294967296 NOP2 1"]], +"0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ffffffff", "P2SH,CHECKLOCKTIMEVERIFY"], + +["Same, but with nLockTime=2^31-1"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "2147483648 NOP2 1"]], +"0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ffffff7f", "P2SH,CHECKLOCKTIMEVERIFY"], + +["6 byte non-minimally-encoded arguments are invalid even if their contents are valid"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x06 0x000000000000 NOP2 1"]], +"010000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", "P2SH,CHECKLOCKTIMEVERIFY"], + +["Failure due to failing CHECKLOCKTIMEVERIFY in scriptSig"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "1"]], +"01000000010001000000000000000000000000000000000000000000000000000000000000000000000251b1000000000100000000000000000000000000", "P2SH,CHECKLOCKTIMEVERIFY"], + +["Failure due to failing CHECKLOCKTIMEVERIFY in redeemScript"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "HASH160 0x14 0xc5b93064159b3b2d6ab506a41b1f50463771b988 EQUAL"]], +"0100000001000100000000000000000000000000000000000000000000000000000000000000000000030251b1000000000100000000000000000000000000", "P2SH,CHECKLOCKTIMEVERIFY"], ["Make diffs cleaner by leaving a comment here without comma at the end"] ] diff --git a/txscript/data/tx_valid.json b/txscript/data/tx_valid.json index 182b88ef..b6c074fe 100644 --- a/txscript/data/tx_valid.json +++ b/txscript/data/tx_valid.json @@ -5,74 +5,6 @@ ["serializedTransaction, verifyFlags]"], ["Objects that are only a single string (like this one) are ignored"], -["The following is 23b397edccd3740a74adb603c9756370fafcde9bcc4483eb271ecad09a94dd63"], -["It is of particular interest because it contains an invalidly-encoded signature which OpenSSL accepts"], -["See http://r6.ca/blog/20111119T211504Z.html"], -["It is also the first OP_CHECKMULTISIG transaction in standard form"], -[[["60a20bd93aa49ab4b28d514ec10b06e1829ce6818ec06cd3aabd013ebcdc4bb1", 0, "1 0x41 0x04cc71eb30d653c0c3163990c47b976f3fb3f37cccdcbedb169a1dfef58bbfbfaff7d8a473e7e2e6d317b87bafe8bde97e3cf8f065dec022b51d11fcdd0d348ac4 0x41 0x0461cbdcc5409fb4b4d42b51d33381354d80e550078cb532a34bfa2fcfdeb7d76519aecc62770f5b0e4ef8551946d8a540911abe3e7854a26f39f58b25c15342af 2 OP_CHECKMULTISIG"]], 
-"0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba26000000000490047304402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01ffffffff0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271ad504b88ac00000000", "P2SH"], - -["The following is a tweaked form of 23b397edccd3740a74adb603c9756370fafcde9bcc4483eb271ecad09a94dd63"], -["It is an OP_CHECKMULTISIG with an arbitrary extra byte stuffed into the signature at pos length - 2"], -["The dummy byte is fine however, so the NULLDUMMY flag should be happy"], -[[["60a20bd93aa49ab4b28d514ec10b06e1829ce6818ec06cd3aabd013ebcdc4bb1", 0, "1 0x41 0x04cc71eb30d653c0c3163990c47b976f3fb3f37cccdcbedb169a1dfef58bbfbfaff7d8a473e7e2e6d317b87bafe8bde97e3cf8f065dec022b51d11fcdd0d348ac4 0x41 0x0461cbdcc5409fb4b4d42b51d33381354d80e550078cb532a34bfa2fcfdeb7d76519aecc62770f5b0e4ef8551946d8a540911abe3e7854a26f39f58b25c15342af 2 OP_CHECKMULTISIG"]], -"0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba260000000004a0048304402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2bab01ffffffff0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271ad504b88ac00000000", "P2SH,NULLDUMMY"], - -["The following is a tweaked form of 23b397edccd3740a74adb603c9756370fafcde9bcc4483eb271ecad09a94dd63"], -["It is an OP_CHECKMULTISIG with the dummy value set to something other than an empty string"], -[[["60a20bd93aa49ab4b28d514ec10b06e1829ce6818ec06cd3aabd013ebcdc4bb1", 0, "1 0x41 0x04cc71eb30d653c0c3163990c47b976f3fb3f37cccdcbedb169a1dfef58bbfbfaff7d8a473e7e2e6d317b87bafe8bde97e3cf8f065dec022b51d11fcdd0d348ac4 0x41 0x0461cbdcc5409fb4b4d42b51d33381354d80e550078cb532a34bfa2fcfdeb7d76519aecc62770f5b0e4ef8551946d8a540911abe3e7854a26f39f58b25c15342af 2 OP_CHECKMULTISIG"]], -"0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba260000000004a01ff47304402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01ffffffff0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271ad504b88ac00000000", "P2SH"], - -["As above, but using a OP_1"], -[[["60a20bd93aa49ab4b28d514ec10b06e1829ce6818ec06cd3aabd013ebcdc4bb1", 0, "1 0x41 0x04cc71eb30d653c0c3163990c47b976f3fb3f37cccdcbedb169a1dfef58bbfbfaff7d8a473e7e2e6d317b87bafe8bde97e3cf8f065dec022b51d11fcdd0d348ac4 0x41 0x0461cbdcc5409fb4b4d42b51d33381354d80e550078cb532a34bfa2fcfdeb7d76519aecc62770f5b0e4ef8551946d8a540911abe3e7854a26f39f58b25c15342af 2 OP_CHECKMULTISIG"]], -"0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba26000000000495147304402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01ffffffff0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271ad504b88ac00000000", "P2SH"], - -["As above, but using a OP_1NEGATE"], -[[["60a20bd93aa49ab4b28d514ec10b06e1829ce6818ec06cd3aabd013ebcdc4bb1", 0, "1 0x41 0x04cc71eb30d653c0c3163990c47b976f3fb3f37cccdcbedb169a1dfef58bbfbfaff7d8a473e7e2e6d317b87bafe8bde97e3cf8f065dec022b51d11fcdd0d348ac4 0x41 0x0461cbdcc5409fb4b4d42b51d33381354d80e550078cb532a34bfa2fcfdeb7d76519aecc62770f5b0e4ef8551946d8a540911abe3e7854a26f39f58b25c15342af 2 OP_CHECKMULTISIG"]], 
-"0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba26000000000494f47304402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01ffffffff0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271ad504b88ac00000000", "P2SH"], - -["The following is c99c49da4c38af669dea436d3e73780dfdb6c1ecf9958baa52960e8baee30e73"], -["It is of interest because it contains a 0-sequence as well as a signature of SIGHASH type 0 (which is not a real type)"], -[[["406b2b06bcd34d3c8733e6b79f7a394c8a431fbf4ff5ac705c93f4076bb77602", 0, "DUP HASH160 0x14 0xdc44b1164188067c3a32d4780f5996fa14a4f2d9 EQUALVERIFY CHECKSIG"]], -"01000000010276b76b07f4935c70acf54fbf1f438a4c397a9fb7e633873c4dd3bc062b6b40000000008c493046022100d23459d03ed7e9511a47d13292d3430a04627de6235b6e51a40f9cd386f2abe3022100e7d25b080f0bb8d8d5f878bba7d54ad2fda650ea8d158a33ee3cbd11768191fd004104b0e2c879e4daf7b9ab68350228c159766676a14f5815084ba166432aab46198d4cca98fa3e9981d0a90b2effc514b76279476550ba3663fdcaff94c38420e9d5000000000100093d00000000001976a9149a7b0f3b80c6baaeedce0a0842553800f832ba1f88ac00000000", "P2SH"], - -["A nearly-standard transaction with CHECKSIGVERIFY 1 instead of CHECKSIG"], -[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "DUP HASH160 0x14 0x5b6462475454710f3c22f5fdf0b40704c92f25c3 EQUALVERIFY CHECKSIGVERIFY 1"]], -"01000000010001000000000000000000000000000000000000000000000000000000000000000000006a473044022067288ea50aa799543a536ff9306f8e1cba05b9c6b10951175b924f96732555ed022026d7b5265f38d21541519e4a1e55044d5b9e17e15cdbaf29ae3792e99e883e7a012103ba8c8b86dea131c22ab967e6dd99bdae8eff7a1f75a2c35f1f944109e3fe5e22ffffffff010000000000000000015100000000", "P2SH"], - -["Same as above, but with the signature duplicated in the scriptPubKey with the proper pushdata prefix"], -[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "DUP HASH160 0x14 0x5b6462475454710f3c22f5fdf0b40704c92f25c3 EQUALVERIFY CHECKSIGVERIFY 1 0x47 0x3044022067288ea50aa799543a536ff9306f8e1cba05b9c6b10951175b924f96732555ed022026d7b5265f38d21541519e4a1e55044d5b9e17e15cdbaf29ae3792e99e883e7a01"]], -"01000000010001000000000000000000000000000000000000000000000000000000000000000000006a473044022067288ea50aa799543a536ff9306f8e1cba05b9c6b10951175b924f96732555ed022026d7b5265f38d21541519e4a1e55044d5b9e17e15cdbaf29ae3792e99e883e7a012103ba8c8b86dea131c22ab967e6dd99bdae8eff7a1f75a2c35f1f944109e3fe5e22ffffffff010000000000000000015100000000", "P2SH"], - -["The following is f7fdd091fa6d8f5e7a8c2458f5c38faffff2d3f1406b6e4fe2c99dcc0d2d1cbb"], -["It caught a bug in the workaround for 23b397edccd3740a74adb603c9756370fafcde9bcc4483eb271ecad09a94dd63 in an overly simple implementation"], -[[["b464e85df2a238416f8bdae11d120add610380ea07f4ef19c5f9dfd472f96c3d", 0, "DUP HASH160 0x14 0xbef80ecf3a44500fda1bc92176e442891662aed2 EQUALVERIFY CHECKSIG"], -["b7978cc96e59a8b13e0865d3f95657561a7f725be952438637475920bac9eb21", 1, "DUP HASH160 0x14 0xbef80ecf3a44500fda1bc92176e442891662aed2 EQUALVERIFY CHECKSIG"]], 
-"01000000023d6cf972d4dff9c519eff407ea800361dd0a121de1da8b6f4138a2f25de864b4000000008a4730440220ffda47bfc776bcd269da4832626ac332adfca6dd835e8ecd83cd1ebe7d709b0e022049cffa1cdc102a0b56e0e04913606c70af702a1149dc3b305ab9439288fee090014104266abb36d66eb4218a6dd31f09bb92cf3cfa803c7ea72c1fc80a50f919273e613f895b855fb7465ccbc8919ad1bd4a306c783f22cd3227327694c4fa4c1c439affffffff21ebc9ba20594737864352e95b727f1a565756f9d365083eb1a8596ec98c97b7010000008a4730440220503ff10e9f1e0de731407a4a245531c9ff17676eda461f8ceeb8c06049fa2c810220c008ac34694510298fa60b3f000df01caa244f165b727d4896eb84f81e46bcc4014104266abb36d66eb4218a6dd31f09bb92cf3cfa803c7ea72c1fc80a50f919273e613f895b855fb7465ccbc8919ad1bd4a306c783f22cd3227327694c4fa4c1c439affffffff01f0da5200000000001976a914857ccd42dded6df32949d4646dfa10a92458cfaa88ac00000000", "P2SH"], - -["The following tests for the presence of a bug in the handling of SIGHASH_SINGLE"], -["It results in signing the constant 1, instead of something generated based on the transaction,"], -["when the input doing the signing has an index greater than the maximum output index"], -[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "DUP HASH160 0x14 0xe52b482f2faa8ecbf0db344f93c84ac908557f33 EQUALVERIFY CHECKSIG"], ["0000000000000000000000000000000000000000000000000000000000000200", 0, "1"]], -"01000000020002000000000000000000000000000000000000000000000000000000000000000000000151ffffffff0001000000000000000000000000000000000000000000000000000000000000000000006b483045022100c9cdd08798a28af9d1baf44a6c77bcc7e279f47dc487c8c899911bc48feaffcc0220503c5c50ae3998a733263c5c0f7061b483e2b56c4c41b456e7d2f5a78a74c077032102d5c25adb51b61339d2b05315791e21bbe80ea470a49db0135720983c905aace0ffffffff010000000000000000015100000000", "P2SH"], - -["An invalid P2SH Transaction"], -[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "HASH160 0x14 0x7a052c840ba73af26755de42cf01cc9e0a49fef0 EQUAL"]], -"010000000100010000000000000000000000000000000000000000000000000000000000000000000009085768617420697320ffffffff010000000000000000015100000000", "NONE"], - -["A valid P2SH Transaction using the standard transaction type put forth in BIP 16"], -[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "HASH160 0x14 0x8febbed40483661de6958d957412f82deed8e2f7 EQUAL"]], -"01000000010001000000000000000000000000000000000000000000000000000000000000000000006e493046022100c66c9cdf4c43609586d15424c54707156e316d88b0a1534c9e6b0d4f311406310221009c0fe51dbc9c4ab7cc25d3fdbeccf6679fe6827f08edf2b4a9f16ee3eb0e438a0123210338e8034509af564c62644c07691942e0c056752008a173c89f60ab2a88ac2ebfacffffffff010000000000000000015100000000", "P2SH"], - -["Tests for CheckTransaction()"], -["MAX_MONEY output"], -[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "HASH160 0x14 0x32afac281462b822adbec5094b8d4d337dd5bd6a EQUAL"]], -"01000000010001000000000000000000000000000000000000000000000000000000000000000000006e493046022100e1eadba00d9296c743cb6ecc703fd9ddc9b3cd12906176a226ae4c18d6b00796022100a71aef7d2874deff681ba6080f1b278bac7bb99c61b08a85f4311970ffe7f63f012321030c0588dc44d92bdcbf8e72093466766fdc265ead8db64517b0c542275b70fffbacffffffff010040075af0750700015100000000", "P2SH"], - -["MAX_MONEY output + 0 output"], -[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "HASH160 0x14 0xb558cbf4930954aa6a344363a15668d7477ae716 EQUAL"]], 
-"01000000010001000000000000000000000000000000000000000000000000000000000000000000006d483045022027deccc14aa6668e78a8c9da3484fbcd4f9dcc9bb7d1b85146314b21b9ae4d86022100d0b43dece8cfb07348de0ca8bc5b86276fa88f7f2138381128b7c36ab2e42264012321029bb13463ddd5d2cc05da6e84e37536cb9525703cfd8f43afdb414988987a92f6acffffffff020040075af075070001510000000000000000015100000000", "P2SH"], - ["Coinbase of size 2"], ["Note the input is just required to make the tester happy"], [[["0000000000000000000000000000000000000000000000000000000000000000", -1, "1"]], @@ -83,35 +15,6 @@ [[["0000000000000000000000000000000000000000000000000000000000000000", -1, "1"]], "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff6451515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151ffffffff010000000000000000015100000000", "P2SH"], -["Simple transaction with first input is signed with SIGHASH_ALL, second with SIGHASH_ANYONECANPAY"], -[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x21 0x035e7f0d4d0841bcd56c39337ed086b1a633ee770c1ffdd94ac552a95ac2ce0efc CHECKSIG"], - ["0000000000000000000000000000000000000000000000000000000000000200", 0, "0x21 0x035e7f0d4d0841bcd56c39337ed086b1a633ee770c1ffdd94ac552a95ac2ce0efc CHECKSIG"]], - "010000000200010000000000000000000000000000000000000000000000000000000000000000000049483045022100d180fd2eb9140aeb4210c9204d3f358766eb53842b2a9473db687fa24b12a3cc022079781799cd4f038b85135bbe49ec2b57f306b2bb17101b17f71f000fcab2b6fb01ffffffff0002000000000000000000000000000000000000000000000000000000000000000000004847304402205f7530653eea9b38699e476320ab135b74771e1c48b81a5d041e2ca84b9be7a802200ac8d1f40fb026674fe5a5edd3dea715c27baa9baca51ed45ea750ac9dc0a55e81ffffffff010100000000000000015100000000", "P2SH"], - -["Same as above, but we change the sequence number of the first input to check that SIGHASH_ANYONECANPAY is being followed"], -[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x21 0x035e7f0d4d0841bcd56c39337ed086b1a633ee770c1ffdd94ac552a95ac2ce0efc CHECKSIG"], - ["0000000000000000000000000000000000000000000000000000000000000200", 0, "0x21 0x035e7f0d4d0841bcd56c39337ed086b1a633ee770c1ffdd94ac552a95ac2ce0efc CHECKSIG"]], - "01000000020001000000000000000000000000000000000000000000000000000000000000000000004948304502203a0f5f0e1f2bdbcd04db3061d18f3af70e07f4f467cbc1b8116f267025f5360b022100c792b6e215afc5afc721a351ec413e714305cb749aae3d7fee76621313418df101010000000002000000000000000000000000000000000000000000000000000000000000000000004847304402205f7530653eea9b38699e476320ab135b74771e1c48b81a5d041e2ca84b9be7a802200ac8d1f40fb026674fe5a5edd3dea715c27baa9baca51ed45ea750ac9dc0a55e81ffffffff010100000000000000015100000000", "P2SH"], - -["afd9c17f8913577ec3509520bd6e5d63e9c0fd2a5f70c787993b097ba6ca9fae which has several SIGHASH_SINGLE signatures"], -[[["63cfa5a09dc540bf63e53713b82d9ea3692ca97cd608c384f2aa88e51a0aac70", 0, "DUP HASH160 0x14 0xdcf72c4fd02f5a987cf9b02f2fabfcac3341a87d EQUALVERIFY CHECKSIG"], - ["04e8d0fcf3846c6734477b98f0f3d4badfb78f020ee097a0be5fe347645b817d", 1, "DUP HASH160 0x14 0xdcf72c4fd02f5a987cf9b02f2fabfcac3341a87d EQUALVERIFY CHECKSIG"], - ["ee1377aff5d0579909e11782e1d2f5f7b84d26537be7f5516dd4e43373091f3f", 1, "DUP HASH160 0x14 0xdcf72c4fd02f5a987cf9b02f2fabfcac3341a87d EQUALVERIFY CHECKSIG"]], - 
"010000000370ac0a1ae588aaf284c308d67ca92c69a39e2db81337e563bf40c59da0a5cf63000000006a4730440220360d20baff382059040ba9be98947fd678fb08aab2bb0c172efa996fd8ece9b702201b4fb0de67f015c90e7ac8a193aeab486a1f587e0f54d0fb9552ef7f5ce6caec032103579ca2e6d107522f012cd00b52b9a65fb46f0c57b9b8b6e377c48f526a44741affffffff7d815b6447e35fbea097e00e028fb7dfbad4f3f0987b4734676c84f3fcd0e804010000006b483045022100c714310be1e3a9ff1c5f7cacc65c2d8e781fc3a88ceb063c6153bf950650802102200b2d0979c76e12bb480da635f192cc8dc6f905380dd4ac1ff35a4f68f462fffd032103579ca2e6d107522f012cd00b52b9a65fb46f0c57b9b8b6e377c48f526a44741affffffff3f1f097333e4d46d51f5e77b53264db8f7f5d2e18217e1099957d0f5af7713ee010000006c493046022100b663499ef73273a3788dea342717c2640ac43c5a1cf862c9e09b206fcb3f6bb8022100b09972e75972d9148f2bdd462e5cb69b57c1214b88fc55ca638676c07cfc10d8032103579ca2e6d107522f012cd00b52b9a65fb46f0c57b9b8b6e377c48f526a44741affffffff0380841e00000000001976a914bfb282c70c4191f45b5a6665cad1682f2c9cfdfb88ac80841e00000000001976a9149857cc07bed33a5cf12b9c5e0500b675d500c81188ace0fd1c00000000001976a91443c52850606c872403c0601e69fa34b26f62db4a88ac00000000", "P2SH"], - - ["ddc454a1c0c35c188c98976b17670f69e586d9c0f3593ea879928332f0a069e7, which spends an input that pushes using a PUSHDATA1 that is negative when read as signed"], - [[["c5510a5dd97a25f43175af1fe649b707b1df8e1a41489bac33a23087027a2f48", 0, "0x4c 0xae 0x606563686f2022553246736447566b58312b5a536e587574356542793066794778625456415675534a6c376a6a334878416945325364667657734f53474f36633338584d7439435c6e543249584967306a486956304f376e775236644546673d3d22203e20743b206f70656e73736c20656e63202d7061737320706173733a5b314a564d7751432d707269766b65792d6865785d202d64202d6165732d3235362d636263202d61202d696e207460 DROP DUP HASH160 0x14 0xbfd7436b6265aa9de506f8a994f881ff08cc2872 EQUALVERIFY CHECKSIG"]], - "0100000001482f7a028730a233ac9b48411a8edfb107b749e61faf7531f4257ad95d0a51c5000000008b483045022100bf0bbae9bde51ad2b222e87fbf67530fbafc25c903519a1e5dcc52a32ff5844e022028c4d9ad49b006dd59974372a54291d5764be541574bb0c4dc208ec51f80b7190141049dd4aad62741dc27d5f267f7b70682eee22e7e9c1923b9c0957bdae0b96374569b460eb8d5b40d972e8c7c0ad441de3d94c4a29864b212d56050acb980b72b2bffffffff0180969800000000001976a914e336d0017a9d28de99d16472f6ca6d5a3a8ebc9988ac00000000", "P2SH"], - -["Correct signature order"], -["Note the input is just required to make the tester happy"], -[[["b3da01dd4aae683c7aee4d5d8b52a540a508e1115f77cd7fa9a291243f501223", 0, "HASH160 0x14 0xb1ce99298d5f07364b57b1e5c9cc00be0b04a954 EQUAL"]], -"01000000012312503f2491a2a97fcd775f11e108a540a5528b5d4dee7a3c68ae4add01dab300000000fdfe0000483045022100f6649b0eddfdfd4ad55426663385090d51ee86c3481bdc6b0c18ea6c0ece2c0b0220561c315b07cffa6f7dd9df96dbae9200c2dee09bf93cc35ca05e6cdf613340aa0148304502207aacee820e08b0b174e248abd8d7a34ed63b5da3abedb99934df9fddd65c05c4022100dfe87896ab5ee3df476c2655f9fbe5bd089dccbef3e4ea05b5d121169fe7f5f4014c695221031d11db38972b712a9fe1fc023577c7ae3ddb4a3004187d41c45121eecfdbb5b7210207ec36911b6ad2382860d32989c7b8728e9489d7bbc94a6b5509ef0029be128821024ea9fac06f666a4adc3fc1357b7bec1fd0bdece2b9d08579226a8ebde53058e453aeffffffff0180380100000000001976a914c9b99cddf847d10685a4fabaa0baf505f7c3dfab88ac00000000", "P2SH"], - -["cc60b1f899ec0a69b7c3f25ddf32c4524096a9c5b01cbd84c6d0312a0c478984, which is a fairly strange transaction which relies on OP_CHECKSIG returning 0 when checking a completely invalid sig of length 0"], -[[["cbebc4da731e8995fe97f6fadcd731b36ad40e5ecb31e38e904f6e5982fa09f7", 0, 
"0x2102085c6600657566acc2d6382a47bc3f324008d2aa10940dd7705a48aa2a5a5e33ac7c2103f5d0fb955f95dd6be6115ce85661db412ec6a08abcbfce7da0ba8297c6cc0ec4ac7c5379a820d68df9e32a147cffa36193c6f7c43a1c8c69cda530e1c6db354bfabdcfefaf3c875379a820f531f3041d3136701ea09067c53e7159c8f9b2746a56c3d82966c54bbc553226879a5479827701200122a59a5379827701200122a59a6353798277537982778779679a68"]], -"0100000001f709fa82596e4f908ee331cb5e0ed46ab331d7dcfaf697fe95891e73dac4ebcb000000008c20ca42095840735e89283fec298e62ac2ddea9b5f34a8cbb7097ad965b87568100201b1b01dc829177da4a14551d2fc96a9db00c6501edfa12f22cd9cefd335c227f483045022100a9df60536df5733dd0de6bc921fab0b3eee6426501b43a228afa2c90072eb5ca02201c78b74266fac7d1db5deff080d8a403743203f109fbcabf6d5a760bf87386d20100ffffffff01c075790000000000232103611f9a45c18f28f06f19076ad571c344c82ce8fcfe34464cf8085217a2d294a6ac00000000", "P2SH"], - ["Empty pubkey"], [[["229257c295e7f555421c1bfec8538dd30a4b5c37c1c8810bbe83cafa7811652c", 0, "0x00 CHECKSIG NOT"]], "01000000012c651178faca83be0b81c8c1375c4b0ad38d53c8fe1b1c4255f5e795c25792220000000049483045022100d6044562284ac76c985018fc4a90127847708c9edb280996c507b28babdc4b2a02203d74eca3f1a4d1eea7ff77b528fde6d5dc324ec2dbfdb964ba885f643b9704cd01ffffffff010100000000000000232102c2410f8891ae918cab4ffc4bb4a3b0881be67c7a1e7faa8b5acf9ab8932ec30cac00000000", "P2SH"], @@ -120,72 +23,53 @@ [[["9ca93cfd8e3806b9d9e2ba1cf64e3cc6946ee0119670b1796a09928d14ea25f7", 0, "0x21 0x028a1d66975dbdf97897e3a4aef450ebeb5b5293e4a0b4a6d3a2daaa0b2b110e02 CHECKSIG NOT"]], "0100000001f725ea148d92096a79b1709611e06e94c63c4ef61cbae2d9b906388efd3ca99c000000000100ffffffff0101000000000000002321028a1d66975dbdf97897e3a4aef450ebeb5b5293e4a0b4a6d3a2daaa0b2b110e02ac00000000", "P2SH"], -[[["444e00ed7840d41f20ecd9c11d3f91982326c731a02f3c05748414a4fa9e59be", 0, "1 0x00 0x21 0x02136b04758b0b6e363e7a6fbe83aaf527a153db2b060d36cc29f7f8309ba6e458 2 CHECKMULTISIG"]], -"0100000001be599efaa4148474053c2fa031c7262398913f1dc1d9ec201fd44078ed004e44000000004900473044022022b29706cb2ed9ef0cb3c97b72677ca2dfd7b4160f7b4beb3ba806aa856c401502202d1e52582412eba2ed474f1f437a427640306fd3838725fab173ade7fe4eae4a01ffffffff010100000000000000232103ac4bba7e7ca3e873eea49e08132ad30c7f03640b6539e9b59903cf14fd016bbbac00000000", "P2SH"], - -[[["e16abbe80bf30c080f63830c8dbf669deaef08957446e95940227d8c5e6db612", 0, "1 0x21 0x03905380c7013e36e6e19d305311c1b81fce6581f5ee1c86ef0627c68c9362fc9f 0x00 2 CHECKMULTISIG"]], -"010000000112b66d5e8c7d224059e946749508efea9d66bf8d0c83630f080cf30be8bb6ae100000000490047304402206ffe3f14caf38ad5c1544428e99da76ffa5455675ec8d9780fac215ca17953520220779502985e194d84baa36b9bd40a0dbd981163fa191eb884ae83fc5bd1c86b1101ffffffff010100000000000000232103905380c7013e36e6e19d305311c1b81fce6581f5ee1c86ef0627c68c9362fc9fac00000000", "P2SH"], - [[["ebbcf4bfce13292bd791d6a65a2a858d59adbf737e387e40370d4e64cc70efb0", 0, "2 0x21 0x033bcaa0a602f0d44cc9d5637c6e515b0471db514c020883830b7cefd73af04194 0x21 0x03a88b326f8767f4f192ce252afe33c94d25ab1d24f27f159b3cb3aa691ffe1423 2 CHECKMULTISIG NOT"]], "0100000001b0ef70cc644e0d37407e387e73bfad598d852a5aa6d691d72b2913cebff4bceb000000004a00473044022068cd4851fc7f9a892ab910df7a24e616f293bcb5c5fbdfbc304a194b26b60fba022078e6da13d8cb881a22939b952c24f88b97afd06b4c47a47d7f804c9a352a6d6d0100ffffffff0101000000000000002321033bcaa0a602f0d44cc9d5637c6e515b0471db514c020883830b7cefd73af04194ac00000000", "P2SH"], [[["ba4cd7ae2ad4d4d13ebfc8ab1d93a63e4a6563f25089a18bf0fc68f282aa88c1", 0, "2 0x21 0x037c615d761e71d38903609bf4f46847266edc2fb37532047d747ba47eaae5ffe1 0x21 
0x02edc823cd634f2c4033d94f5755207cb6b60c4b1f1f056ad7471c47de5f2e4d50 2 CHECKMULTISIG NOT"]], "0100000001c188aa82f268fcf08ba18950f263654a3ea6931dabc8bf3ed1d4d42aaed74cba000000004b0000483045022100940378576e069aca261a6b26fb38344e4497ca6751bb10905c76bb689f4222b002204833806b014c26fd801727b792b1260003c55710f87c5adbd7a9cb57446dbc9801ffffffff0101000000000000002321037c615d761e71d38903609bf4f46847266edc2fb37532047d747ba47eaae5ffe1ac00000000", "P2SH"], +["CHECKLOCKTIMEVERIFY tests"], -["OP_CODESEPARATOR tests"], +["By-height locks, with argument == 0 and == tx nLockTime"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0 NOP2 1"]], +"010000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", "P2SH,CHECKLOCKTIMEVERIFY"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "499999999 NOP2 1"]], +"0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ff64cd1d", "P2SH,CHECKLOCKTIMEVERIFY"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0 NOP2 1"]], +"0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ff64cd1d", "P2SH,CHECKLOCKTIMEVERIFY"], -["Test that SignatureHash() removes OP_CODESEPARATOR with FindAndDelete()"], -[[["bc7fd132fcf817918334822ee6d9bd95c889099c96e07ca2c1eb2cc70db63224", 0, "CODESEPARATOR 0x21 0x038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041 CHECKSIG"]], -"01000000012432b60dc72cebc1a27ce0969c0989c895bdd9e62e8234839117f8fc32d17fbc000000004a493046022100a576b52051962c25e642c0fd3d77ee6c92487048e5d90818bcf5b51abaccd7900221008204f8fb121be4ec3b24483b1f92d89b1b0548513a134e345c5442e86e8617a501ffffffff010000000000000000016a00000000", "P2SH"], -[[["83e194f90b6ef21fa2e3a365b63794fb5daa844bdc9b25de30899fcfe7b01047", 0, "CODESEPARATOR CODESEPARATOR 0x21 0x038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041 CHECKSIG"]], -"01000000014710b0e7cf9f8930de259bdc4b84aa5dfb9437b665a3e3a21ff26e0bf994e183000000004a493046022100a166121a61b4eeb19d8f922b978ff6ab58ead8a5a5552bf9be73dc9c156873ea02210092ad9bc43ee647da4f6652c320800debcf08ec20a094a0aaf085f63ecb37a17201ffffffff010000000000000000016a00000000", "P2SH"], +["By-time locks, with argument just beyond tx nLockTime (but within numerical boundaries)"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "500000000 NOP2 1"]], +"01000000010001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000065cd1d", "P2SH,CHECKLOCKTIMEVERIFY"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "4294967295 NOP2 1"]], +"0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ffffffff", "P2SH,CHECKLOCKTIMEVERIFY"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "500000000 NOP2 1"]], +"0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ffffffff", "P2SH,CHECKLOCKTIMEVERIFY"], -["Hashed data starts at the CODESEPARATOR"], -[[["326882a7f22b5191f1a0cc9962ca4b878cd969cf3b3a70887aece4d801a0ba5e", 0, "0x21 0x038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041 CODESEPARATOR CHECKSIG"]], 
-"01000000015ebaa001d8e4ec7a88703a3bcf69d98c874bca6299cca0f191512bf2a7826832000000004948304502203bf754d1c6732fbf87c5dcd81258aefd30f2060d7bd8ac4a5696f7927091dad1022100f5bcb726c4cf5ed0ed34cc13dadeedf628ae1045b7cb34421bc60b89f4cecae701ffffffff010000000000000000016a00000000", "P2SH"], +["Any non-maxint nSequence is fine"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0 NOP2 1"]], +"010000000100010000000000000000000000000000000000000000000000000000000000000000000000feffffff0100000000000000000000000000", "P2SH,CHECKLOCKTIMEVERIFY"], -["But only if execution has reached it"], -[[["a955032f4d6b0c9bfe8cad8f00a8933790b9c1dc28c82e0f48e75b35da0e4944", 0, "0x21 0x038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041 CHECKSIGVERIFY CODESEPARATOR 0x21 0x038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041 CHECKSIGVERIFY CODESEPARATOR 1"]], -"010000000144490eda355be7480f2ec828dcc1b9903793a8008fad8cfe9b0c6b4d2f0355a900000000924830450221009c0a27f886a1d8cb87f6f595fbc3163d28f7a81ec3c4b252ee7f3ac77fd13ffa02203caa8dfa09713c8c4d7ef575c75ed97812072405d932bd11e6a1593a98b679370148304502201e3861ef39a526406bad1e20ecad06be7375ad40ddb582c9be42d26c3a0d7b240221009d0a3985e96522e59635d19cc4448547477396ce0ef17a58e7d74c3ef464292301ffffffff010000000000000000016a00000000", "P2SH"], +["The argument can be calculated rather than created directly by a PUSHDATA"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "499999999 1ADD NOP2 1"]], +"01000000010001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000065cd1d", "P2SH,CHECKLOCKTIMEVERIFY"], -["CODESEPARATOR in an unexecuted IF block does not change what is hashed"], -[[["a955032f4d6b0c9bfe8cad8f00a8933790b9c1dc28c82e0f48e75b35da0e4944", 0, "IF CODESEPARATOR ENDIF 0x21 0x0378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71 CHECKSIGVERIFY CODESEPARATOR 1"]], -"010000000144490eda355be7480f2ec828dcc1b9903793a8008fad8cfe9b0c6b4d2f0355a9000000004a48304502207a6974a77c591fa13dff60cabbb85a0de9e025c09c65a4b2285e47ce8e22f761022100f0efaac9ff8ac36b10721e0aae1fb975c90500b50c56e8a0cc52b0403f0425dd0100ffffffff010000000000000000016a00000000", "P2SH"], +["Perhaps even by an ADD producing a 5-byte result that is out of bounds for other opcodes"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "1 2147483646 ADD NOP2 1"]], +"0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000feffffff", "P2SH,CHECKLOCKTIMEVERIFY"], -["As above, with the IF block executed"], -[[["a955032f4d6b0c9bfe8cad8f00a8933790b9c1dc28c82e0f48e75b35da0e4944", 0, "IF CODESEPARATOR ENDIF 0x21 0x0378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71 CHECKSIGVERIFY CODESEPARATOR 1"]], -"010000000144490eda355be7480f2ec828dcc1b9903793a8008fad8cfe9b0c6b4d2f0355a9000000004a483045022100fa4a74ba9fd59c59f46c3960cf90cbe0d2b743c471d24a3d5d6db6002af5eebb02204d70ec490fd0f7055a7c45f86514336e3a7f03503dacecabb247fc23f15c83510151ffffffff010000000000000000016a00000000", "P2SH"], +["5 byte non-minimally-encoded arguments are valid"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x05 0x0000000000 NOP2 1"]], +"010000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", "P2SH,CHECKLOCKTIMEVERIFY"], +["Valid CHECKLOCKTIMEVERIFY in scriptSig"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, 
"1"]], +"01000000010001000000000000000000000000000000000000000000000000000000000000000000000251b1000000000100000000000000000001000000", "P2SH,CHECKLOCKTIMEVERIFY"], -["CHECKSIG is legal in scriptSigs"], -[[["ccf7f4053a02e653c36ac75c891b7496d0dc5ce5214f6c913d9cf8f1329ebee0", 0, "DUP HASH160 0x14 0xee5a6aa40facefb2655ac23c0c28c57c65c41f9b EQUALVERIFY CHECKSIG"]], -"0100000001e0be9e32f1f89c3d916c4f21e55cdcd096741b895cc76ac353e6023a05f4f7cc00000000d86149304602210086e5f736a2c3622ebb62bd9d93d8e5d76508b98be922b97160edc3dcca6d8c47022100b23c312ac232a4473f19d2aeb95ab7bdf2b65518911a0d72d50e38b5dd31dc820121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ac4730440220508fa761865c8abd81244a168392876ee1d94e8ed83897066b5e2df2400dad24022043f5ee7538e87e9c6aef7ef55133d3e51da7cc522830a9c4d736977a76ef755c0121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ffffffff010000000000000000016a00000000", "P2SH"], - -["Same semantics for OP_CODESEPARATOR"], -[[["10c9f0effe83e97f80f067de2b11c6a00c3088a4bce42c5ae761519af9306f3c", 1, "DUP HASH160 0x14 0xee5a6aa40facefb2655ac23c0c28c57c65c41f9b EQUALVERIFY CHECKSIG"]], -"01000000013c6f30f99a5161e75a2ce4bca488300ca0c6112bde67f0807fe983feeff0c91001000000e608646561646265656675ab61493046022100ce18d384221a731c993939015e3d1bcebafb16e8c0b5b5d14097ec8177ae6f28022100bcab227af90bab33c3fe0a9abfee03ba976ee25dc6ce542526e9b2e56e14b7f10121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ac493046022100c3b93edcc0fd6250eb32f2dd8a0bba1754b0f6c3be8ed4100ed582f3db73eba2022100bf75b5bd2eff4d6bf2bda2e34a40fcc07d4aa3cf862ceaa77b47b81eff829f9a01ab21038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ffffffff010000000000000000016a00000000", "P2SH"], - -["Signatures are removed from the script they are in by FindAndDelete() in the CHECKSIG code; even multiple instances of one signature can be removed."], -[[["6056ebd549003b10cbbd915cea0d82209fe40b8617104be917a26fa92cbe3d6f", 0, "DUP HASH160 0x14 0xee5a6aa40facefb2655ac23c0c28c57c65c41f9b EQUALVERIFY CHECKSIG"]], -"01000000016f3dbe2ca96fa217e94b1017860be49f20820dea5c91bdcb103b0049d5eb566000000000fd1d0147304402203989ac8f9ad36b5d0919d97fa0a7f70c5272abee3b14477dc646288a8b976df5022027d19da84a066af9053ad3d1d7459d171b7e3a80bc6c4ef7a330677a6be548140147304402203989ac8f9ad36b5d0919d97fa0a7f70c5272abee3b14477dc646288a8b976df5022027d19da84a066af9053ad3d1d7459d171b7e3a80bc6c4ef7a330677a6be548140121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ac47304402203757e937ba807e4a5da8534c17f9d121176056406a6465054bdd260457515c1a02200f02eccf1bec0f3a0d65df37889143c2e88ab7acec61a7b6f5aa264139141a2b0121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ffffffff010000000000000000016a00000000", "P2SH"], - -["That also includes ahead of the opcode being executed."], -[[["5a6b0021a6042a686b6b94abc36b387bef9109847774e8b1e51eb8cc55c53921", 1, "DUP HASH160 0x14 0xee5a6aa40facefb2655ac23c0c28c57c65c41f9b EQUALVERIFY CHECKSIG"]], 
-"01000000012139c555ccb81ee5b1e87477840991ef7b386bc3ab946b6b682a04a621006b5a01000000fdb40148304502201723e692e5f409a7151db386291b63524c5eb2030df652b1f53022fd8207349f022100b90d9bbf2f3366ce176e5e780a00433da67d9e5c79312c6388312a296a5800390148304502201723e692e5f409a7151db386291b63524c5eb2030df652b1f53022fd8207349f022100b90d9bbf2f3366ce176e5e780a00433da67d9e5c79312c6388312a296a5800390121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f2204148304502201723e692e5f409a7151db386291b63524c5eb2030df652b1f53022fd8207349f022100b90d9bbf2f3366ce176e5e780a00433da67d9e5c79312c6388312a296a5800390175ac4830450220646b72c35beeec51f4d5bc1cbae01863825750d7f490864af354e6ea4f625e9c022100f04b98432df3a9641719dbced53393022e7249fb59db993af1118539830aab870148304502201723e692e5f409a7151db386291b63524c5eb2030df652b1f53022fd8207349f022100b90d9bbf2f3366ce176e5e780a00433da67d9e5c79312c6388312a296a580039017521038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ffffffff010000000000000000016a00000000", "P2SH"], - -["Finally CHECKMULTISIG removes all signatures prior to hashing the script containing those signatures. In conjunction with the SIGHASH_SINGLE bug this lets us test whether or not FindAndDelete() is actually present in scriptPubKey/redeemScript evaluation by including a signature of the digest 0x01 We can compute in advance for our pubkey, embed it it in the scriptPubKey, and then also using a normal SIGHASH_ALL signature. If FindAndDelete() wasn't run, the 'bugged' signature would still be in the hashed script, and the normal signature would fail."], - -["Here's an example on mainnet within a P2SH redeemScript. Remarkably it's a standard transaction in <0.9"], -[[["b5b598de91787439afd5938116654e0b16b7a0d0f82742ba37564219c5afcbf9", 0, "DUP HASH160 0x14 0xf6f365c40f0739b61de827a44751e5e99032ed8f EQUALVERIFY CHECKSIG"], - ["ab9805c6d57d7070d9a42c5176e47bb705023e6b67249fb6760880548298e742", 0, "HASH160 0x14 0xd8dacdadb7462ae15cd906f1878706d0da8660e6 EQUAL"]], -"0100000002f9cbafc519425637ba4227f8d0a0b7160b4e65168193d5af39747891de98b5b5000000006b4830450221008dd619c563e527c47d9bd53534a770b102e40faa87f61433580e04e271ef2f960220029886434e18122b53d5decd25f1f4acb2480659fea20aabd856987ba3c3907e0121022b78b756e2258af13779c1a1f37ea6800259716ca4b7f0b87610e0bf3ab52a01ffffffff42e7988254800876b69f24676b3e0205b77be476512ca4d970707dd5c60598ab00000000fd260100483045022015bd0139bcccf990a6af6ec5c1c52ed8222e03a0d51c334df139968525d2fcd20221009f9efe325476eb64c3958e4713e9eefe49bf1d820ed58d2112721b134e2a1a53034930460221008431bdfa72bc67f9d41fe72e94c88fb8f359ffa30b33c72c121c5a877d922e1002210089ef5fc22dd8bfc6bf9ffdb01a9862d27687d424d1fefbab9e9c7176844a187a014c9052483045022015bd0139bcccf990a6af6ec5c1c52ed8222e03a0d51c334df139968525d2fcd20221009f9efe325476eb64c3958e4713e9eefe49bf1d820ed58d2112721b134e2a1a5303210378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71210378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c7153aeffffffff01a08601000000000017a914d8dacdadb7462ae15cd906f1878706d0da8660e68700000000", "P2SH"], - -["Same idea, but with bare CHECKMULTISIG"], -[[["ceafe58e0f6e7d67c0409fbbf673c84c166e3c5d3c24af58f7175b18df3bb3db", 0, "DUP HASH160 0x14 0xf6f365c40f0739b61de827a44751e5e99032ed8f EQUALVERIFY CHECKSIG"], - ["ceafe58e0f6e7d67c0409fbbf673c84c166e3c5d3c24af58f7175b18df3bb3db", 1, "2 0x48 0x3045022015bd0139bcccf990a6af6ec5c1c52ed8222e03a0d51c334df139968525d2fcd20221009f9efe325476eb64c3958e4713e9eefe49bf1d820ed58d2112721b134e2a1a5303 0x21 
0x0378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71 0x21 0x0378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71 3 CHECKMULTISIG"]], -"0100000002dbb33bdf185b17f758af243c5d3c6e164cc873f6bb9f40c0677d6e0f8ee5afce000000006b4830450221009627444320dc5ef8d7f68f35010b4c050a6ed0d96b67a84db99fda9c9de58b1e02203e4b4aaa019e012e65d69b487fdf8719df72f488fa91506a80c49a33929f1fd50121022b78b756e2258af13779c1a1f37ea6800259716ca4b7f0b87610e0bf3ab52a01ffffffffdbb33bdf185b17f758af243c5d3c6e164cc873f6bb9f40c0677d6e0f8ee5afce010000009300483045022015bd0139bcccf990a6af6ec5c1c52ed8222e03a0d51c334df139968525d2fcd20221009f9efe325476eb64c3958e4713e9eefe49bf1d820ed58d2112721b134e2a1a5303483045022015bd0139bcccf990a6af6ec5c1c52ed8222e03a0d51c334df139968525d2fcd20221009f9efe325476eb64c3958e4713e9eefe49bf1d820ed58d2112721b134e2a1a5303ffffffff01a0860100000000001976a9149bc0bbdd3024da4d0c38ed1aecf5c68dd1d3fa1288ac00000000", "P2SH"], - +["Valid CHECKLOCKTIMEVERIFY in redeemScript"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "HASH160 0x14 0x69780eeb90f65bf4cec3c17aeb3d466b88e7be66 EQUAL"]], +"0100000001000100000000000000000000000000000000000000000000000000000000000000000000030251b1000000000100000000000000000001000000", "P2SH,CHECKLOCKTIMEVERIFY"], ["Make diffs cleaner by leaving a comment here without comma at the end"] ] diff --git a/txscript/doc.go b/txscript/doc.go index 2f2b1ec6..0079ab9c 100644 --- a/txscript/doc.go +++ b/txscript/doc.go @@ -1,34 +1,31 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. /* -Package txscript implements the bitcoin transaction script language. - -A complete description of the script language used by bitcoin can be found at -https://en.bitcoin.it/wiki/Script. The following only serves as a quick -overview to provide information on how to use the package. +Package txscript implements the decred transaction script language. This package provides data structures and functions to parse and execute -bitcoin transaction scripts. +decred transaction scripts. Script Overview -Bitcoin transaction scripts are written in a stack-base, FORTH-like language. +Decred transaction scripts are written in a stack-base, FORTH-like language. -The bitcoin script language consists of a number of opcodes which fall into +The decred script language consists of a number of opcodes which fall into several categories such pushing and popping data to and from the stack, performing basic and bitwise arithmetic, conditional branching, comparing hashes, and checking cryptographic signatures. Scripts are processed from left to right and intentionally do not provide loops. -The vast majority of Bitcoin scripts at the time of this writing are of several +The vast majority of Decred scripts at the time of this writing are of several standard forms which consist of a spender providing a public key and a signature which proves the spender owns the associated private key. This information is used to prove the the spender is authorized to perform the transaction. One benefit of using a scripting language is added flexibility in specifying -what conditions must be met in order to spend bitcoins. +what conditions must be met in order to spend decreds. 
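As a minimal sketch of such a condition, the by-height OP_CHECKLOCKTIMEVERIFY lock exercised by the test vectors above could be assembled with the package's script builder; the builder methods and the DisasmString helper are assumed from the upstream txscript API rather than spelled out in this patch:

```go
package main

import (
	"fmt"

	"github.com/decred/dcrd/txscript"
)

func main() {
	// Build "499999999 OP_CHECKLOCKTIMEVERIFY OP_TRUE": the spending
	// transaction must carry a lock time of at least 499,999,999, which is
	// below LockTimeThreshold and therefore interpreted as a block height.
	script, err := txscript.NewScriptBuilder().
		AddInt64(499999999).
		AddOp(txscript.OP_CHECKLOCKTIMEVERIFY).
		AddOp(txscript.OP_TRUE).
		Script()
	if err != nil {
		fmt.Println(err)
		return
	}

	// Disassemble the result to inspect the opcodes that were emitted.
	disasm, err := txscript.DisasmString(script)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(disasm)
}
```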
Errors diff --git a/txscript/engine.go b/txscript/engine.go index 93855ea2..c6bb7aa8 100644 --- a/txscript/engine.go +++ b/txscript/engine.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,8 +9,8 @@ import ( "fmt" "math/big" - "github.com/btcsuite/btcd/btcec" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/chaincfg/chainec" + "github.com/decred/dcrd/wire" ) // ScriptFlags is a bitmask defining additional operations or tests that will be @@ -33,6 +34,11 @@ const ( // executed. ScriptDiscourageUpgradableNops + // ScriptVerifyCheckLockTimeVerify defines whether to verify that + // a transaction output is spendable based on the locktime. + // This is BIP0065. + ScriptVerifyCheckLockTimeVerify + // ScriptVerifyCleanStack defines that the stack must contain only // one stack element after evaluation and that the element must be // true if interpreted as a boolean. This is rule 6 of BIP0062. @@ -64,17 +70,22 @@ const ( const ( // maxStackSize is the maximum combined height of stack and alt stack // during execution. - maxStackSize = 1000 + maxStackSize = 1024 // maxScriptSize is the maximum allowed length of a raw script. - maxScriptSize = 10000 + maxScriptSize = 16384 + + // defaultScriptVersion is the default scripting language version + // representing extended Decred script. + DefaultScriptVersion = uint16(0) ) // halforder is used to tame ECDSA malleability (see BIP0062). -var halfOrder = new(big.Int).Rsh(btcec.S256().N, 1) +var halfOrder = new(big.Int).Rsh(chainec.Secp256k1.GetN(), 1) // Engine is the virtual machine that executes scripts. type Engine struct { + version uint16 scripts [][]parsedOpcode scriptIdx int scriptOff int @@ -220,7 +231,6 @@ func (vm *Engine) CheckErrorCondition(finalScript bool) error { } if finalScript && vm.hasFlag(ScriptVerifyCleanStack) && vm.dstack.Depth() != 1 { - return ErrStackCleanStack } else if vm.dstack.Depth() < 1 { return ErrStackEmptyStack @@ -311,7 +321,8 @@ func (vm *Engine) Step() (done bool, err error) { vm.scriptIdx++ } // there are zero length scripts in the wild - if vm.scriptIdx < len(vm.scripts) && vm.scriptOff >= len(vm.scripts[vm.scriptIdx]) { + if vm.scriptIdx < len(vm.scripts) && + vm.scriptOff >= len(vm.scripts[vm.scriptIdx]) { vm.scriptIdx++ } vm.lastCodeSep = 0 @@ -325,6 +336,13 @@ func (vm *Engine) Step() (done bool, err error) { // Execute will execute all scripts in the script engine and return either nil // for successful validation or an error if one occurred. func (vm *Engine) Execute() (err error) { + // All non-default version scripts currently execute without issue, + // making all outputs to them anyone can pay. In the future this + // will allow for the addition of new scripting languages. + if vm.version != DefaultScriptVersion { + return nil + } + done := false for done != true { log.Tracef("%v", newLogClosure(func() string { @@ -573,7 +591,8 @@ func (vm *Engine) SetAltStack(data [][]byte) { // NewEngine returns a new script engine for the provided public key script, // transaction, and input index. The flags modify the behavior of the script // engine according to the description provided by each flag. 
-func NewEngine(scriptPubKey []byte, tx *wire.MsgTx, txIdx int, flags ScriptFlags) (*Engine, error) { +func NewEngine(scriptPubKey []byte, tx *wire.MsgTx, txIdx int, + flags ScriptFlags, scriptVersion uint16) (*Engine, error) { // The provided transaction input index must refer to a valid input. if txIdx < 0 || txIdx >= len(tx.TxIn) { return nil, ErrInvalidIndex @@ -588,7 +607,7 @@ func NewEngine(scriptPubKey []byte, tx *wire.MsgTx, txIdx int, flags ScriptFlags // allowing the clean stack flag without the P2SH flag would make it // possible to have a situation where P2SH would not be a soft fork when // it should be. - vm := Engine{flags: flags} + vm := Engine{version: scriptVersion, flags: flags} if vm.hasFlag(ScriptVerifyCleanStack) && !vm.hasFlag(ScriptBip16) { return nil, ErrInvalidFlags } @@ -599,6 +618,16 @@ func NewEngine(scriptPubKey []byte, tx *wire.MsgTx, txIdx int, flags ScriptFlags return nil, ErrStackNonPushOnly } + // Subscripts for pay to script hash outputs are not allowed + // to use any stake tag OP codes if the script version is 0. + if scriptVersion == DefaultScriptVersion { + err := HasP2SHScriptSigStakeOpCodes(scriptVersion, scriptSig, + scriptPubKey) + if err != nil { + return nil, err + } + } + // The engine stores the scripts in parsed form using a slice. This // allows multiple scripts to be executed in sequence. For example, // with a pay-to-script-hash transaction, there will be ultimately be @@ -623,7 +652,7 @@ func NewEngine(scriptPubKey []byte, tx *wire.MsgTx, txIdx int, flags ScriptFlags vm.scriptIdx++ } - if vm.hasFlag(ScriptBip16) && isScriptHash(vm.scripts[1]) { + if vm.hasFlag(ScriptBip16) && isAnyKindOfScriptHash(vm.scripts[1]) { // Only accept input scripts that push data for P2SH. if !isPushOnly(vm.scripts[0]) { return nil, ErrStackP2SHNonPushOnly diff --git a/txscript/engine_test.go b/txscript/engine_test.go index 5d47fc6e..370aec8f 100644 --- a/txscript/engine_test.go +++ b/txscript/engine_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
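A minimal sketch of driving the engine with the reworked constructor, using only names this patch introduces or keeps (the trailing script version argument to NewEngine, DefaultScriptVersion, and the new ScriptVerifyCheckLockTimeVerify flag); the surrounding wiring is assumed:

```go
package example

import (
	"github.com/decred/dcrd/txscript"
	"github.com/decred/dcrd/wire"
)

// verifyInput executes the script pair for one input of tx. The final
// NewEngine argument selects the script version; DefaultScriptVersion (0)
// selects the regular Decred script rules, and any other version is treated
// as anyone-can-pay by Execute.
func verifyInput(pkScript []byte, tx *wire.MsgTx, idx int) error {
	flags := txscript.ScriptBip16 |
		txscript.ScriptVerifyCheckLockTimeVerify
	vm, err := txscript.NewEngine(pkScript, tx, idx, flags,
		txscript.DefaultScriptVersion)
	if err != nil {
		return err
	}
	return vm.Execute()
}
```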
@@ -7,8 +8,9 @@ package txscript_test import ( "testing" - "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" ) // TestBadPC sets the pc to a deliberately bad result then confirms that Step() @@ -35,7 +37,7 @@ func TestBadPC(t *testing.T) { TxIn: []*wire.TxIn{ { PreviousOutPoint: wire.OutPoint{ - Hash: wire.ShaHash([32]byte{ + Hash: chainhash.Hash([32]byte{ 0xc9, 0x97, 0xa5, 0xe5, 0x6e, 0x10, 0x41, 0x02, 0xfa, 0x20, 0x9c, 0x6a, @@ -62,7 +64,7 @@ func TestBadPC(t *testing.T) { pkScript := []byte{txscript.OP_NOP} for _, test := range pcTests { - vm, err := txscript.NewEngine(pkScript, tx, 0, 0) + vm, err := txscript.NewEngine(pkScript, tx, 0, 0, 0) if err != nil { t.Errorf("Failed to create script: %v", err) } @@ -95,7 +97,7 @@ func TestCheckErrorCondition(t *testing.T) { TxIn: []*wire.TxIn{ { PreviousOutPoint: wire.OutPoint{ - Hash: wire.ShaHash([32]byte{ + Hash: chainhash.Hash([32]byte{ 0xc9, 0x97, 0xa5, 0xe5, 0x6e, 0x10, 0x41, 0x02, 0xfa, 0x20, 0x9c, 0x6a, @@ -133,7 +135,7 @@ func TestCheckErrorCondition(t *testing.T) { txscript.OP_TRUE, } - vm, err := txscript.NewEngine(pkScript, tx, 0, 0) + vm, err := txscript.NewEngine(pkScript, tx, 0, 0, 0) if err != nil { t.Errorf("failed to create script: %v", err) } @@ -187,7 +189,7 @@ func TestInvalidFlagCombinations(t *testing.T) { TxIn: []*wire.TxIn{ { PreviousOutPoint: wire.OutPoint{ - Hash: wire.ShaHash([32]byte{ + Hash: chainhash.Hash([32]byte{ 0xc9, 0x97, 0xa5, 0xe5, 0x6e, 0x10, 0x41, 0x02, 0xfa, 0x20, 0x9c, 0x6a, @@ -214,7 +216,7 @@ func TestInvalidFlagCombinations(t *testing.T) { pkScript := []byte{txscript.OP_NOP} for i, test := range tests { - _, err := txscript.NewEngine(pkScript, tx, 0, test) + _, err := txscript.NewEngine(pkScript, tx, 0, test, 0) if err != txscript.ErrInvalidFlags { t.Fatalf("TestInvalidFlagCombinations #%d unexpected "+ "error: %v", i, err) diff --git a/txscript/error.go b/txscript/error.go index 346a418e..4314f096 100644 --- a/txscript/error.go +++ b/txscript/error.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -9,6 +10,7 @@ import ( "fmt" ) +// Engine execution errors. var ( // ErrStackShortScript is returned if the script has an opcode that is // too long for the length of the script. @@ -135,6 +137,7 @@ var ( ErrStackMinimalData = errors.New("non-minimally encoded script number") ) +// Engine script errors. var ( // ErrInvalidFlags is returned when the passed flags to NewScript // contain an invalid combination. @@ -145,10 +148,45 @@ var ( ErrInvalidIndex = errors.New("invalid input index") // ErrUnsupportedAddress is returned when a concrete type that - // implements a btcutil.Address is not a supported type. + // implements a dcrutil.Address is not a supported type. ErrUnsupportedAddress = errors.New("unsupported address type") // ErrBadNumRequired is returned from MultiSigScript when nrequired is // larger than the number of provided public keys. ErrBadNumRequired = errors.New("more signatures required than keys present") + + // ErrSighashSingleIdx + ErrSighashSingleIdx = errors.New("invalid SIGHASH_SINGLE script index") + + // ErrSubstrIndexNegative indicates that the substring index was negative + // and thus invalid. 
+ ErrSubstrIdxNegative = errors.New("negative number given for substring " + + "index") + + // ErrSubstrIdxOutOfBounds indicates that the substring index was too large + // and thus invalid. + ErrSubstrIdxOutOfBounds = errors.New("out of bounds number given for " + + "substring index") + + // ErrNegativeRotation indicates that too low of a rotation depth was given + // for a uint32 bit rotation. + ErrNegativeRotation = errors.New("rotation depth negative") + + // ErrRotationOverflow indicates that too high of a rotation depth was given + // for a uint32 bit rotation. + ErrRotationOverflow = errors.New("rotation depth out of bounds") + + // ErrNegativeRotation indicates that too low of a shift depth was given + // for a uint32 bit shift. + ErrNegativeShift = errors.New("shift depth negative") + + // ErrShiftOverflow indicates that too high of a shift depth was given + // for a uint32 bit shift. + ErrShiftOverflow = errors.New("shift depth out of bounds") + + // ErrDivideByZero indicates that a user attempted to divide by zero. + ErrDivideByZero = errors.New("division by zero") + + // ErrP2SHStakeOpCodes indicates a P2SH script contained stake op codes. + ErrP2SHStakeOpCodes = errors.New("stake opcodes were found in a p2sh script") ) diff --git a/txscript/example_test.go b/txscript/example_test.go index bb3f6ac9..e4e5b4d3 100644 --- a/txscript/example_test.go +++ b/txscript/example_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2014-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,23 +9,28 @@ import ( "encoding/hex" "fmt" - "github.com/btcsuite/btcd/btcec" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainec" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) -// This example demonstrates creating a script which pays to a bitcoin address. +var secp = 0 +var edwards = 1 +var secSchnorr = 2 + +// This example demonstrates creating a script which pays to a decred address. // It also prints the created script hex and uses the DisasmString function to // display the disassembled script. func ExamplePayToAddrScript() { - // Parse the address to send the coins to into a btcutil.Address + // Parse the address to send the coins to into a dcrutil.Address // which is useful to ensure the accuracy of the address and determine // the address type. It is also required for the upcoming call to // PayToAddrScript. - addressStr := "12gpXQVcCL2qhTNQgyLVdCFG2Qs2px98nV" - address, err := btcutil.DecodeAddress(addressStr, &chaincfg.MainNetParams) + addressStr := "DsSej1qR3Fyc8kV176DCh9n9cY9nqf9Quxk" + address, err := dcrutil.DecodeAddress(addressStr, &chaincfg.MainNetParams) if err != nil { fmt.Println(err) return @@ -63,7 +69,7 @@ func ExampleExtractPkScriptAddrs() { // Extract and print details from the script. 
scriptClass, addresses, reqSigs, err := txscript.ExtractPkScriptAddrs( - script, &chaincfg.MainNetParams) + txscript.DefaultScriptVersion, script, &chaincfg.MainNetParams) if err != nil { fmt.Println(err) return @@ -74,7 +80,7 @@ func ExampleExtractPkScriptAddrs() { // Output: // Script Class: pubkeyhash - // Addresses: [12gpXQVcCL2qhTNQgyLVdCFG2Qs2px98nV] + // Addresses: [DsSej1qR3Fyc8kV176DCh9n9cY9nqf9Quxk] // Required Signatures: 1 } @@ -88,10 +94,10 @@ func ExampleSignTxOutput() { fmt.Println(err) return } - privKey, pubKey := btcec.PrivKeyFromBytes(btcec.S256(), privKeyBytes) - pubKeyHash := btcutil.Hash160(pubKey.SerializeCompressed()) - addr, err := btcutil.NewAddressPubKeyHash(pubKeyHash, - &chaincfg.MainNetParams) + privKey, pubKey := chainec.Secp256k1.PrivKeyFromBytes(privKeyBytes) + pubKeyHash := dcrutil.Hash160(pubKey.SerializeCompressed()) + addr, err := dcrutil.NewAddressPubKeyHash(pubKeyHash, + &chaincfg.MainNetParams, chainec.ECTypeSecp256k1) if err != nil { fmt.Println(err) return @@ -99,9 +105,9 @@ func ExampleSignTxOutput() { // For this example, create a fake transaction that represents what // would ordinarily be the real transaction that is being spent. It - // contains a single output that pays to address in the amount of 1 BTC. + // contains a single output that pays to address in the amount of 1 DCR. originTx := wire.NewMsgTx() - prevOut := wire.NewOutPoint(&wire.ShaHash{}, ^uint32(0)) + prevOut := wire.NewOutPoint(&chainhash.Hash{}, ^uint32(0), dcrutil.TxTreeRegular) txIn := wire.NewTxIn(prevOut, []byte{txscript.OP_0, txscript.OP_0}) originTx.AddTxIn(txIn) pkScript, err := txscript.PayToAddrScript(addr) @@ -119,7 +125,7 @@ func ExampleSignTxOutput() { // Add the input(s) the redeeming transaction will spend. There is no // signature script at this point since it hasn't been created or signed // yet, hence nil is provided for it. - prevOut = wire.NewOutPoint(&originTxHash, 0) + prevOut = wire.NewOutPoint(&originTxHash, 0, dcrutil.TxTreeRegular) txIn = wire.NewTxIn(prevOut, nil) redeemTx.AddTxIn(txIn) @@ -129,7 +135,7 @@ func ExampleSignTxOutput() { redeemTx.AddTxOut(txOut) // Sign the redeeming transaction. - lookupKey := func(a btcutil.Address) (*btcec.PrivateKey, bool, error) { + lookupKey := func(a dcrutil.Address) (chainec.PrivateKey, bool, error) { // Ordinarily this function would involve looking up the private // key for the provided address, but since the only thing being // signed in this example uses the address associated with the @@ -152,7 +158,7 @@ func ExampleSignTxOutput() { // being signed. sigScript, err := txscript.SignTxOutput(&chaincfg.MainNetParams, redeemTx, 0, originTx.TxOut[0].PkScript, txscript.SigHashAll, - txscript.KeyClosure(lookupKey), nil, nil) + txscript.KeyClosure(lookupKey), nil, nil, secp) if err != nil { fmt.Println(err) return @@ -162,10 +168,9 @@ func ExampleSignTxOutput() { // Prove that the transaction has been validly signed by executing the // script pair. 
flags := txscript.ScriptBip16 | txscript.ScriptVerifyDERSignatures | - txscript.ScriptStrictMultiSig | txscript.ScriptDiscourageUpgradableNops vm, err := txscript.NewEngine(originTx.TxOut[0].PkScript, redeemTx, 0, - flags) + flags, 0) if err != nil { fmt.Println(err) return diff --git a/txscript/internal_test.go b/txscript/internal_test.go index 8fc1c00d..5f807865 100644 --- a/txscript/internal_test.go +++ b/txscript/internal_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -2906,33 +2907,33 @@ func TestUnparsingInvalidOpcodes(t *testing.T) { expectedErr: ErrStackInvalidOpcode, }, { - name: "OP_RESERVED1", + name: "OP_ROTR", pop: &parsedOpcode{ - opcode: &opcodeArray[OP_RESERVED1], + opcode: &opcodeArray[OP_ROTR], data: nil, }, expectedErr: nil, }, { - name: "OP_RESERVED1 long", + name: "OP_ROTR long", pop: &parsedOpcode{ - opcode: &opcodeArray[OP_RESERVED1], + opcode: &opcodeArray[OP_ROTR], data: make([]byte, 1), }, expectedErr: ErrStackInvalidOpcode, }, { - name: "OP_RESERVED2", + name: "OP_ROTL", pop: &parsedOpcode{ - opcode: &opcodeArray[OP_RESERVED2], + opcode: &opcodeArray[OP_ROTL], data: nil, }, expectedErr: nil, }, { - name: "OP_RESERVED2 long", + name: "OP_ROTL long", pop: &parsedOpcode{ - opcode: &opcodeArray[OP_RESERVED2], + opcode: &opcodeArray[OP_ROTL], data: make([]byte, 1), }, expectedErr: ErrStackInvalidOpcode, diff --git a/txscript/log.go b/txscript/log.go index f63da728..dc3aac97 100644 --- a/txscript/log.go +++ b/txscript/log.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. diff --git a/txscript/log_test.go b/txscript/log_test.go index 851f9372..5619e9a9 100644 --- a/txscript/log_test.go +++ b/txscript/log_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -10,7 +11,7 @@ import ( "os" "testing" - "github.com/btcsuite/btcd/txscript" + "github.com/decred/dcrd/txscript" ) func TestSetLogWriter(t *testing.T) { diff --git a/txscript/opcode.go b/txscript/opcode.go index a5a8d213..ff86a1c3 100644 --- a/txscript/opcode.go +++ b/txscript/opcode.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,15 +9,20 @@ import ( "bytes" "crypto/sha1" "encoding/binary" + "errors" "fmt" "hash" - "github.com/btcsuite/btcd/btcec" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/fastsha256" "github.com/btcsuite/golangcrypto/ripemd160" + + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainec" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" ) +var optimizeSigVerification = chaincfg.SigHashOptimization + // An opcode defines the information related to a txscript opcode. opfunc if // present is the function to call to perform the opcode on the script. 
The // current script is passed in as a slice with the first member being the opcode @@ -30,7 +36,7 @@ type opcode struct { // These constants are the values of the official opcodes used on the btc wiki, // in bitcoin core and in most if not all other references and software related -// to handling BTC scripts. +// to handling DCR scripts. const ( OP_0 = 0x00 // 0 OP_FALSE = 0x00 // 0 - AKA OP_0 @@ -171,8 +177,8 @@ const ( OP_XOR = 0x86 // 134 OP_EQUAL = 0x87 // 135 OP_EQUALVERIFY = 0x88 // 136 - OP_RESERVED1 = 0x89 // 137 - OP_RESERVED2 = 0x8a // 138 + OP_ROTR = 0x89 // 137 + OP_ROTL = 0x8a // 138 OP_1ADD = 0x8b // 139 OP_1SUB = 0x8c // 140 OP_2MUL = 0x8d // 141 @@ -212,6 +218,7 @@ const ( OP_CHECKMULTISIGVERIFY = 0xaf // 175 OP_NOP1 = 0xb0 // 176 OP_NOP2 = 0xb1 // 177 + OP_CHECKLOCKTIMEVERIFY = 0xb1 // 177 - AKA OP_NOP2 OP_NOP3 = 0xb2 // 178 OP_NOP4 = 0xb3 // 179 OP_NOP5 = 0xb4 // 180 @@ -220,12 +227,12 @@ const ( OP_NOP8 = 0xb7 // 183 OP_NOP9 = 0xb8 // 184 OP_NOP10 = 0xb9 // 185 - OP_UNKNOWN186 = 0xba // 186 - OP_UNKNOWN187 = 0xbb // 187 - OP_UNKNOWN188 = 0xbc // 188 - OP_UNKNOWN189 = 0xbd // 189 - OP_UNKNOWN190 = 0xbe // 190 - OP_UNKNOWN191 = 0xbf // 191 + OP_SSTX = 0xba // 186 DECRED + OP_SSGEN = 0xbb // 187 DECRED + OP_SSRTX = 0xbc // 188 DECRED + OP_SSTXCHANGE = 0xbd // 189 DECRED + OP_CHECKSIGALT = 0xbe // 190 DECRED + OP_CHECKSIGALTVERIFY = 0xbf // 191 DECRED OP_UNKNOWN192 = 0xc0 // 192 OP_UNKNOWN193 = 0xc1 // 193 OP_UNKNOWN194 = 0xc2 // 194 @@ -403,16 +410,17 @@ var opcodeArray = [256]opcode{ OP_16: {OP_16, "OP_16", 1, opcodeN}, // Control opcodes. - OP_NOP: {OP_NOP, "OP_NOP", 1, opcodeNop}, - OP_VER: {OP_VER, "OP_VER", 1, opcodeReserved}, - OP_IF: {OP_IF, "OP_IF", 1, opcodeIf}, - OP_NOTIF: {OP_NOTIF, "OP_NOTIF", 1, opcodeNotIf}, - OP_VERIF: {OP_VERIF, "OP_VERIF", 1, opcodeReserved}, - OP_VERNOTIF: {OP_VERNOTIF, "OP_VERNOTIF", 1, opcodeReserved}, - OP_ELSE: {OP_ELSE, "OP_ELSE", 1, opcodeElse}, - OP_ENDIF: {OP_ENDIF, "OP_ENDIF", 1, opcodeEndif}, - OP_VERIFY: {OP_VERIFY, "OP_VERIFY", 1, opcodeVerify}, - OP_RETURN: {OP_RETURN, "OP_RETURN", 1, opcodeReturn}, + OP_NOP: {OP_NOP, "OP_NOP", 1, opcodeNop}, + OP_VER: {OP_VER, "OP_VER", 1, opcodeReserved}, + OP_IF: {OP_IF, "OP_IF", 1, opcodeIf}, + OP_NOTIF: {OP_NOTIF, "OP_NOTIF", 1, opcodeNotIf}, + OP_VERIF: {OP_VERIF, "OP_VERIF", 1, opcodeReserved}, + OP_VERNOTIF: {OP_VERNOTIF, "OP_VERNOTIF", 1, opcodeReserved}, + OP_ELSE: {OP_ELSE, "OP_ELSE", 1, opcodeElse}, + OP_ENDIF: {OP_ENDIF, "OP_ENDIF", 1, opcodeEndif}, + OP_VERIFY: {OP_VERIFY, "OP_VERIFY", 1, opcodeVerify}, + OP_RETURN: {OP_RETURN, "OP_RETURN", 1, opcodeReturn}, + OP_CHECKLOCKTIMEVERIFY: {OP_CHECKLOCKTIMEVERIFY, "OP_CHECKLOCKTIMEVERIFY", 1, opcodeCheckLockTimeVerify}, // Stack opcodes. OP_TOALTSTACK: {OP_TOALTSTACK, "OP_TOALTSTACK", 1, opcodeToAltStack}, @@ -436,38 +444,42 @@ var opcodeArray = [256]opcode{ OP_TUCK: {OP_TUCK, "OP_TUCK", 1, opcodeTuck}, // Splice opcodes. - OP_CAT: {OP_CAT, "OP_CAT", 1, opcodeDisabled}, - OP_SUBSTR: {OP_SUBSTR, "OP_SUBSTR", 1, opcodeDisabled}, - OP_LEFT: {OP_LEFT, "OP_LEFT", 1, opcodeDisabled}, - OP_RIGHT: {OP_RIGHT, "OP_RIGHT", 1, opcodeDisabled}, + OP_CAT: {OP_CAT, "OP_CAT", 1, opcodeCat}, + OP_SUBSTR: {OP_SUBSTR, "OP_SUBSTR", 1, opcodeSubstr}, + OP_LEFT: {OP_LEFT, "OP_LEFT", 1, opcodeLeft}, + OP_RIGHT: {OP_RIGHT, "OP_RIGHT", 1, opcodeRight}, OP_SIZE: {OP_SIZE, "OP_SIZE", 1, opcodeSize}, - // Bitwise logic opcodes. 
- OP_INVERT: {OP_INVERT, "OP_INVERT", 1, opcodeDisabled}, - OP_AND: {OP_AND, "OP_AND", 1, opcodeDisabled}, - OP_OR: {OP_OR, "OP_OR", 1, opcodeDisabled}, - OP_XOR: {OP_XOR, "OP_XOR", 1, opcodeDisabled}, + // Bitwise logic opcodes for int32 registers derived from the stack. + OP_INVERT: {OP_INVERT, "OP_INVERT", 1, opcodeInvert}, + OP_AND: {OP_AND, "OP_AND", 1, opcodeAnd}, + OP_OR: {OP_OR, "OP_OR", 1, opcodeOr}, + OP_XOR: {OP_XOR, "OP_XOR", 1, opcodeXor}, + + // Bytewise comparison function opcodes for byte strings. OP_EQUAL: {OP_EQUAL, "OP_EQUAL", 1, opcodeEqual}, OP_EQUALVERIFY: {OP_EQUALVERIFY, "OP_EQUALVERIFY", 1, opcodeEqualVerify}, - OP_RESERVED1: {OP_RESERVED1, "OP_RESERVED1", 1, opcodeReserved}, - OP_RESERVED2: {OP_RESERVED2, "OP_RESERVED2", 1, opcodeReserved}, + + // Bitwise rotation opcodes for an int32 register derived from the stack. + OP_ROTR: {OP_ROTR, "OP_ROTR", 1, opcodeRotr}, + OP_ROTL: {OP_ROTL, "OP_ROTL", 1, opcodeRotl}, // Numeric related opcodes. OP_1ADD: {OP_1ADD, "OP_1ADD", 1, opcode1Add}, OP_1SUB: {OP_1SUB, "OP_1SUB", 1, opcode1Sub}, - OP_2MUL: {OP_2MUL, "OP_2MUL", 1, opcodeDisabled}, - OP_2DIV: {OP_2DIV, "OP_2DIV", 1, opcodeDisabled}, + OP_2MUL: {OP_2MUL, "OP_2MUL", 1, opcodeNop}, + OP_2DIV: {OP_2DIV, "OP_2DIV", 1, opcodeNop}, OP_NEGATE: {OP_NEGATE, "OP_NEGATE", 1, opcodeNegate}, OP_ABS: {OP_ABS, "OP_ABS", 1, opcodeAbs}, OP_NOT: {OP_NOT, "OP_NOT", 1, opcodeNot}, OP_0NOTEQUAL: {OP_0NOTEQUAL, "OP_0NOTEQUAL", 1, opcode0NotEqual}, OP_ADD: {OP_ADD, "OP_ADD", 1, opcodeAdd}, OP_SUB: {OP_SUB, "OP_SUB", 1, opcodeSub}, - OP_MUL: {OP_MUL, "OP_MUL", 1, opcodeDisabled}, - OP_DIV: {OP_DIV, "OP_DIV", 1, opcodeDisabled}, - OP_MOD: {OP_MOD, "OP_MOD", 1, opcodeDisabled}, - OP_LSHIFT: {OP_LSHIFT, "OP_LSHIFT", 1, opcodeDisabled}, - OP_RSHIFT: {OP_RSHIFT, "OP_RSHIFT", 1, opcodeDisabled}, + OP_MUL: {OP_MUL, "OP_MUL", 1, opcodeMul}, + OP_DIV: {OP_DIV, "OP_DIV", 1, opcodeDiv}, + OP_MOD: {OP_MOD, "OP_MOD", 1, opcodeMod}, + OP_LSHIFT: {OP_LSHIFT, "OP_LSHIFT", 1, opcodeLShift}, + OP_RSHIFT: {OP_RSHIFT, "OP_RSHIFT", 1, opcodeRShift}, OP_BOOLAND: {OP_BOOLAND, "OP_BOOLAND", 1, opcodeBoolAnd}, OP_BOOLOR: {OP_BOOLOR, "OP_BOOLOR", 1, opcodeBoolOr}, OP_NUMEQUAL: {OP_NUMEQUAL, "OP_NUMEQUAL", 1, opcodeNumEqual}, @@ -487,7 +499,7 @@ var opcodeArray = [256]opcode{ OP_SHA256: {OP_SHA256, "OP_SHA256", 1, opcodeSha256}, OP_HASH160: {OP_HASH160, "OP_HASH160", 1, opcodeHash160}, OP_HASH256: {OP_HASH256, "OP_HASH256", 1, opcodeHash256}, - OP_CODESEPARATOR: {OP_CODESEPARATOR, "OP_CODESEPARATOR", 1, opcodeCodeSeparator}, + OP_CODESEPARATOR: {OP_CODESEPARATOR, "OP_CODESEPARATOR", 1, opcodeDisabled}, // Disabled OP_CHECKSIG: {OP_CHECKSIG, "OP_CHECKSIG", 1, opcodeCheckSig}, OP_CHECKSIGVERIFY: {OP_CHECKSIGVERIFY, "OP_CHECKSIGVERIFY", 1, opcodeCheckSigVerify}, OP_CHECKMULTISIG: {OP_CHECKMULTISIG, "OP_CHECKMULTISIG", 1, opcodeCheckMultiSig}, @@ -495,7 +507,6 @@ var opcodeArray = [256]opcode{ // Reserved opcodes. OP_NOP1: {OP_NOP1, "OP_NOP1", 1, opcodeNop}, - OP_NOP2: {OP_NOP2, "OP_NOP2", 1, opcodeNop}, OP_NOP3: {OP_NOP3, "OP_NOP3", 1, opcodeNop}, OP_NOP4: {OP_NOP4, "OP_NOP4", 1, opcodeNop}, OP_NOP5: {OP_NOP5, "OP_NOP5", 1, opcodeNop}, @@ -505,70 +516,74 @@ var opcodeArray = [256]opcode{ OP_NOP9: {OP_NOP9, "OP_NOP9", 1, opcodeNop}, OP_NOP10: {OP_NOP10, "OP_NOP10", 1, opcodeNop}, + // SS* opcodes. 
+ OP_SSTX: {OP_SSTX, "OP_SSTX", 1, opcodeNop}, + OP_SSGEN: {OP_SSGEN, "OP_SSGEN", 1, opcodeNop}, + OP_SSRTX: {OP_SSRTX, "OP_SSRTX", 1, opcodeNop}, + OP_SSTXCHANGE: {OP_SSTXCHANGE, "OP_SSTXCHANGE", 1, opcodeNop}, + + // Alternative checksig opcode. + OP_CHECKSIGALT: {OP_CHECKSIGALT, "OP_CHECKSIGALT", 1, opcodeCheckSigAlt}, + OP_CHECKSIGALTVERIFY: {OP_CHECKSIGALTVERIFY, "OP_CHECKSIGALTVERIFY", 1, opcodeCheckSigAltVerify}, + // Undefined opcodes. - OP_UNKNOWN186: {OP_UNKNOWN186, "OP_UNKNOWN186", 1, opcodeInvalid}, - OP_UNKNOWN187: {OP_UNKNOWN187, "OP_UNKNOWN187", 1, opcodeInvalid}, - OP_UNKNOWN188: {OP_UNKNOWN188, "OP_UNKNOWN188", 1, opcodeInvalid}, - OP_UNKNOWN189: {OP_UNKNOWN189, "OP_UNKNOWN189", 1, opcodeInvalid}, - OP_UNKNOWN190: {OP_UNKNOWN190, "OP_UNKNOWN190", 1, opcodeInvalid}, - OP_UNKNOWN191: {OP_UNKNOWN191, "OP_UNKNOWN191", 1, opcodeInvalid}, - OP_UNKNOWN192: {OP_UNKNOWN192, "OP_UNKNOWN192", 1, opcodeInvalid}, - OP_UNKNOWN193: {OP_UNKNOWN193, "OP_UNKNOWN193", 1, opcodeInvalid}, - OP_UNKNOWN194: {OP_UNKNOWN194, "OP_UNKNOWN194", 1, opcodeInvalid}, - OP_UNKNOWN195: {OP_UNKNOWN195, "OP_UNKNOWN195", 1, opcodeInvalid}, - OP_UNKNOWN196: {OP_UNKNOWN196, "OP_UNKNOWN196", 1, opcodeInvalid}, - OP_UNKNOWN197: {OP_UNKNOWN197, "OP_UNKNOWN197", 1, opcodeInvalid}, - OP_UNKNOWN198: {OP_UNKNOWN198, "OP_UNKNOWN198", 1, opcodeInvalid}, - OP_UNKNOWN199: {OP_UNKNOWN199, "OP_UNKNOWN199", 1, opcodeInvalid}, - OP_UNKNOWN200: {OP_UNKNOWN200, "OP_UNKNOWN200", 1, opcodeInvalid}, - OP_UNKNOWN201: {OP_UNKNOWN201, "OP_UNKNOWN201", 1, opcodeInvalid}, - OP_UNKNOWN202: {OP_UNKNOWN202, "OP_UNKNOWN202", 1, opcodeInvalid}, - OP_UNKNOWN203: {OP_UNKNOWN203, "OP_UNKNOWN203", 1, opcodeInvalid}, - OP_UNKNOWN204: {OP_UNKNOWN204, "OP_UNKNOWN204", 1, opcodeInvalid}, - OP_UNKNOWN205: {OP_UNKNOWN205, "OP_UNKNOWN205", 1, opcodeInvalid}, - OP_UNKNOWN206: {OP_UNKNOWN206, "OP_UNKNOWN206", 1, opcodeInvalid}, - OP_UNKNOWN207: {OP_UNKNOWN207, "OP_UNKNOWN207", 1, opcodeInvalid}, - OP_UNKNOWN208: {OP_UNKNOWN208, "OP_UNKNOWN208", 1, opcodeInvalid}, - OP_UNKNOWN209: {OP_UNKNOWN209, "OP_UNKNOWN209", 1, opcodeInvalid}, - OP_UNKNOWN210: {OP_UNKNOWN210, "OP_UNKNOWN210", 1, opcodeInvalid}, - OP_UNKNOWN211: {OP_UNKNOWN211, "OP_UNKNOWN211", 1, opcodeInvalid}, - OP_UNKNOWN212: {OP_UNKNOWN212, "OP_UNKNOWN212", 1, opcodeInvalid}, - OP_UNKNOWN213: {OP_UNKNOWN213, "OP_UNKNOWN213", 1, opcodeInvalid}, - OP_UNKNOWN214: {OP_UNKNOWN214, "OP_UNKNOWN214", 1, opcodeInvalid}, - OP_UNKNOWN215: {OP_UNKNOWN215, "OP_UNKNOWN215", 1, opcodeInvalid}, - OP_UNKNOWN216: {OP_UNKNOWN216, "OP_UNKNOWN216", 1, opcodeInvalid}, - OP_UNKNOWN217: {OP_UNKNOWN217, "OP_UNKNOWN217", 1, opcodeInvalid}, - OP_UNKNOWN218: {OP_UNKNOWN218, "OP_UNKNOWN218", 1, opcodeInvalid}, - OP_UNKNOWN219: {OP_UNKNOWN219, "OP_UNKNOWN219", 1, opcodeInvalid}, - OP_UNKNOWN220: {OP_UNKNOWN220, "OP_UNKNOWN220", 1, opcodeInvalid}, - OP_UNKNOWN221: {OP_UNKNOWN221, "OP_UNKNOWN221", 1, opcodeInvalid}, - OP_UNKNOWN222: {OP_UNKNOWN222, "OP_UNKNOWN222", 1, opcodeInvalid}, - OP_UNKNOWN223: {OP_UNKNOWN223, "OP_UNKNOWN223", 1, opcodeInvalid}, - OP_UNKNOWN224: {OP_UNKNOWN224, "OP_UNKNOWN224", 1, opcodeInvalid}, - OP_UNKNOWN225: {OP_UNKNOWN225, "OP_UNKNOWN225", 1, opcodeInvalid}, - OP_UNKNOWN226: {OP_UNKNOWN226, "OP_UNKNOWN226", 1, opcodeInvalid}, - OP_UNKNOWN227: {OP_UNKNOWN227, "OP_UNKNOWN227", 1, opcodeInvalid}, - OP_UNKNOWN228: {OP_UNKNOWN228, "OP_UNKNOWN228", 1, opcodeInvalid}, - OP_UNKNOWN229: {OP_UNKNOWN229, "OP_UNKNOWN229", 1, opcodeInvalid}, - OP_UNKNOWN230: {OP_UNKNOWN230, "OP_UNKNOWN230", 1, opcodeInvalid}, - 
OP_UNKNOWN231: {OP_UNKNOWN231, "OP_UNKNOWN231", 1, opcodeInvalid}, - OP_UNKNOWN232: {OP_UNKNOWN232, "OP_UNKNOWN232", 1, opcodeInvalid}, - OP_UNKNOWN233: {OP_UNKNOWN233, "OP_UNKNOWN233", 1, opcodeInvalid}, - OP_UNKNOWN234: {OP_UNKNOWN234, "OP_UNKNOWN234", 1, opcodeInvalid}, - OP_UNKNOWN235: {OP_UNKNOWN235, "OP_UNKNOWN235", 1, opcodeInvalid}, - OP_UNKNOWN236: {OP_UNKNOWN236, "OP_UNKNOWN236", 1, opcodeInvalid}, - OP_UNKNOWN237: {OP_UNKNOWN237, "OP_UNKNOWN237", 1, opcodeInvalid}, - OP_UNKNOWN238: {OP_UNKNOWN238, "OP_UNKNOWN238", 1, opcodeInvalid}, - OP_UNKNOWN239: {OP_UNKNOWN239, "OP_UNKNOWN239", 1, opcodeInvalid}, - OP_UNKNOWN240: {OP_UNKNOWN240, "OP_UNKNOWN240", 1, opcodeInvalid}, - OP_UNKNOWN241: {OP_UNKNOWN241, "OP_UNKNOWN241", 1, opcodeInvalid}, - OP_UNKNOWN242: {OP_UNKNOWN242, "OP_UNKNOWN242", 1, opcodeInvalid}, - OP_UNKNOWN243: {OP_UNKNOWN243, "OP_UNKNOWN243", 1, opcodeInvalid}, - OP_UNKNOWN244: {OP_UNKNOWN244, "OP_UNKNOWN244", 1, opcodeInvalid}, - OP_UNKNOWN245: {OP_UNKNOWN245, "OP_UNKNOWN245", 1, opcodeInvalid}, - OP_UNKNOWN246: {OP_UNKNOWN246, "OP_UNKNOWN246", 1, opcodeInvalid}, - OP_UNKNOWN247: {OP_UNKNOWN247, "OP_UNKNOWN247", 1, opcodeInvalid}, - OP_UNKNOWN248: {OP_UNKNOWN248, "OP_UNKNOWN248", 1, opcodeInvalid}, + OP_UNKNOWN192: {OP_UNKNOWN192, "OP_UNKNOWN192", 1, opcodeNop}, + OP_UNKNOWN193: {OP_UNKNOWN193, "OP_UNKNOWN193", 1, opcodeNop}, + OP_UNKNOWN194: {OP_UNKNOWN194, "OP_UNKNOWN194", 1, opcodeNop}, + OP_UNKNOWN195: {OP_UNKNOWN195, "OP_UNKNOWN195", 1, opcodeNop}, + OP_UNKNOWN196: {OP_UNKNOWN196, "OP_UNKNOWN196", 1, opcodeNop}, + OP_UNKNOWN197: {OP_UNKNOWN197, "OP_UNKNOWN197", 1, opcodeNop}, + OP_UNKNOWN198: {OP_UNKNOWN198, "OP_UNKNOWN198", 1, opcodeNop}, + OP_UNKNOWN199: {OP_UNKNOWN199, "OP_UNKNOWN199", 1, opcodeNop}, + OP_UNKNOWN200: {OP_UNKNOWN200, "OP_UNKNOWN200", 1, opcodeNop}, + OP_UNKNOWN201: {OP_UNKNOWN201, "OP_UNKNOWN201", 1, opcodeNop}, + OP_UNKNOWN202: {OP_UNKNOWN202, "OP_UNKNOWN202", 1, opcodeNop}, + OP_UNKNOWN203: {OP_UNKNOWN203, "OP_UNKNOWN203", 1, opcodeNop}, + OP_UNKNOWN204: {OP_UNKNOWN204, "OP_UNKNOWN204", 1, opcodeNop}, + OP_UNKNOWN205: {OP_UNKNOWN205, "OP_UNKNOWN205", 1, opcodeNop}, + OP_UNKNOWN206: {OP_UNKNOWN206, "OP_UNKNOWN206", 1, opcodeNop}, + OP_UNKNOWN207: {OP_UNKNOWN207, "OP_UNKNOWN207", 1, opcodeNop}, + OP_UNKNOWN208: {OP_UNKNOWN208, "OP_UNKNOWN208", 1, opcodeNop}, + OP_UNKNOWN209: {OP_UNKNOWN209, "OP_UNKNOWN209", 1, opcodeNop}, + OP_UNKNOWN210: {OP_UNKNOWN210, "OP_UNKNOWN210", 1, opcodeNop}, + OP_UNKNOWN211: {OP_UNKNOWN211, "OP_UNKNOWN211", 1, opcodeNop}, + OP_UNKNOWN212: {OP_UNKNOWN212, "OP_UNKNOWN212", 1, opcodeNop}, + OP_UNKNOWN213: {OP_UNKNOWN213, "OP_UNKNOWN213", 1, opcodeNop}, + OP_UNKNOWN214: {OP_UNKNOWN214, "OP_UNKNOWN214", 1, opcodeNop}, + OP_UNKNOWN215: {OP_UNKNOWN215, "OP_UNKNOWN215", 1, opcodeNop}, + OP_UNKNOWN216: {OP_UNKNOWN216, "OP_UNKNOWN216", 1, opcodeNop}, + OP_UNKNOWN217: {OP_UNKNOWN217, "OP_UNKNOWN217", 1, opcodeNop}, + OP_UNKNOWN218: {OP_UNKNOWN218, "OP_UNKNOWN218", 1, opcodeNop}, + OP_UNKNOWN219: {OP_UNKNOWN219, "OP_UNKNOWN219", 1, opcodeNop}, + OP_UNKNOWN220: {OP_UNKNOWN220, "OP_UNKNOWN220", 1, opcodeNop}, + OP_UNKNOWN221: {OP_UNKNOWN221, "OP_UNKNOWN221", 1, opcodeNop}, + OP_UNKNOWN222: {OP_UNKNOWN222, "OP_UNKNOWN222", 1, opcodeNop}, + OP_UNKNOWN223: {OP_UNKNOWN223, "OP_UNKNOWN223", 1, opcodeNop}, + OP_UNKNOWN224: {OP_UNKNOWN224, "OP_UNKNOWN224", 1, opcodeNop}, + OP_UNKNOWN225: {OP_UNKNOWN225, "OP_UNKNOWN225", 1, opcodeNop}, + OP_UNKNOWN226: {OP_UNKNOWN226, "OP_UNKNOWN226", 1, opcodeNop}, + OP_UNKNOWN227: {OP_UNKNOWN227, 
"OP_UNKNOWN227", 1, opcodeNop}, + OP_UNKNOWN228: {OP_UNKNOWN228, "OP_UNKNOWN228", 1, opcodeNop}, + OP_UNKNOWN229: {OP_UNKNOWN229, "OP_UNKNOWN229", 1, opcodeNop}, + OP_UNKNOWN230: {OP_UNKNOWN230, "OP_UNKNOWN230", 1, opcodeNop}, + OP_UNKNOWN231: {OP_UNKNOWN231, "OP_UNKNOWN231", 1, opcodeNop}, + OP_UNKNOWN232: {OP_UNKNOWN232, "OP_UNKNOWN232", 1, opcodeNop}, + OP_UNKNOWN233: {OP_UNKNOWN233, "OP_UNKNOWN233", 1, opcodeNop}, + OP_UNKNOWN234: {OP_UNKNOWN234, "OP_UNKNOWN234", 1, opcodeNop}, + OP_UNKNOWN235: {OP_UNKNOWN235, "OP_UNKNOWN235", 1, opcodeNop}, + OP_UNKNOWN236: {OP_UNKNOWN236, "OP_UNKNOWN236", 1, opcodeNop}, + OP_UNKNOWN237: {OP_UNKNOWN237, "OP_UNKNOWN237", 1, opcodeNop}, + OP_UNKNOWN238: {OP_UNKNOWN238, "OP_UNKNOWN238", 1, opcodeNop}, + OP_UNKNOWN239: {OP_UNKNOWN239, "OP_UNKNOWN239", 1, opcodeNop}, + OP_UNKNOWN240: {OP_UNKNOWN240, "OP_UNKNOWN240", 1, opcodeNop}, + OP_UNKNOWN241: {OP_UNKNOWN241, "OP_UNKNOWN241", 1, opcodeNop}, + OP_UNKNOWN242: {OP_UNKNOWN242, "OP_UNKNOWN242", 1, opcodeNop}, + OP_UNKNOWN243: {OP_UNKNOWN243, "OP_UNKNOWN243", 1, opcodeNop}, + OP_UNKNOWN244: {OP_UNKNOWN244, "OP_UNKNOWN244", 1, opcodeNop}, + OP_UNKNOWN245: {OP_UNKNOWN245, "OP_UNKNOWN245", 1, opcodeNop}, + OP_UNKNOWN246: {OP_UNKNOWN246, "OP_UNKNOWN246", 1, opcodeNop}, + OP_UNKNOWN247: {OP_UNKNOWN247, "OP_UNKNOWN247", 1, opcodeNop}, + OP_UNKNOWN248: {OP_UNKNOWN248, "OP_UNKNOWN248", 1, opcodeNop}, // Bitcoin Core internal use opcode. Defined here for completeness. OP_SMALLDATA: {OP_SMALLDATA, "OP_SMALLDATA", 1, opcodeInvalid}, @@ -617,35 +632,7 @@ type parsedOpcode struct { // bad to see in the instruction stream (even if turned off by a conditional). func (pop *parsedOpcode) isDisabled() bool { switch pop.opcode.value { - case OP_CAT: - return true - case OP_SUBSTR: - return true - case OP_LEFT: - return true - case OP_RIGHT: - return true - case OP_INVERT: - return true - case OP_AND: - return true - case OP_OR: - return true - case OP_XOR: - return true - case OP_2MUL: - return true - case OP_2DIV: - return true - case OP_MUL: - return true - case OP_DIV: - return true - case OP_MOD: - return true - case OP_LSHIFT: - return true - case OP_RSHIFT: + case OP_CODESEPARATOR: return true default: return false @@ -873,11 +860,40 @@ func opcodeN(op *parsedOpcode, vm *Engine) error { // the flag to discourage use of NOPs is set for select opcodes. 
func opcodeNop(op *parsedOpcode, vm *Engine) error { switch op.opcode.value { - case OP_NOP1, OP_NOP2, OP_NOP3, OP_NOP4, OP_NOP5, - OP_NOP6, OP_NOP7, OP_NOP8, OP_NOP9, OP_NOP10: + case OP_NOP1, OP_NOP3, OP_NOP4, OP_NOP5, + OP_NOP6, OP_NOP7, OP_NOP8, OP_NOP9, OP_NOP10, + OP_UNKNOWN192, OP_UNKNOWN193, + OP_UNKNOWN194, OP_UNKNOWN195, + OP_UNKNOWN196, OP_UNKNOWN197, + OP_UNKNOWN198, OP_UNKNOWN199, + OP_UNKNOWN200, OP_UNKNOWN201, + OP_UNKNOWN202, OP_UNKNOWN203, + OP_UNKNOWN204, OP_UNKNOWN205, + OP_UNKNOWN206, OP_UNKNOWN207, + OP_UNKNOWN208, OP_UNKNOWN209, + OP_UNKNOWN210, OP_UNKNOWN211, + OP_UNKNOWN212, OP_UNKNOWN213, + OP_UNKNOWN214, OP_UNKNOWN215, + OP_UNKNOWN216, OP_UNKNOWN217, + OP_UNKNOWN218, OP_UNKNOWN219, + OP_UNKNOWN220, OP_UNKNOWN221, + OP_UNKNOWN222, OP_UNKNOWN223, + OP_UNKNOWN224, OP_UNKNOWN225, + OP_UNKNOWN226, OP_UNKNOWN227, + OP_UNKNOWN228, OP_UNKNOWN229, + OP_UNKNOWN230, OP_UNKNOWN231, + OP_UNKNOWN232, OP_UNKNOWN233, + OP_UNKNOWN234, OP_UNKNOWN235, + OP_UNKNOWN236, OP_UNKNOWN237, + OP_UNKNOWN238, OP_UNKNOWN239, + OP_UNKNOWN240, OP_UNKNOWN241, + OP_UNKNOWN242, OP_UNKNOWN243, + OP_UNKNOWN244, OP_UNKNOWN245, + OP_UNKNOWN246, OP_UNKNOWN247, + OP_UNKNOWN248: if vm.hasFlag(ScriptDiscourageUpgradableNops) { - return fmt.Errorf("OP_NOP%d reserved for soft-fork "+ - "upgrades", op.opcode.value-(OP_NOP1-1)) + return fmt.Errorf("OP_NOP at %d reserved for soft-fork "+ + "upgrades", op.opcode.value) } } return nil @@ -1006,6 +1022,86 @@ func opcodeReturn(op *parsedOpcode, vm *Engine) error { return ErrStackEarlyReturn } +// opcodeCheckLockTimeVerify compares the top item on the data stack to the +// LockTime field of the transaction containing the script signature +// validating if the transaction outputs are spendable yet. If flag +// ScriptVerifyCheckLockTimeVerify is not set, the code continues as if OP_NOP2 +// were executed. +func opcodeCheckLockTimeVerify(op *parsedOpcode, vm *Engine) error { + // If the ScriptVerifyCheckLockTimeVerify script flag is not set, treat + // opcode as OP_NOP2 instead. + if !vm.hasFlag(ScriptVerifyCheckLockTimeVerify) { + if vm.hasFlag(ScriptDiscourageUpgradableNops) { + return errors.New("OP_NOP2 reserved for soft-fork " + + "upgrades") + } + return nil + } + + // The current transaction locktime is a uint32 resulting in a maximum + // locktime of 2^32-1 (the year 2106). However, scriptNums are signed + // and therefore a standard 4-byte scriptNum would only support up to a + // maximum of 2^31-1 (the year 2038). Thus, a 5-byte scriptNum is used + // here since it will support up to 2^39-1 which allows dates beyond the + // current locktime limit. + // + // PeekByteArray is used here instead of PeekInt because we do not want + // to be limited to a 4-byte integer for reasons specified above. + so, err := vm.dstack.PeekByteArray(0) + if err != nil { + return err + } + lockTime, err := makeScriptNum(so, vm.dstack.verifyMinimalData, 5) + if err != nil { + return err + } + + // In the rare event that the argument may be < 0 due to some arithmetic + // being done first, you can always use 0 OP_MAX OP_CHECKLOCKTIMEVERIFY. + if lockTime < 0 { + return fmt.Errorf("negative locktime: %d", lockTime) + } + + // The lock time field of a transaction is either a block height at + // which the transaction is finalized or a timestamp depending on if the + // value is before the txscript.LockTimeThreshold. When it is under the + // threshold it is a block height. + // + // The lockTimes in both the script and transaction must be of the same + // type. 
+ if !((vm.tx.LockTime < LockTimeThreshold && int64(lockTime) < int64(LockTimeThreshold)) || + (vm.tx.LockTime >= LockTimeThreshold && int64(lockTime) >= int64(LockTimeThreshold))) { + return fmt.Errorf("mismatched locktime types -- tx locktime %d, stack "+ + "locktime %d", vm.tx.LockTime, lockTime) + } + + if int64(lockTime) > int64(vm.tx.LockTime) { + str := "locktime requirement not satisfied -- locktime is " + + "greater than the transaction locktime: %d > %d" + return fmt.Errorf(str, lockTime, vm.tx.LockTime) + } + + // The lock time feature can also be disabled, thereby bypassing + // OP_CHECKLOCKTIMEVERIFY, if every transaction input has been finalized by + // setting its sequence to the maximum value (wire.MaxTxInSequenceNum). This + // condition would result in the transaction being allowed into the blockchain + // making the opcode ineffective. + // + // This condition is prevented by enforcing that the input being used by + // the opcode is unlocked (its sequence number is less than the max + // value). This is sufficient to prove correctness without having to + // check every input. + // + // NOTE: This implies that even if the transaction is not finalized due to + // another input being unlocked, the opcode execution will still fail when the + // input being used by the opcode is locked. + if vm.tx.TxIn[vm.txIdx].Sequence == wire.MaxTxInSequenceNum { + return errors.New("transaction input is finalized") + } + + return nil +} + // opcodeToAltStack removes the top item from the main data stack and pushes it // onto the alternate data stack. // @@ -1143,7 +1239,7 @@ func opcodeOver(op *parsedOpcode, vm *Engine) error { // Example with n=1: [x2 x1 x0 1] -> [x2 x1 x0 x1] // Example with n=2: [x2 x1 x0 2] -> [x2 x1 x0 x2] func opcodePick(op *parsedOpcode, vm *Engine) error { - val, err := vm.dstack.PopInt() + val, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1158,7 +1254,7 @@ func opcodePick(op *parsedOpcode, vm *Engine) error { // Example with n=1: [x2 x1 x0 1] -> [x2 x0 x1] // Example with n=2: [x2 x1 x0 2] -> [x1 x0 x2] func opcodeRoll(op *parsedOpcode, vm *Engine) error { - val, err := vm.dstack.PopInt() + val, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1188,6 +1284,177 @@ func opcodeTuck(op *parsedOpcode, vm *Engine) error { return vm.dstack.Tuck() } +// opcodeCat concatenates the top two stack elements after popping them off, then +// pushes the result back onto the stack. The opcode fails if the concatenated +// stack element is too large. +// Stack transformation: [... x1 x2] -> [... x1 || x2] +func opcodeCat(op *parsedOpcode, vm *Engine) error { + a, err := vm.dstack.PopByteArray() // x2 + if err != nil { + return err + } + b, err := vm.dstack.PopByteArray() // x1 + if err != nil { + return err + } + + // Handle zero length byte slice cases. If one or both of the top stack + // elements are nil, it's impossible for them to overflow the stack item + // when either is pushed back on. If both stack items are empty, push an + // empty byte slice back onto the stack. + switch { + case len(a) == 0 && len(b) > 0: + vm.dstack.PushByteArray(b) + return nil + case len(b) == 0 && len(a) > 0: + vm.dstack.PushByteArray(a) + return nil + case len(b) == 0 && len(a) == 0: + vm.dstack.PushByteArray(nil) + return nil + } + + // We can't overflow the maximum stack item size. + if len(a)+len(b) > MaxScriptElementSize { + return ErrStackElementTooBig + } + + c := append(b, a...) 
+ + vm.dstack.PushByteArray(c) + return nil +} + +// opcodeSubstr pops off the top two stack elements and interprets them as +// integers. If the indices indicated exist within the next stack item that is +// also popped off, return the relevant substring based on the given start and +// end indexes. +// Stack transformation: [... x1 x2 x3] -> [... x1[x3:x2]] +func opcodeSubstr(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) // x3 + if err != nil { + return err + } + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) // x2 + if err != nil { + return err + } + a, err := vm.dstack.PopByteArray() // x1 + if err != nil { + return err + } + aLen := len(a) + + // Golang uses ints for the indices of slices. Assume that we can get + // whatever we need from a slice within the boundaries of an int32 + // register. + v0Recast := int(v0.Int32()) + v1Recast := int(v1.Int32()) + + if aLen == 0 { + vm.dstack.PushByteArray(nil) + return nil + } + if v0Recast < 0 || v1Recast < 0 { + return ErrSubstrIdxNegative + } + if v0Recast > aLen { + return ErrSubstrIdxOutOfBounds + } + if v1Recast > aLen { + return ErrSubstrIdxOutOfBounds + } + if v0Recast > v1Recast { + return ErrSubstrIdxOutOfBounds + } + + // A substr of the same indices return an empty stack item, similar to + // Golang. + if v0Recast == v1Recast { + vm.dstack.PushByteArray(nil) + return nil + } + + vm.dstack.PushByteArray(a[v0Recast:v1Recast]) + return nil +} + +// opcodeLeft pops the first item off the stack as an int and the second item off +// the stack as a slice. The opcode then prunes the second item from the start +// index to the given int. Similar to substr, see above comments. +// Stack transformation: [... x1 x2] -> [... x1[:x2]] +func opcodeLeft(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) // x2 + if err != nil { + return err + } + a, err := vm.dstack.PopByteArray() // x1 + if err != nil { + return err + } + aLen := len(a) + + v0Recast := int(v0.Int32()) + + if aLen == 0 { + vm.dstack.PushByteArray(nil) + return nil + } + if v0Recast < 0 { + return ErrSubstrIdxNegative + } + if v0Recast > aLen { + return ErrSubstrIdxOutOfBounds + } + + // x1[:0] + if v0Recast == 0 { + vm.dstack.PushByteArray(nil) + return nil + } + + vm.dstack.PushByteArray(a[:v0Recast]) + return nil +} + +// opcodeRight pops the first item off the stack as an int and the second item off +// the stack as a slice. The opcode then prunes the second item from the given int +// index to ending index. Similar to substr, see above comments. +// Stack transformation: [... x1 x2] -> [... x1[x2:]] +func opcodeRight(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) // x2 + if err != nil { + return err + } + a, err := vm.dstack.PopByteArray() // x1 + if err != nil { + return err + } + aLen := len(a) + + v0Recast := int(v0.Int32()) + + if aLen == 0 { + vm.dstack.PushByteArray(nil) + return nil + } + if v0Recast < 0 { + return ErrSubstrIdxNegative + } + if v0Recast > aLen { + return ErrSubstrIdxOutOfBounds + } + + // x1[len(a):] + if v0Recast == aLen { + vm.dstack.PushByteArray(nil) + return nil + } + + vm.dstack.PushByteArray(a[v0Recast:]) + return nil +} + // opcodeSize pushes the size of the top item of the data stack onto the data // stack. 
// @@ -1202,6 +1469,73 @@ func opcodeSize(op *parsedOpcode, vm *Engine) error { return nil } +// opcodeInvert pops the top item off the stack, interprets it as an int32, +// inverts the bits, and then pushes it back to the stack. +// Stack transformation: [... x1] -> [... ~x1] +func opcodeInvert(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) + if err != nil { + return err + } + + vm.dstack.PushInt(scriptNum(^v0.Int32())) + return nil +} + +// opcodeAnd pops the top two items off the stack, interprets them as int32s, +// bitwise ANDs the value, and then pushes the result back to the stack. +// Stack transformation: [... x1 x2] -> [... x1 & x2] +func opcodeAnd(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) + if err != nil { + return err + } + + vm.dstack.PushInt(scriptNum(v0.Int32() & v1.Int32())) + return nil +} + +// opcodeOr pops the top two items off the stack, interprets them as int32s, +// bitwise ORs the value, and then pushes the result back to the stack. +// Stack transformation: [... x1 x2] -> [... x1 | x2] +func opcodeOr(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) + if err != nil { + return err + } + + vm.dstack.PushInt(scriptNum(v0.Int32() | v1.Int32())) + return nil +} + +// opcodeXor pops the top two items off the stack, interprets them as int32s, +// bitwise XORs the value, and then pushes the result back to the stack. +// Stack transformation: [... x1 x2] -> [... x1 ^ x2] +func opcodeXor(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) + if err != nil { + return err + } + + vm.dstack.PushInt(scriptNum(v0.Int32() ^ v1.Int32())) + return nil +} + // opcodeEqual removes the top 2 items of the data stack, compares them as raw // bytes, and pushes the result, encoded as a boolean, back to the stack. // @@ -1235,12 +1569,86 @@ func opcodeEqualVerify(op *parsedOpcode, vm *Engine) error { return err } +func rotateRight(value int32, count int32) int32 { + v := uint32(value) + c := uint32(count) + return int32((v >> c) | (v << (32 - c))) +} + +// opcodeRotr pops the top two items off the stack as integers. Both ints are +// interpreted as int32s. The first item becomes the depth to rotate (up to 31), +// while the second item is rotated to the right after recasting to a uint32. The +// rotated item is pushed back to the stack. +// Stack transformation: [... x1 x2] -> [... rotr(x1, x2)] +func opcodeRotr(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) // x2 + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) // x1 + if err != nil { + return err + } + + v032 := v0.Int32() + v132 := v1.Int32() + + // Don't allow invalid or pointless rotations. + if v032 < 0 { + return ErrNegativeRotation + } + if v032 > 31 { + return ErrRotationOverflow + } + + vm.dstack.PushInt(scriptNum(rotateRight(v132, v032))) + return nil +} + +func rotateLeft(value int32, count int32) int32 { + v := uint32(value) + c := uint32(count) + return int32((v << c) | (v >> (32 - c))) +} + +// opcodeRotl pops the top two items off the stack as integers.
Both ints are +// interpreted as int32s. The first item becomes the depth to rotate (up to 31), +// while the second item is rotated to the left after recasting to a uint32. The +// rotated item is pushed back to the stack. +// Stack transformation: [... x1 x2] -> [... rotl(x1, x2)] +func opcodeRotl(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) // x2 + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) // x1 + if err != nil { + return err + } + + v032 := v0.Int32() + v132 := v1.Int32() + + // Don't allow invalid or pointless rotations. + if v032 < 0 { + return ErrNegativeRotation + } + if v032 > 31 { + return ErrRotationOverflow + } + + vm.dstack.PushInt(scriptNum(rotateLeft(v132, v032))) + return nil +} + // opcode1Add treats the top item on the data stack as an integer and replaces // it with its incremented value (plus 1). // // Stack transformation: [... x1 x2] -> [... x1 x2+1] func opcode1Add(op *parsedOpcode, vm *Engine) error { - m, err := vm.dstack.PopInt() + m, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1254,7 +1662,7 @@ func opcode1Add(op *parsedOpcode, vm *Engine) error { // // Stack transformation: [... x1 x2] -> [... x1 x2-1] func opcode1Sub(op *parsedOpcode, vm *Engine) error { - m, err := vm.dstack.PopInt() + m, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1268,7 +1676,7 @@ func opcode1Sub(op *parsedOpcode, vm *Engine) error { // // Stack transformation: [... x1 x2] -> [... x1 -x2] func opcodeNegate(op *parsedOpcode, vm *Engine) error { - m, err := vm.dstack.PopInt() + m, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1282,7 +1690,7 @@ func opcodeNegate(op *parsedOpcode, vm *Engine) error { // // Stack transformation: [... x1 x2] -> [... x1 abs(x2)] func opcodeAbs(op *parsedOpcode, vm *Engine) error { - m, err := vm.dstack.PopInt() + m, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1307,7 +1715,7 @@ func opcodeAbs(op *parsedOpcode, vm *Engine) error { // Stack transformation (x2!=0): [... x1 1] -> [... x1 0] // Stack transformation (x2!=0): [... x1 17] -> [... x1 0] func opcodeNot(op *parsedOpcode, vm *Engine) error { - m, err := vm.dstack.PopInt() + m, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1327,7 +1735,7 @@ func opcodeNot(op *parsedOpcode, vm *Engine) error { // Stack transformation (x2!=0): [... x1 1] -> [... x1 1] // Stack transformation (x2!=0): [... x1 17] -> [... x1 1] func opcode0NotEqual(op *parsedOpcode, vm *Engine) error { - m, err := vm.dstack.PopInt() + m, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1344,12 +1752,12 @@ func opcode0NotEqual(op *parsedOpcode, vm *Engine) error { // // Stack transformation: [... x1 x2] -> [... x1+x2] func opcodeAdd(op *parsedOpcode, vm *Engine) error { - v0, err := vm.dstack.PopInt() + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } - v1, err := vm.dstack.PopInt() + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1364,12 +1772,12 @@ func opcodeAdd(op *parsedOpcode, vm *Engine) error { // // Stack transformation: [... x1 x2] -> [... 
x1-x2] func opcodeSub(op *parsedOpcode, vm *Engine) error { - v0, err := vm.dstack.PopInt() + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } - v1, err := vm.dstack.PopInt() + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1378,6 +1786,142 @@ func opcodeSub(op *parsedOpcode, vm *Engine) error { return nil } +// opcodeMul treats the top two items on the data stack as integers and replaces +// them with the result of multiplying the top entry with the second-to-top +// entry as 4-byte integers. +// +// Stack transformation: [... x1 x2] -> [... x1*x2] +func opcodeMul(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) + if err != nil { + return err + } + + v2 := v0.Int32() * v1.Int32() + + vm.dstack.PushInt(scriptNum(v2)) + return nil +} + +// opcodeDiv treats the top two items on the data stack as integers and replaces +// them with the result of dividing the second-to-top entry by the top entry as +// 4-byte integers. +// +// Stack transformation: [... x1 x2] -> [... x1/x2] +func opcodeDiv(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) + if err != nil { + return err + } + + if v0.Int32() == 0 { + return ErrDivideByZero + } + + v2 := v1.Int32() / v0.Int32() + + vm.dstack.PushInt(scriptNum(v2)) + return nil +} + +// opcodeMod treats the top two items on the data stack as integers and replaces +// them with the remainder of dividing the second-to-top entry by the top entry as +// 4-byte integers. +// +// Stack transformation: [... x1 x2] -> [... x1%x2] +func opcodeMod(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) + if err != nil { + return err + } + + if v0.Int32() == 0 { + return ErrDivideByZero + } + + v2 := v1.Int32() % v0.Int32() + + vm.dstack.PushInt(scriptNum(v2)) + return nil +} + +// opcodeLShift pops the top two items off the stack as integers. Both ints are +// interpreted as int32s. The first item becomes the depth to shift left, while +// the second item is shifted that depth to the left. The shifted item is pushed +// back to the stack as an integer. +// Stack transformation: [... x1 x2] -> [... x1 << x2] +func opcodeLShift(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) // x2 + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) // x1 + if err != nil { + return err + } + + v032 := v0.Int32() + v132 := v1.Int32() + + // Don't allow invalid or pointless shifts. + if v032 < 0 { + return ErrNegativeShift + } + if v032 > 32 { + return ErrShiftOverflow + } + + vm.dstack.PushInt(scriptNum(v132 << uint(v032))) + return nil +} + +// opcodeRShift pops the top two items off the stack as integers. Both ints are +// interpreted as int32s. The first item becomes the depth to shift right, while +// the second item is shifted that depth to the right. The shifted item is pushed +// back to the stack as an integer. +// Stack transformation: [... x1 x2] -> [... 
x1 >> x2] +func opcodeRShift(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) // x2 + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) // x1 + if err != nil { + return err + } + + v032 := v0.Int32() + v132 := v1.Int32() + + // Don't allow invalid or pointless shifts. + if v032 < 0 { + return ErrNegativeShift + } + if v032 > 32 { + return ErrShiftOverflow + } + + vm.dstack.PushInt(scriptNum(v132 >> uint(v032))) + return nil +} + // opcodeBoolAnd treats the top two items on the data stack as integers. When // both of them are not zero, they are replaced with a 1, otherwise a 0. // @@ -1386,12 +1930,12 @@ func opcodeSub(op *parsedOpcode, vm *Engine) error { // Stack transformation (x1==0, x2!=0): [... 0 7] -> [... 0] // Stack transformation (x1!=0, x2!=0): [... 4 8] -> [... 1] func opcodeBoolAnd(op *parsedOpcode, vm *Engine) error { - v0, err := vm.dstack.PopInt() + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } - v1, err := vm.dstack.PopInt() + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1413,12 +1957,12 @@ func opcodeBoolAnd(op *parsedOpcode, vm *Engine) error { // Stack transformation (x1==0, x2!=0): [... 0 7] -> [... 1] // Stack transformation (x1!=0, x2!=0): [... 4 8] -> [... 1] func opcodeBoolOr(op *parsedOpcode, vm *Engine) error { - v0, err := vm.dstack.PopInt() + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } - v1, err := vm.dstack.PopInt() + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1438,12 +1982,12 @@ func opcodeBoolOr(op *parsedOpcode, vm *Engine) error { // Stack transformation (x1==x2): [... 5 5] -> [... 1] // Stack transformation (x1!=x2): [... 5 7] -> [... 0] func opcodeNumEqual(op *parsedOpcode, vm *Engine) error { - v0, err := vm.dstack.PopInt() + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } - v1, err := vm.dstack.PopInt() + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1479,12 +2023,12 @@ func opcodeNumEqualVerify(op *parsedOpcode, vm *Engine) error { // Stack transformation (x1==x2): [... 5 5] -> [... 0] // Stack transformation (x1!=x2): [... 5 7] -> [... 1] func opcodeNumNotEqual(op *parsedOpcode, vm *Engine) error { - v0, err := vm.dstack.PopInt() + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } - v1, err := vm.dstack.PopInt() + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1504,12 +2048,12 @@ func opcodeNumNotEqual(op *parsedOpcode, vm *Engine) error { // // Stack transformation: [... x1 x2] -> [... bool] func opcodeLessThan(op *parsedOpcode, vm *Engine) error { - v0, err := vm.dstack.PopInt() + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } - v1, err := vm.dstack.PopInt() + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1529,12 +2073,12 @@ func opcodeLessThan(op *parsedOpcode, vm *Engine) error { // // Stack transformation: [... x1 x2] -> [... 
bool] func opcodeGreaterThan(op *parsedOpcode, vm *Engine) error { - v0, err := vm.dstack.PopInt() + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } - v1, err := vm.dstack.PopInt() + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1553,12 +2097,12 @@ func opcodeGreaterThan(op *parsedOpcode, vm *Engine) error { // // Stack transformation: [... x1 x2] -> [... bool] func opcodeLessThanOrEqual(op *parsedOpcode, vm *Engine) error { - v0, err := vm.dstack.PopInt() + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } - v1, err := vm.dstack.PopInt() + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1577,12 +2121,12 @@ func opcodeLessThanOrEqual(op *parsedOpcode, vm *Engine) error { // // Stack transformation: [... x1 x2] -> [... bool] func opcodeGreaterThanOrEqual(op *parsedOpcode, vm *Engine) error { - v0, err := vm.dstack.PopInt() + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } - v1, err := vm.dstack.PopInt() + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1601,12 +2145,12 @@ func opcodeGreaterThanOrEqual(op *parsedOpcode, vm *Engine) error { // // Stack transformation: [... x1 x2] -> [... min(x1, x2)] func opcodeMin(op *parsedOpcode, vm *Engine) error { - v0, err := vm.dstack.PopInt() + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } - v1, err := vm.dstack.PopInt() + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1625,12 +2169,12 @@ func opcodeMin(op *parsedOpcode, vm *Engine) error { // // Stack transformation: [... x1 x2] -> [... max(x1, x2)] func opcodeMax(op *parsedOpcode, vm *Engine) error { - v0, err := vm.dstack.PopInt() + v0, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } - v1, err := vm.dstack.PopInt() + v1, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1653,17 +2197,17 @@ func opcodeMax(op *parsedOpcode, vm *Engine) error { // // Stack transformation: [... x1 min max] -> [... bool] func opcodeWithin(op *parsedOpcode, vm *Engine) error { - maxVal, err := vm.dstack.PopInt() + maxVal, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } - minVal, err := vm.dstack.PopInt() + minVal, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } - x, err := vm.dstack.PopInt() + x, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1712,46 +2256,46 @@ func opcodeSha1(op *parsedOpcode, vm *Engine) error { } // opcodeSha256 treats the top item of the data stack as raw bytes and replaces -// it with sha256(data). +// it with hash256(data). // -// Stack transformation: [... x1] -> [... sha256(x1)] +// Stack transformation: [... x1] -> [... hash256(x1)] func opcodeSha256(op *parsedOpcode, vm *Engine) error { buf, err := vm.dstack.PopByteArray() if err != nil { return err } - hash := fastsha256.Sum256(buf) + hash := chainhash.HashFuncB(buf) vm.dstack.PushByteArray(hash[:]) return nil } // opcodeHash160 treats the top item of the data stack as raw bytes and replaces -// it with ripemd160(sha256(data)). +// it with ripemd160(hash256(data)). // -// Stack transformation: [... x1] -> [... ripemd160(sha256(x1))] +// Stack transformation: [... x1] -> [... 
ripemd160(hash256(x1))] func opcodeHash160(op *parsedOpcode, vm *Engine) error { buf, err := vm.dstack.PopByteArray() if err != nil { return err } - hash := fastsha256.Sum256(buf) + hash := chainhash.HashFuncB(buf) vm.dstack.PushByteArray(calcHash(hash[:], ripemd160.New())) return nil } // opcodeHash256 treats the top item of the data stack as raw bytes and replaces -// it with sha256(sha256(data)). +// it with hash256(hash256(data)). // -// Stack transformation: [... x1] -> [... sha256(sha256(x1))] +// Stack transformation: [... x1] -> [... hash256(hash256(x1))] func opcodeHash256(op *parsedOpcode, vm *Engine) error { buf, err := vm.dstack.PopByteArray() if err != nil { return err } - vm.dstack.PushByteArray(wire.DoubleSha256(buf)) + vm.dstack.PushByteArray(chainhash.HashFuncB(chainhash.HashFuncB(buf))) return nil } @@ -1759,6 +2303,7 @@ func opcodeHash256(op *parsedOpcode, vm *Engine) error { // seen OP_CODESEPARATOR which is used during signature checking. // // This opcode does not change the contents of the data stack. +// This opcode is disabled in Decred, as it always returns an engine error. func opcodeCodeSeparator(op *parsedOpcode, vm *Engine) error { vm.lastCodeSep = vm.scriptOff return nil @@ -1829,28 +2374,40 @@ func opcodeCheckSig(op *parsedOpcode, vm *Engine) error { subScript = removeOpcodeByData(subScript, fullSigBytes) // Generate the signature hash based on the signature hash type. - hash := calcSignatureHash(subScript, hashType, &vm.tx, vm.txIdx) - - pubKey, err := btcec.ParsePubKey(pkBytes, btcec.S256()) + var prefixHash *chainhash.Hash + if hashType&sigHashMask == SigHashAll { + if optimizeSigVerification { + ph := vm.tx.CachedTxSha() + prefixHash = ph + } + } + hash, err := calcSignatureHash(subScript, hashType, &vm.tx, vm.txIdx, + prefixHash) if err != nil { vm.dstack.PushBool(false) return nil } - var signature *btcec.Signature + pubKey, err := chainec.Secp256k1.ParsePubKey(pkBytes) + if err != nil { + vm.dstack.PushBool(false) + return nil + } + + var signature chainec.Signature if vm.hasFlag(ScriptVerifyStrictEncoding) || vm.hasFlag(ScriptVerifyDERSignatures) { - - signature, err = btcec.ParseDERSignature(sigBytes, btcec.S256()) + signature, err = chainec.Secp256k1.ParseDERSignature(sigBytes) } else { - signature, err = btcec.ParseSignature(sigBytes, btcec.S256()) + signature, err = chainec.Secp256k1.ParseSignature(sigBytes) } if err != nil { vm.dstack.PushBool(false) return nil } - ok := signature.Verify(hash, pubKey) + ok := chainec.Secp256k1.Verify(pubKey, hash, signature.GetR(), + signature.GetS()) vm.dstack.PushBool(ok) return nil } @@ -1873,7 +2430,7 @@ func opcodeCheckSigVerify(op *parsedOpcode, vm *Engine) error { // the same signature multiple times when verify a multisig. type parsedSigInfo struct { signature []byte - parsedSignature *btcec.Signature + parsedSignature chainec.Signature parsed bool } @@ -1882,12 +2439,6 @@ type parsedSigInfo struct { // keys, followed by the integer number of signatures, followed by that many // entries as raw data representing the signatures. // -// Due to a bug in the original Satoshi client implementation, an additional -// dummy argument is also required by the consensus rules, although it is not -// used. The dummy value SHOULD be an OP_0, although that is not required by -// the consensus rules. When the ScriptStrictMultiSig flag is set, it must be -// OP_0. -// // All of the aforementioned stack items are replaced with a bool which // indicates if the requisite number of signatures were successfully verified. 
// @@ -1897,7 +2448,7 @@ type parsedSigInfo struct { // Stack transformation: // [... dummy [sig ...] numsigs [pubkey ...] numpubkeys] -> [... bool] func opcodeCheckMultiSig(op *parsedOpcode, vm *Engine) error { - numKeys, err := vm.dstack.PopInt() + numKeys, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1920,7 +2471,7 @@ func opcodeCheckMultiSig(op *parsedOpcode, vm *Engine) error { pubKeys = append(pubKeys, pubKey) } - numSigs, err := vm.dstack.PopInt() + numSigs, err := vm.dstack.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -1944,23 +2495,6 @@ func opcodeCheckMultiSig(op *parsedOpcode, vm *Engine) error { signatures = append(signatures, sigInfo) } - // A bug in the original Satoshi client implementation means one more - // stack value than should be used must be popped. Unfortunately, this - // buggy behavior is now part of the consensus and a hard fork would be - // required to fix it. - dummy, err := vm.dstack.PopByteArray() - if err != nil { - return err - } - - // Since the dummy argument is otherwise not checked, it could be any - // value which unfortunately provides a source of malleability. Thus, - // there is a script flag to force an error when the value is NOT 0. - if vm.hasFlag(ScriptStrictMultiSig) && len(dummy) != 0 { - return fmt.Errorf("multisig dummy argument is not zero length: %d", - len(dummy)) - } - // Get script starting from the most recent OP_CODESEPARATOR. script := vm.subScript() @@ -2003,7 +2537,7 @@ func opcodeCheckMultiSig(op *parsedOpcode, vm *Engine) error { signature := rawSig[:len(rawSig)-1] // Only parse and check the signature encoding once. - var parsedSig *btcec.Signature + var parsedSig chainec.Signature if !sigInfo.parsed { if err := vm.checkHashTypeEncoding(hashType); err != nil { return err @@ -2017,11 +2551,9 @@ func opcodeCheckMultiSig(op *parsedOpcode, vm *Engine) error { if vm.hasFlag(ScriptVerifyStrictEncoding) || vm.hasFlag(ScriptVerifyDERSignatures) { - parsedSig, err = btcec.ParseDERSignature(signature, - btcec.S256()) + parsedSig, err = chainec.Secp256k1.ParseDERSignature(signature) } else { - parsedSig, err = btcec.ParseSignature(signature, - btcec.S256()) + parsedSig, err = chainec.Secp256k1.ParseSignature(signature) } sigInfo.parsed = true if err != nil { @@ -2043,15 +2575,28 @@ func opcodeCheckMultiSig(op *parsedOpcode, vm *Engine) error { } // Parse the pubkey. - parsedPubKey, err := btcec.ParsePubKey(pubKey, btcec.S256()) + parsedPubKey, err := chainec.Secp256k1.ParsePubKey(pubKey) if err != nil { continue } // Generate the signature hash based on the signature hash type. - hash := calcSignatureHash(script, hashType, &vm.tx, vm.txIdx) + var prefixHash *chainhash.Hash + if hashType&sigHashMask == SigHashAll { + if optimizeSigVerification { + ph := vm.tx.CachedTxSha() + prefixHash = ph + } + } + hash, err := calcSignatureHash(script, hashType, &vm.tx, vm.txIdx, + prefixHash) + if err != nil { + return err + } - if parsedSig.Verify(hash, parsedPubKey) { + //if parsedSig.Verify(hash, parsedPubKey) { + if chainec.Secp256k1.Verify(parsedPubKey, hash, parsedSig.GetR(), + parsedSig.GetS()) { // PubKey verified, move on to the next signature. signatureIdx++ numSignatures-- @@ -2076,17 +2621,212 @@ func opcodeCheckMultiSigVerify(op *parsedOpcode, vm *Engine) error { return err } +// ECDSA signature schemes encoded as a single byte. 
Secp256k1 traditional +// is not accessible through CheckSigAlt, but is used elsewhere in the +// sign function to indicate the type of signature to generate. +type sigTypes uint8 + +var secp = sigTypes(chainec.ECTypeSecp256k1) +var edwards = sigTypes(chainec.ECTypeEdwards) +var secSchnorr = sigTypes(chainec.ECTypeSecSchnorr) + +// opcodeCheckSigAlt accepts a three-item stack and pops off the first three +// items. The first item is a signature type (1-255; it cannot be zero or the +// soft fork will fail). Any unused signature types return true, so that future +// alternative signature methods may be added. The second item popped off the +// stack is the public key; wrong-size pubkeys return false. The third item to +// be popped off the stack is the signature along with the hash type at the +// end; wrong-sized signatures also return false. +// Failing to parse a pubkey or signature results in false. +// After parsing, the signature and pubkey are verified against the message +// (the hash of this transaction and its input). +func opcodeCheckSigAlt(op *parsedOpcode, vm *Engine) error { + sigType, err := vm.dstack.PopInt(altSigSuitesMaxscriptNumLen) + if err != nil { + return err + } + + switch sigTypes(sigType) { + case sigTypes(0): + // Zero case; pre-softfork clients will return 0 in this case as well. + vm.dstack.PushBool(false) + return nil + case edwards: + break + case secSchnorr: + break + default: + // Caveat: All unknown signature types return true, allowing for future + // softforks with other new signature types. + vm.dstack.PushBool(true) + return nil + } + + pkBytes, err := vm.dstack.PopByteArray() + if err != nil { + return err + } + + // Check the public key lengths. Only 33-byte compressed secp256k1 keys + // are allowed for secp256k1 Schnorr signatures, while 32-byte keys + // are used for Curve25519. + switch sigTypes(sigType) { + case edwards: + if len(pkBytes) != 32 { + vm.dstack.PushBool(false) + return nil + } + case secSchnorr: + if len(pkBytes) != 33 { + vm.dstack.PushBool(false) + return nil + } + } + + fullSigBytes, err := vm.dstack.PopByteArray() + if err != nil { + return err + } + + // Schnorr signatures are 65 bytes in length (64 bytes for [r,s] and + // 1 byte appended to the end for hashType). + switch sigTypes(sigType) { + case edwards: + if len(fullSigBytes) != 65 { + vm.dstack.PushBool(false) + return nil + } + case secSchnorr: + if len(fullSigBytes) != 65 { + vm.dstack.PushBool(false) + return nil + } + } + + // Trim off hashtype from the signature string and check if the + // signature and pubkey conform to the strict encoding requirements + // depending on the flags. + // + // NOTE: When the strict encoding flags are set, any errors in the + // signature or public key encoding here result in an immediate script error + // (and thus no result bool is pushed to the data stack). This differs + // from the logic below where any errors in parsing the signature are + // treated as a signature failure resulting in false being pushed to + // the data stack. This is required because the more general script + // validation consensus rules do not have the new strict encoding + // requirements enabled by the flags. + hashType := SigHashType(fullSigBytes[len(fullSigBytes)-1]) + sigBytes := fullSigBytes[:len(fullSigBytes)-1] + if err := vm.checkHashTypeEncoding(hashType); err != nil { + return err + } + + // Get the subscript. + subScript := vm.subScript() + + // Remove the signature since there is no way for a signature to sign + // itself.
+ subScript = removeOpcodeByData(subScript, fullSigBytes) + + // Generate the signature hash based on the signature hash type. + var prefixHash *chainhash.Hash + if hashType&sigHashMask == SigHashAll { + if optimizeSigVerification { + ph := vm.tx.CachedTxSha() + prefixHash = ph + } + } + hash, err := calcSignatureHash(subScript, hashType, &vm.tx, vm.txIdx, + prefixHash) + if err != nil { + vm.dstack.PushBool(false) + return nil + } + + // Get the public key from bytes. + var pubKey chainec.PublicKey + switch sigTypes(sigType) { + case edwards: + pubKeyEd, err := chainec.Edwards.ParsePubKey(pkBytes) + if err != nil { + vm.dstack.PushBool(false) + return nil + } + pubKey = pubKeyEd + case secSchnorr: + pubKeySec, err := chainec.SecSchnorr.ParsePubKey(pkBytes) + if err != nil { + vm.dstack.PushBool(false) + return nil + } + pubKey = pubKeySec + } + + // Get the signature from bytes. + var signature chainec.Signature + switch sigTypes(sigType) { + case edwards: + sigEd, err := chainec.Edwards.ParseSignature(sigBytes) + if err != nil { + vm.dstack.PushBool(false) + return nil + } + signature = sigEd + case secSchnorr: + sigSec, err := chainec.SecSchnorr.ParseSignature(sigBytes) + if err != nil { + vm.dstack.PushBool(false) + return nil + } + signature = sigSec + default: + vm.dstack.PushBool(false) + return nil + } + + // Attempt to validate the signature. + switch sigTypes(sigType) { + case edwards: + ok := chainec.Edwards.Verify(pubKey, hash, signature.GetR(), + signature.GetS()) + vm.dstack.PushBool(ok) + return nil + case secSchnorr: + ok := chainec.SecSchnorr.Verify(pubKey, hash, signature.GetR(), + signature.GetS()) + vm.dstack.PushBool(ok) + return nil + } + + // Fallthrough of somekind automatically results in false, but + // this should never be hit. + vm.dstack.PushBool(false) + return nil +} + +// opcodeCheckSigAltVerify is a combination of opcodeCheckSigAlt and +// opcodeVerify. The opcodeCheckSigAlt is invoked followed by opcodeVerify. +func opcodeCheckSigAltVerify(op *parsedOpcode, vm *Engine) error { + err := opcodeCheckSigAlt(op, vm) + if err == nil { + err = opcodeVerify(op, vm) + } + return err +} + // OpcodeByName is a map that can be used to lookup an opcode by its // human-readable name (OP_CHECKMULTISIG, OP_CHECKSIG, etc). var OpcodeByName = make(map[string]byte) func init() { // Initialize the opcode name to value map using the contents of the - // opcode array. Also add entries for "OP_FALSE" and "OP_TRUE" since - // they are aliases for "OP_0" and "OP_1", respectively. + // opcode array. Also add entries for "OP_FALSE", "OP_TRUE", and + // "OP_NOP2" since they are aliases for "OP_0", "OP_1", + // and "OP_CHECKLOCKTIMEVERIFY" respectively. for _, op := range opcodeArray { OpcodeByName[op.name] = op.value } OpcodeByName["OP_FALSE"] = OP_FALSE OpcodeByName["OP_TRUE"] = OP_TRUE + OpcodeByName["OP_NOP2"] = OP_CHECKLOCKTIMEVERIFY } diff --git a/txscript/opcode_test.go b/txscript/opcode_test.go index 0c31fc0b..c10f7b88 100644 --- a/txscript/opcode_test.go +++ b/txscript/opcode_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
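The OP_CHECKSIGALT path above pops three items — the signature type, the public key, and the signature with a trailing hash-type byte — and enforces fixed sizes before any parsing happens. The sketch below illustrates only those size rules; the type values 1 (Edwards) and 2 (secp256k1 Schnorr) are assumptions standing in for the real chainec constants, and in the actual opcode a failed check pushes false onto the stack rather than returning an error.

```go
package main

import "fmt"

// Assumed values for illustration; the patch uses chainec.ECTypeEdwards and
// chainec.ECTypeSecSchnorr rather than bare numbers.
const (
	sigTypeEdwards    = 1
	sigTypeSecSchnorr = 2
)

// altSigArgsOK mirrors the size checks described above: 32-byte Edwards
// pubkeys, 33-byte compressed secp256k1 pubkeys for Schnorr, and 65-byte
// signatures (64-byte [r,s] plus one hash-type byte) for both schemes.
func altSigArgsOK(sigType int, pubKey, fullSig []byte) bool {
	switch sigType {
	case sigTypeEdwards:
		if len(pubKey) != 32 {
			return false
		}
	case sigTypeSecSchnorr:
		if len(pubKey) != 33 {
			return false
		}
	default:
		// Unknown non-zero types succeed so future soft forks can add
		// new schemes; a type of zero always fails.
		return sigType != 0
	}
	return len(fullSig) == 65
}

func main() {
	fmt.Println(altSigArgsOK(sigTypeSecSchnorr, make([]byte, 33), make([]byte, 65))) // true
	fmt.Println(altSigArgsOK(sigTypeEdwards, make([]byte, 33), make([]byte, 65)))    // false
}
```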
@@ -7,9 +8,12 @@ package txscript import ( "bytes" "fmt" + "math/rand" "strconv" "strings" "testing" + + "github.com/decred/dcrd/wire" ) // TestOpcodeDisabled tests the opcodeDisabled function manually because all @@ -17,7 +21,6 @@ import ( // so the function is not called under normal circumstances. func TestOpcodeDisabled(t *testing.T) { t.Parallel() - tests := []byte{OP_CAT, OP_SUBSTR, OP_LEFT, OP_RIGHT, OP_INVERT, OP_AND, OP_OR, OP_2MUL, OP_2DIV, OP_MUL, OP_DIV, OP_MOD, OP_LSHIFT, OP_RSHIFT, @@ -59,8 +62,8 @@ func TestOpcodeDisasm(t *testing.T) { 0x7e: "OP_CAT", 0x7f: "OP_SUBSTR", 0x80: "OP_LEFT", 0x81: "OP_RIGHT", 0x82: "OP_SIZE", 0x83: "OP_INVERT", 0x84: "OP_AND", 0x85: "OP_OR", 0x86: "OP_XOR", - 0x87: "OP_EQUAL", 0x88: "OP_EQUALVERIFY", 0x89: "OP_RESERVED1", - 0x8a: "OP_RESERVED2", 0x8b: "OP_1ADD", 0x8c: "OP_1SUB", + 0x87: "OP_EQUAL", 0x88: "OP_EQUALVERIFY", 0x89: "OP_ROTR", + 0x8a: "OP_ROTL", 0x8b: "OP_1ADD", 0x8c: "OP_1SUB", 0x8d: "OP_2MUL", 0x8e: "OP_2DIV", 0x8f: "OP_NEGATE", 0x90: "OP_ABS", 0x91: "OP_NOT", 0x92: "OP_0NOTEQUAL", 0x93: "OP_ADD", 0x94: "OP_SUB", 0x95: "OP_MUL", 0x96: "OP_DIV", @@ -76,7 +79,9 @@ func TestOpcodeDisasm(t *testing.T) { 0xae: "OP_CHECKMULTISIG", 0xaf: "OP_CHECKMULTISIGVERIFY", 0xf9: "OP_SMALLDATA", 0xfa: "OP_SMALLINTEGER", 0xfb: "OP_PUBKEYS", 0xfd: "OP_PUBKEYHASH", 0xfe: "OP_PUBKEY", - 0xff: "OP_INVALIDOPCODE", + 0xff: "OP_INVALIDOPCODE", 0xba: "OP_SSTX", 0xbb: "OP_SSGEN", + 0xbc: "OP_SSRTX", 0xbd: "OP_SSTXCHANGE", 0xbe: "OP_CHECKSIGALT", + 0xbf: "OP_CHECKSIGALTVERIFY", } for opcodeVal, expectedStr := range expectedStrings { var data []byte @@ -109,11 +114,16 @@ func TestOpcodeDisasm(t *testing.T) { // OP_NOP1 through OP_NOP10. case opcodeVal >= 0xb0 && opcodeVal <= 0xb9: - val := byte(opcodeVal - (0xb0 - 1)) - expectedStr = "OP_NOP" + strconv.Itoa(int(val)) + // OP_NOP2 is an alias of OP_CHECKLOCKTIMEVERIFY + if opcodeVal == 0xb1 { + expectedStr = "OP_CHECKLOCKTIMEVERIFY" + } else { + val := byte(opcodeVal - (0xb0 - 1)) + expectedStr = "OP_NOP" + strconv.Itoa(int(val)) + } // OP_UNKNOWN#. - case opcodeVal >= 0xba && opcodeVal <= 0xf8 || opcodeVal == 0xfc: + case opcodeVal >= 0xc0 && opcodeVal <= 0xf8 || opcodeVal == 0xfc: expectedStr = "OP_UNKNOWN" + strconv.Itoa(int(opcodeVal)) } @@ -166,11 +176,16 @@ func TestOpcodeDisasm(t *testing.T) { // OP_NOP1 through OP_NOP10. case opcodeVal >= 0xb0 && opcodeVal <= 0xb9: - val := byte(opcodeVal - (0xb0 - 1)) - expectedStr = "OP_NOP" + strconv.Itoa(int(val)) + // OP_NOP2 is an alias of OP_CHECKLOCKTIMEVERIFY + if opcodeVal == 0xb1 { + expectedStr = "OP_CHECKLOCKTIMEVERIFY" + } else { + val := byte(opcodeVal - (0xb0 - 1)) + expectedStr = "OP_NOP" + strconv.Itoa(int(val)) + } // OP_UNKNOWN#. 
- case opcodeVal >= 0xba && opcodeVal <= 0xf8 || opcodeVal == 0xfc: + case opcodeVal >= 0xc0 && opcodeVal <= 0xf8 || opcodeVal == 0xfc: expectedStr = "OP_UNKNOWN" + strconv.Itoa(int(opcodeVal)) } @@ -184,3 +199,349 @@ func TestOpcodeDisasm(t *testing.T) { } } } + +func TestNewlyEnabledOpCodes(t *testing.T) { + sigScriptMath := []byte{ + 0x04, + 0xff, 0xff, 0xff, 0x7f, + 0x04, + 0xee, 0xee, 0xee, 0x6e, + } + sigScriptShift := []byte{ + 0x04, + 0xff, 0xff, 0xff, 0x7f, + 0x53, + } + sigScriptRot := []byte{ + 0x04, + 0x21, 0x12, 0x34, 0x56, + 0x53, + } + sigScriptInv := []byte{ + 0x04, + 0xff, 0x00, 0xf0, 0x0f, + } + sigScriptLogic := []byte{ + 0x04, + 0x21, 0x12, 0x34, 0x56, + 0x04, + 0x0f, 0xf0, 0x00, 0xff, + } + sigScriptCat := []byte{ + 0x06, + 0x21, 0x12, 0x34, 0x56, 0x44, 0x55, + 0x06, + 0x0f, 0xf0, 0x00, 0xff, 0x88, 0x99, + } + lotsOf01s := bytes.Repeat([]byte{0x01}, 2050) + builder := NewScriptBuilder() + builder.AddData(lotsOf01s).AddData(lotsOf01s) + sigScriptCatOverflow, _ := builder.Script() + sigScriptSubstr := []byte{ + 0x08, + 0x21, 0x12, 0x34, 0x56, 0x59, 0x32, 0x40, 0x21, + 0x56, + 0x52, + } + sigScriptLR := []byte{ + 0x08, + 0x21, 0x12, 0x34, 0x56, 0x59, 0x32, 0x40, 0x21, + 0x54, + } + + tests := []struct { + name string + pkScript []byte + sigScript []byte + expected bool + }{ + { + name: "add", + pkScript: []byte{ + 0x93, // OP_ADD + 0x05, // Expected result push + 0xed, 0xee, 0xee, 0xee, 0x00, + 0x87, // OP_EQUAL + }, + sigScript: sigScriptMath, + expected: true, + }, + { + name: "sub", + pkScript: []byte{ + 0x94, // OP_SUB + 0x04, // Expected result push + 0x11, 0x11, 0x11, 0x11, + 0x87, // OP_EQUAL + }, + sigScript: sigScriptMath, + expected: true, + }, + { + name: "mul", + pkScript: []byte{ + 0x95, // OP_MUL + 0x04, // Expected result push + 0xee, 0xee, 0xee, 0xee, + 0x87, // OP_EQUAL + }, + sigScript: sigScriptMath, + expected: true, + }, + { + name: "div", + pkScript: []byte{ + 0x96, // OP_DIV + 0x51, // Expected result push + 0x87, // OP_EQUAL + }, + sigScript: sigScriptMath, + expected: true, + }, + { + name: "mod", + pkScript: []byte{ + 0x97, // OP_MOD + 0x04, // Expected result push + 0x11, 0x11, 0x11, 0x11, + 0x87, // OP_EQUAL + }, + sigScript: sigScriptMath, + expected: true, + }, + { + name: "lshift", + pkScript: []byte{ + 0x98, // OP_LSHIFT + 0x01, // Expected result push + 0x88, + 0x87, // OP_EQUAL + }, + sigScript: sigScriptShift, + expected: true, + }, + { + name: "rshift", + pkScript: []byte{ + 0x99, // OP_RSHIFT + 0x04, // Expected result push + 0xff, 0xff, 0xff, 0x0f, + 0x87, // OP_EQUAL + }, + sigScript: sigScriptShift, + expected: true, + }, + { + name: "rotr", + pkScript: []byte{ + 0x89, // OP_ROTR + 0x04, // Expected result push + 0x44, 0x82, 0xc6, 0x2a, + 0x87, // OP_EQUAL + }, + sigScript: sigScriptRot, + expected: true, + }, + { + name: "rotl", + pkScript: []byte{ + 0x8a, // OP_ROTL + 0x04, // Expected result push + 0xf6, 0x6e, 0x5f, 0xce, + 0x87, // OP_EQUAL + }, + sigScript: sigScriptRot, + expected: true, + }, + { + name: "inv", + pkScript: []byte{ + 0x83, // OP_INV + 0x04, // Expected result push + 0x00, 0x01, 0xf0, 0x8f, + 0x87, // OP_EQUAL + }, + sigScript: sigScriptInv, + expected: true, + }, + { + name: "and", + pkScript: []byte{ + 0x84, // OP_AND + 0x03, // Expected result push + 0x21, 0x02, 0x34, + 0x87, // OP_EQUAL + }, + sigScript: sigScriptLogic, + expected: true, + }, + { + name: "or", + pkScript: []byte{ + 0x85, // OP_OR + 0x04, // Expected result push + 0x0f, 0xe0, 0x00, 0xa9, + 0x87, // OP_EQUAL + }, + sigScript: sigScriptLogic, + 
expected: true, + }, + { + name: "xor", + pkScript: []byte{ + 0x86, // OP_XOR + 0x04, // Expected result push + 0x30, 0xe2, 0x34, 0xa9, + 0x87, // OP_EQUAL + }, + sigScript: sigScriptLogic, + expected: true, + }, + { + name: "cat", + pkScript: []byte{ + 0x7e, // OP_CAT + 0x0c, // Expected result push + 0x21, 0x12, 0x34, 0x56, 0x44, 0x55, + 0x0f, 0xf0, 0x00, 0xff, 0x88, 0x99, + 0x87, // OP_EQUAL + }, + sigScript: sigScriptCat, + expected: true, + }, + { + name: "catoverflow", + pkScript: []byte{ + 0x7e, // OP_CAT + 0x0c, // Expected result push + 0x21, 0x12, 0x34, 0x56, 0x44, 0x55, + 0x0f, 0xf0, 0x00, 0xff, 0x88, 0x99, + 0x87, // OP_EQUAL + }, + sigScript: sigScriptCatOverflow, + expected: false, + }, + { + name: "substr", + pkScript: []byte{ + 0x7f, // OP_SUBSTR + 0x04, // Expected result push + 0x34, 0x56, 0x59, 0x32, + 0x87, // OP_EQUAL + }, + sigScript: sigScriptSubstr, + expected: true, + }, + { + name: "left", + pkScript: []byte{ + 0x80, // OP_LEFT + 0x04, // Expected result push + 0x21, 0x12, 0x34, 0x56, + 0x87, // OP_EQUAL + }, + sigScript: sigScriptLR, + expected: true, + }, + { + name: "right", + pkScript: []byte{ + 0x81, // OP_RIGHT + 0x04, // Expected result push + 0x59, 0x32, 0x40, 0x21, + 0x87, // OP_EQUAL + }, + sigScript: sigScriptLR, + expected: true, + }, + } + + for _, test := range tests { + msgTx := new(wire.MsgTx) + msgTx.AddTxIn(&wire.TxIn{ + PreviousOutPoint: wire.OutPoint{}, + SignatureScript: test.sigScript, + Sequence: 0xFFFFFFFF, + }) + msgTx.AddTxOut(&wire.TxOut{ + Value: 0x00FFFFFF00000000, + PkScript: []byte{0x01}, + }) + flags := StandardVerifyFlags + engine, err := NewEngine(test.pkScript, msgTx, 0, flags, 0) + if err != nil { + t.Errorf("Bad script result for test %v because of error: %v", + test.name, err.Error()) + continue + } + err = engine.Execute() + if err != nil && test.expected { + t.Errorf("Bad script exec for test %v because of error: %v", + test.name, err.Error()) + } + } +} + +func randByteSliceSlice(i int, maxLen int, src int) [][]byte { + r := rand.New(rand.NewSource(int64(src))) + + slices := make([][]byte, i, i) + for j := 0; j < i; j++ { + for { + sz := r.Intn(maxLen) + 1 + + sl := make([]byte, sz, sz) + for k := 0; k < sz; k++ { + randByte := r.Intn(255) + sl[k] = uint8(randByte) + } + + // No duplicates allowed. + if j > 0 && + (bytes.Equal(sl, slices[j-1])) { + r.Seed(int64(j) + r.Int63n(12345)) + continue + } + + slices[j] = sl + break + } + } + + return slices +} + +// TestForVMFailure feeds random scripts to the VMs to check and see if it +// crashes. Try increasing the number of iterations or the length of the +// byte string to sample a greater space. 
+func TestForVMFailure(t *testing.T) { + numTests := 2 + bsLength := 11 + + for i := 0; i < numTests; i++ { + tests := randByteSliceSlice(65536, bsLength, i) + + for j, _ := range tests { + if j == 0 { + continue + } + + msgTx := new(wire.MsgTx) + msgTx.AddTxIn(&wire.TxIn{ + PreviousOutPoint: wire.OutPoint{}, + SignatureScript: tests[j-1], + Sequence: 0xFFFFFFFF, + }) + msgTx.AddTxOut(&wire.TxOut{ + Value: 0x00FFFFFF00000000, + PkScript: []byte{0x01}, + }) + flags := StandardVerifyFlags + engine, err := NewEngine(tests[j], msgTx, 0, flags, 0) + + if err == nil { + engine.Execute() + } + } + } +} diff --git a/txscript/reference_test.go b/txscript/reference_test.go index 50d5531a..eabc5fd6 100644 --- a/txscript/reference_test.go +++ b/txscript/reference_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -14,9 +15,10 @@ import ( "strings" "testing" - . "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/chaincfg/chainhash" + . "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) // testName returns a descriptive test name for the given reference test data. @@ -124,6 +126,8 @@ func parseScriptFlags(flagStr string) (ScriptFlags, error) { switch flag { case "": // Nothing. + case "CHECKLOCKTIMEVERIFY": + flags |= ScriptVerifyCheckLockTimeVerify case "CLEANSTACK": flags |= ScriptVerifyCleanStack case "DERSIG": @@ -136,8 +140,6 @@ func parseScriptFlags(flagStr string) (ScriptFlags, error) { flags |= ScriptVerifyMinimalData case "NONE": // Nothing. - case "NULLDUMMY": - flags |= ScriptStrictMultiSig case "P2SH": flags |= ScriptBip16 case "SIGPUSHONLY": @@ -156,7 +158,8 @@ func parseScriptFlags(flagStr string) (ScriptFlags, error) { func createSpendingTx(sigScript, pkScript []byte) *wire.MsgTx { coinbaseTx := wire.NewMsgTx() - outPoint := wire.NewOutPoint(&wire.ShaHash{}, ^uint32(0)) + outPoint := wire.NewOutPoint(&chainhash.Hash{}, ^uint32(0), + dcrutil.TxTreeRegular) txIn := wire.NewTxIn(outPoint, []byte{OP_0, OP_0}) txOut := wire.NewTxOut(0, pkScript) coinbaseTx.AddTxIn(txIn) @@ -164,7 +167,8 @@ func createSpendingTx(sigScript, pkScript []byte) *wire.MsgTx { spendingTx := wire.NewMsgTx() coinbaseTxSha := coinbaseTx.TxSha() - outPoint = wire.NewOutPoint(&coinbaseTxSha, 0) + outPoint = wire.NewOutPoint(&coinbaseTxSha, 0, + dcrutil.TxTreeRegular) txIn = wire.NewTxIn(outPoint, sigScript) txOut = wire.NewTxOut(0, nil) @@ -176,6 +180,10 @@ func createSpendingTx(sigScript, pkScript []byte) *wire.MsgTx { // TestScriptInvalidTests ensures all of the tests in script_invalid.json fail // as expected. +// TODO These tests need to be completely regenerated and should really be +// dynamically created. Most of them are failing because they use Bitcoin's +// sighash algorithm to create their signatures, and thus fail for completely +// wrong reasons compared to what they're supposed to test. 
func TestScriptInvalidTests(t *testing.T) { file, err := ioutil.ReadFile("data/script_invalid.json") if err != nil { @@ -217,7 +225,7 @@ func TestScriptInvalidTests(t *testing.T) { continue } tx := createSpendingTx(scriptSig, scriptPubKey) - vm, err := NewEngine(scriptPubKey, tx, 0, flags) + vm, err := NewEngine(scriptPubKey, tx, 0, flags, 0) if err == nil { if err := vm.Execute(); err == nil { t.Errorf("%s test succeeded when it "+ @@ -271,14 +279,14 @@ func TestScriptValidTests(t *testing.T) { continue } tx := createSpendingTx(scriptSig, scriptPubKey) - vm, err := NewEngine(scriptPubKey, tx, 0, flags) + vm, err := NewEngine(scriptPubKey, tx, 0, flags, 0) if err != nil { t.Errorf("%s failed to create script: %v", name, err) continue } err = vm.Execute() if err != nil { - t.Errorf("%s failed to execute: %v", name, err) + t.Errorf("test %v:%s failed to execute: %v", i, name, err) continue } } @@ -330,7 +338,7 @@ testloop: continue } - tx, err := btcutil.NewTxFromBytes(serializedTx) + tx, err := dcrutil.NewTxFromBytesLegacy(serializedTx) if err != nil { t.Errorf("bad test (arg 2 not msgtx %v) %d: %v", err, i, test) @@ -371,7 +379,7 @@ testloop: continue testloop } - prevhash, err := wire.NewShaHashFromStr(previoustx) + prevhash, err := chainhash.NewHashFromStr(previoustx) if err != nil { t.Errorf("bad test (%dth input sha not sha %v)"+ "%d: %v", j, err, i, test) @@ -401,7 +409,7 @@ testloop: continue testloop } - prevOuts[*wire.NewOutPoint(prevhash, idx)] = script + prevOuts[*wire.NewOutPoint(prevhash, idx, dcrutil.TxTreeRegular)] = script } for k, txin := range tx.MsgTx().TxIn { @@ -414,7 +422,7 @@ testloop: // These are meant to fail, so as soon as the first // input fails the transaction has failed. (some of the // test txns have good inputs, too.. - vm, err := NewEngine(pkScript, tx.MsgTx(), k, flags) + vm, err := NewEngine(pkScript, tx.MsgTx(), k, flags, 0) if err != nil { continue testloop } @@ -474,7 +482,7 @@ testloop: continue } - tx, err := btcutil.NewTxFromBytes(serializedTx) + tx, err := dcrutil.NewTxFromBytesLegacy(serializedTx) if err != nil { t.Errorf("bad test (arg 2 not msgtx %v) %d: %v", err, i, test) @@ -515,7 +523,7 @@ testloop: continue } - prevhash, err := wire.NewShaHashFromStr(previoustx) + prevhash, err := chainhash.NewHashFromStr(previoustx) if err != nil { t.Errorf("bad test (%dth input sha not sha %v)"+ "%d: %v", j, err, i, test) @@ -545,7 +553,7 @@ testloop: continue } - prevOuts[*wire.NewOutPoint(prevhash, idx)] = script + prevOuts[*wire.NewOutPoint(prevhash, idx, dcrutil.TxTreeRegular)] = script } for k, txin := range tx.MsgTx().TxIn { @@ -555,7 +563,7 @@ testloop: k, i, test) continue testloop } - vm, err := NewEngine(pkScript, tx.MsgTx(), k, flags) + vm, err := NewEngine(pkScript, tx.MsgTx(), k, flags, 0) if err != nil { t.Errorf("test (%d:%v:%d) failed to create "+ "script: %v", i, test, k, err) diff --git a/txscript/script.go b/txscript/script.go index 333774c6..a906acf9 100644 --- a/txscript/script.go +++ b/txscript/script.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,16 +9,12 @@ import ( "bytes" "encoding/binary" "fmt" - "time" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" ) -// Bip16Activation is the timestamp where BIP0016 is valid to use in the -// blockchain. 
To be used to determine if BIP0016 should be called for or not. -// This timestamp corresponds to Sun Apr 1 00:00:00 UTC 2012. -var Bip16Activation = time.Unix(1333238400, 0) - // SigHashType represents hash type bits at the end of a signature. type SigHashType byte @@ -27,6 +24,7 @@ const ( SigHashAll SigHashType = 0x1 SigHashNone SigHashType = 0x2 SigHashSingle SigHashType = 0x3 + SigHashAllValue SigHashType = 0x4 SigHashAnyOneCanPay SigHashType = 0x80 // sigHashMask defines the number of bits of the hash type which is used @@ -36,9 +34,9 @@ const ( // These are the constants specified for maximums in individual scripts. const ( - MaxOpsPerScript = 201 // Max number of non-push operations. - MaxPubKeysPerMultiSig = 20 // Multisig can't have more sigs than this. - MaxScriptElementSize = 520 // Max bytes pushable to the stack. + MaxOpsPerScript = 255 // Max number of non-push operations. + MaxPubKeysPerMultiSig = 20 // Multisig can't have more sigs than this. + MaxScriptElementSize = 2048 // Max bytes pushable to the stack. ) // isSmallInt returns whether or not the opcode is considered a small integer, @@ -50,15 +48,6 @@ func isSmallInt(op *opcode) bool { return false } -// isScriptHash returns true if the script passed is a pay-to-script-hash -// transaction, false otherwise. -func isScriptHash(pops []parsedOpcode) bool { - return len(pops) == 3 && - pops[0].opcode.value == OP_HASH160 && - pops[1].opcode.value == OP_DATA_20 && - pops[2].opcode.value == OP_EQUAL -} - // IsPayToScriptHash returns true if the script is in the standard // pay-to-script-hash (P2SH) format, false otherwise. func IsPayToScriptHash(script []byte) bool { @@ -98,10 +87,40 @@ func IsPushOnlyScript(script []byte) bool { return isPushOnly(pops) } +func HasP2SHScriptSigStakeOpCodes(version uint16, scriptSig, + scriptPubKey []byte) error { + class := GetScriptClass(version, scriptPubKey) + if IsStakeOutput(scriptPubKey) { + class, _ = GetStakeOutSubclass(scriptPubKey) + } + if class == ScriptHashTy { + // Obtain the embedded pkScript from the scriptSig of the + // current transaction. Then, ensure that it does not use + // any stake tagging OP codes. + shScript, err := GetPkScriptFromP2SHSigScript(scriptSig) + if err != nil { + return fmt.Errorf("unexpected error retrieving pkscript "+ + "from p2sh transaction: %v", err.Error()) + } + + hasStakeOpCodes, err := ContainsStakeOpCodes(shScript) + if err != nil { + return fmt.Errorf("unexpected error checking pkscript "+ + "from p2sh transaction: %v", err.Error()) + } + if hasStakeOpCodes { + return ErrP2SHStakeOpCodes + } + } + + return nil +} + // parseScriptTemplate is the same as parseScript but allows the passing of the // template list for testing purposes. When there are parse errors, it returns // the list of parsed opcodes up to the point of failure along with the error. -func parseScriptTemplate(script []byte, opcodes *[256]opcode) ([]parsedOpcode, error) { +func parseScriptTemplate(script []byte, opcodes *[256]opcode) ([]parsedOpcode, + error) { retScript := make([]parsedOpcode, 0, len(script)) for i := 0; i < len(script); { instr := script[i] @@ -172,6 +191,11 @@ func parseScriptTemplate(script []byte, opcodes *[256]opcode) ([]parsedOpcode, e return retScript, nil } +// ParseScript is an exported version for testing. +func ParseScript(script []byte) ([]parsedOpcode, error) { + return parseScriptTemplate(script, &opcodeArray) +} + // parseScript preparses the script in bytes into a list of parsedOpcodes while // applying a number of sanity checks. 
func parseScript(script []byte) ([]parsedOpcode, error) { @@ -263,10 +287,17 @@ func removeOpcodeByData(pkscript []parsedOpcode, data []byte) []parsedOpcode { } +// CalcSignatureHash is an exported version for testing. +func CalcSignatureHash(script []parsedOpcode, hashType SigHashType, + tx *wire.MsgTx, idx int, cachedPrefix *chainhash.Hash) ([]byte, error) { + return calcSignatureHash(script, hashType, tx, idx, cachedPrefix) +} + // calcSignatureHash will, given a script and hash type for the current script // engine instance, calculate the signature hash to be used for signing and // verification. -func calcSignatureHash(script []parsedOpcode, hashType SigHashType, tx *wire.MsgTx, idx int) []byte { +func calcSignatureHash(script []parsedOpcode, hashType SigHashType, + tx *wire.MsgTx, idx int, cachedPrefix *chainhash.Hash) ([]byte, error) { // The SigHashSingle signature type signs only the corresponding input // and output (the output with the same index number as the input). // @@ -287,10 +318,10 @@ func calcSignatureHash(script []parsedOpcode, hashType SigHashType, tx *wire.Msg // hash of 1. This in turn presents an opportunity for attackers to // cleverly construct transactions which can steal those coins provided // they can reuse signatures. + // + // Decred mitigates this by actually returning an error instead. if hashType&sigHashMask == SigHashSingle && idx >= len(tx.TxOut) { - var hash wire.ShaHash - hash[0] = 0x01 - return hash[:] + return nil, ErrSighashSingleIdx } // Remove all instances of OP_CODESEPARATOR from the script. @@ -352,6 +383,8 @@ func calcSignatureHash(script []parsedOpcode, hashType SigHashType, tx *wire.Msg fallthrough case SigHashOld: fallthrough + case SigHashAllValue: + fallthrough case SigHashAll: // Nothing special here. } @@ -360,13 +393,41 @@ func calcSignatureHash(script []parsedOpcode, hashType SigHashType, tx *wire.Msg idx = 0 } - // The final hash is the double sha256 of both the serialized modified - // transaction and the hash type (encoded as a 4-byte little-endian - // value) appended. + // The final hash (message to sign) is the hash of: + // 1) hash of the prefix || + // 2) hash of the witness for signing || + // 3) the hash type (encoded as a 4-byte little-endian value) var wbuf bytes.Buffer - txCopy.Serialize(&wbuf) binary.Write(&wbuf, binary.LittleEndian, uint32(hashType)) - return wire.DoubleSha256(wbuf.Bytes()) + + // Optimization for SIGHASH_ALL. In this case, the prefix hash is + // the same as the transaction hash because only the inputs have + // been modified, so don't bother to do the wasteful O(N^2) extra + // hash here. + // The caching only works if the "anyone can pay flag" is also + // disabled. + var prefixHash chainhash.Hash + if cachedPrefix != nil && + (hashType&sigHashMask == SigHashAll) && + (hashType&SigHashAnyOneCanPay == 0) && + chaincfg.SigHashOptimization { + prefixHash = *cachedPrefix + } else { + prefixHash = txCopy.TxSha() + } + + // If the ValueIn is to be included in what we're signing, sign + // the witness hash that includes it. Otherwise, just sign the + // prefix and signature scripts. 
+ var witnessHash chainhash.Hash + if hashType&sigHashMask != SigHashAllValue { + witnessHash = txCopy.TxShaWitnessSigning() + } else { + witnessHash = txCopy.TxShaWitnessValueSigning() + } + wbuf.Write(prefixHash.Bytes()) + wbuf.Write(witnessHash.Bytes()) + return chainhash.HashFuncB(wbuf.Bytes()), nil } // asSmallInt returns the passed opcode, which must be true according to @@ -390,6 +451,10 @@ func getSigOpCount(pops []parsedOpcode, precise bool) int { case OP_CHECKSIG: fallthrough case OP_CHECKSIGVERIFY: + fallthrough + case OP_CHECKSIGALT: + fallthrough + case OP_CHECKSIGALTVERIFY: nSigs++ case OP_CHECKMULTISIG: fallthrough diff --git a/txscript/script_test.go b/txscript/script_test.go index 21b88af7..afc11f09 100644 --- a/txscript/script_test.go +++ b/txscript/script_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -6,10 +7,13 @@ package txscript_test import ( "bytes" + "encoding/hex" "reflect" "testing" - "github.com/btcsuite/btcd/txscript" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" ) // TestPushedData ensured the PushedData function extracts the expected data out @@ -292,6 +296,24 @@ func TestRemoveOpcodeByData(t *testing.T) { remove: []byte{1, 2, 3, 5}, after: []byte{txscript.OP_DATA_4, 1, 2, 3, 4}, }, + // fix to allow for decred tests too + /* + { + name: "stakesubmission", + scriptclass: txscript.StakeSubmissionTy, + stringed: "stakesubmission", + }, + { + name: "stakegen", + scriptclass: txscript.StakeGenTy, + stringed: "stakegen", + }, + { + name: "stakerevoke", + scriptclass: txscript.StakeRevocationTy, + stringed: "stakerevoke", + }, + */ { // padded to keep it canonical. name: "simple case (pushdata1)", @@ -367,9 +389,9 @@ func TestRemoveOpcodeByData(t *testing.T) { }, { name: "invalid opcode ", - before: []byte{txscript.OP_UNKNOWN187}, + before: []byte{txscript.OP_UNKNOWN192}, remove: []byte{1, 2, 3, 4}, - after: []byte{txscript.OP_UNKNOWN187}, + after: []byte{txscript.OP_UNKNOWN192}, }, { name: "invalid length (instruction)", @@ -486,3 +508,71 @@ func TestIsPushOnlyScript(t *testing.T) { "%v", test.name, true, test.expected) } } + +// TestCalcSignatureHash does some rudimentary testing of msg hash calculation. +func TestCalcSignatureHash(t *testing.T) { + tx := new(wire.MsgTx) + for i := 0; i < 3; i++ { + txIn := new(wire.TxIn) + txIn.Sequence = 0xFFFFFFFF + txIn.PreviousOutPoint.Hash = chainhash.HashFuncH([]byte{byte(i)}) + txIn.PreviousOutPoint.Index = uint32(i) + txIn.PreviousOutPoint.Tree = int8(0) + tx.AddTxIn(txIn) + } + for i := 0; i < 2; i++ { + txOut := new(wire.TxOut) + txOut.PkScript = []byte{0x01, 0x01, 0x02, 0x03} + txOut.Value = 0x0000FF00FF00FF00 + tx.AddTxOut(txOut) + } + + want, _ := hex.DecodeString("d09285b6f60c71329323bc2e76c48" + + "a462cde4e1032aa8f59c55823f1722c7f4a") + pops, _ := txscript.ParseScript([]byte{0x01, 0x01, 0x02, 0x03}) + + // Test prefix caching. 
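Editor's aside on the hunk above: calcSignatureHash no longer double-SHA256s the whole serialized, modified transaction; it hashes a small buffer built from the hash type, a prefix hash, and a witness hash (and, per the code, the 4-byte little-endian hash type is written first even though the comment lists it last). The sketch below only illustrates that buffer layout; sigHashMessage is a hypothetical helper, and the 32-byte arrays and the final digest function stand in for the real TxSha/chainhash calls.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// sigHashMessage assembles the byte layout that the patched calcSignatureHash
// hashes: 4-byte little-endian hash type, then the prefix hash, then the
// witness hash.  The caller would hash this buffer once more to obtain the
// digest that is actually signed.
func sigHashMessage(hashType uint32, prefixHash, witnessHash [32]byte) []byte {
	var buf bytes.Buffer
	binary.Write(&buf, binary.LittleEndian, hashType) // hash type first
	buf.Write(prefixHash[:])                          // e.g. txCopy.TxSha()
	buf.Write(witnessHash[:])                         // e.g. TxShaWitnessSigning()
	return buf.Bytes()
}

func main() {
	var prefix, witness [32]byte // placeholders for the real hashes
	msg := sigHashMessage(0x01 /* SigHashAll */, prefix, witness)
	fmt.Printf("message to hash is %d bytes\n", len(msg)) // 4 + 32 + 32 = 68
}
```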
+ msg1, err := txscript.CalcSignatureHash(pops, txscript.SigHashAll, tx, 0, nil) + if err != nil { + t.Fatalf("unexpected error %v", err.Error()) + } + + prefixHash := tx.TxSha() + msg2, err := txscript.CalcSignatureHash(pops, txscript.SigHashAll, tx, 0, + &prefixHash) + if err != nil { + t.Fatalf("unexpected error %v", err.Error()) + } + + if !bytes.Equal(msg1, want) { + t.Errorf("for sighash all sig noncached wrong msg %x given, want %x", + msg1, + want) + } + if !bytes.Equal(msg2, want) { + t.Errorf("for sighash all sig cached wrong msg %x given, want %x", + msg2, + want) + } + if !bytes.Equal(msg1, msg2) { + t.Errorf("for sighash all sig non-equivalent msgs %x and %x were "+ + "returned when using a cached prefix", + msg1, + msg2) + } + + // Move the index and make sure that we get a whole new hash, despite + // using the same TxOuts. + msg3, err := txscript.CalcSignatureHash(pops, txscript.SigHashAll, tx, 1, + &prefixHash) + if err != nil { + t.Fatalf("unexpected error %v", err.Error()) + } + + if bytes.Equal(msg1, msg3) { + t.Errorf("for sighash all sig equivalent msgs %x and %x were "+ + "returned when using a cached prefix but different indices", + msg1, + msg3) + } +} diff --git a/txscript/scriptbuilder.go b/txscript/scriptbuilder.go index bf5e91f7..db945c4d 100644 --- a/txscript/scriptbuilder.go +++ b/txscript/scriptbuilder.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. diff --git a/txscript/scriptbuilder_test.go b/txscript/scriptbuilder_test.go index 677c0cf0..bcd12440 100644 --- a/txscript/scriptbuilder_test.go +++ b/txscript/scriptbuilder_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,7 +9,7 @@ import ( "bytes" "testing" - "github.com/btcsuite/btcd/txscript" + "github.com/decred/dcrd/txscript" ) // TestScriptBuilderAddOp tests that pushing opcodes to a script via the @@ -212,7 +213,7 @@ func TestScriptBuilderAddData(t *testing.T) { // other operators. { name: "push data len 521", - data: bytes.Repeat([]byte{0x49}, 521), + data: bytes.Repeat([]byte{0x49}, 4097), expected: nil, }, { diff --git a/txscript/scriptnum.go b/txscript/scriptnum.go index c901fe2c..7c62aac2 100644 --- a/txscript/scriptnum.go +++ b/txscript/scriptnum.go @@ -1,4 +1,5 @@ // Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,9 +9,13 @@ const ( maxInt32 = 1<<31 - 1 minInt32 = -1 << 31 - // maxScriptNumLen is the maximum number of bytes data being interpreted - // as an integer may be. - maxScriptNumLen = 4 + // mathOpCodeMaxScriptNumLen is the maximum number of bytes data being + // interpreted as an integer may be for the majority of op codes.
+ mathOpCodeMaxScriptNumLen = 4 + + // altSigSuitesMaxscriptNumLen is the maximum number of bytes for the + // type of alternative signature suite + altSigSuitesMaxscriptNumLen = 1 ) // scriptNum represents a numeric value used in the scripting engine with @@ -36,9 +41,9 @@ const ( // // Then, whenever data is interpreted as an integer, it is converted to this // type by using the makeScriptNum function which will return an error if the -// number is out of range (or not minimally encoded depending on a flag). Since -// all numeric opcodes involve pulling data from the stack and interpreting it -// as an integer, it provides the required behavior. +// number is out of range or not minimally encoded depending on parameters. +// Since all numeric opcodes involve pulling data from the stack and +// interpreting it as an integer, it provides the required behavior. type scriptNum int64 // checkMinimalDataEncoding returns whether or not the passed byte array adheres @@ -132,11 +137,12 @@ func (n scriptNum) Bytes() []byte { // and the consensus rules dictate numbers which are directly cast to ints // provide this behavior. // -// In practice, the number should never really be out of range since it will -// have been created with makeScriptNum which rejects them, but in case -// something in the future ends up calling this function against the result -// of some arithmetic, which IS allowed to be out of range before being -// reinterpreted as an integer, this will provide the correct behavior. +// In practice, for most opcodes, the number should never be out of range since +// it will have been created with makeScriptNum using the defaultScriptLen +// value, which rejects them. In case something in the future ends up calling +// this function against the result of some arithmetic, which IS allowed to be +// out of range before being reinterpreted as an integer, this will provide the +// correct behavior. func (n scriptNum) Int32() int32 { if n > maxInt32 { return maxInt32 @@ -152,10 +158,13 @@ func (n scriptNum) Int32() int32 { // makeScriptNum interprets the passed serialized bytes as an encoded integer // and returns the result as a script number. // -// Since the consensus rules dictate the serialized bytes interpreted as ints -// are only allowed to be in the range [-2^31 + 1, 2^31 - 1], an error will be -// returned when the provided bytes would result in a number outside of that -// range. +// Since the consensus rules dictate that serialized bytes interpreted as ints +// are only allowed to be in the range determined by a maximum number of bytes, +// on a per opcode basis, an error will be returned when the provided bytes +// would result in a number outside of that range. In particular, the range for +// the vast majority of opcodes dealing with numeric values are limited to 4 +// bytes and therefore will pass that value to this function resulting in an +// allowed range of [-2^31 + 1, 2^31 - 1]. // // The requireMinimal flag causes an error to be returned if additional checks // on the encoding determine it is not represented with the smallest possible @@ -164,11 +173,18 @@ func (n scriptNum) Int32() int32 { // [0x7f 0x00 0x00 ...], etc. All forms except [0x7f] will return an error with // requireMinimal enabled. // +// The scriptNumLen is the maximum number of bytes the encoded value can be +// before an ErrStackNumberTooBig is returned. This effectively limits the +// range of allowed values. 
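Editor's aside: to make the per-opcode length cap concrete, here is a minimal stand-alone sketch of the decoding makeScriptNum performs, assuming the usual little-endian encoding with the sign carried in the high bit of the most significant byte, and omitting the minimal-encoding check. decodeScriptNum is a hypothetical helper, not the package's API. With a 4-byte cap (mathOpCodeMaxScriptNumLen) the accepted range is [-2^31 + 1, 2^31 - 1]; the 1-byte cap used for the alternative-signature-suite selector only admits single-byte values.

```go
package main

import (
	"errors"
	"fmt"
)

// decodeScriptNum is a simplified stand-in for makeScriptNum: little-endian
// bytes with the sign carried in the high bit of the most significant byte.
// maxLen plays the role of the new scriptNumLen argument; the minimal
// encoding check is omitted for brevity.
func decodeScriptNum(v []byte, maxLen int) (int64, error) {
	if len(v) > maxLen {
		return 0, errors.New("script number is too big")
	}
	if len(v) == 0 {
		return 0, nil
	}
	var result int64
	for i, b := range v {
		result |= int64(b) << uint(8*i)
	}
	// If the sign bit of the most significant byte is set, clear it and
	// negate the result.
	if v[len(v)-1]&0x80 != 0 {
		result &= ^(int64(0x80) << uint(8*(len(v)-1)))
		result = -result
	}
	return result, nil
}

func main() {
	n, _ := decodeScriptNum([]byte{0xff, 0xff, 0xff, 0x7f}, 4)
	fmt.Println(n) // 2147483647, the largest value a 4-byte cap allows
	_, err := decodeScriptNum([]byte{0x01, 0x00, 0x00, 0x00, 0x00}, 4)
	fmt.Println(err) // rejected: five bytes exceed the 4-byte cap
}
```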
+// WARNING: Great care should be taken if passing a value larger than +// defaultScriptNumLen, which could lead to addition and multiplication +// overflows. +// // See the Bytes function documentation for example encodings. -func makeScriptNum(v []byte, requireMinimal bool) (scriptNum, error) { - // Interpreting data as an integer requires that it is not larger than - // a 32-bit integer. - if len(v) > maxScriptNumLen { +func makeScriptNum(v []byte, requireMinimal bool, scriptNumLen int) (scriptNum, error) { + // Interpreting data requires that it is not larger than + // the passed scriptNumLen value. + if len(v) > scriptNumLen { return 0, ErrStackNumberTooBig } diff --git a/txscript/scriptnum_test.go b/txscript/scriptnum_test.go index 5bdd4a3a..4bd79ed2 100644 --- a/txscript/scriptnum_test.go +++ b/txscript/scriptnum_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -177,7 +178,8 @@ func TestMakeScriptNum(t *testing.T) { } for _, test := range tests { - gotNum, err := makeScriptNum(test.serialized, test.minimalEncoding) + gotNum, err := makeScriptNum(test.serialized, test.minimalEncoding, + mathOpCodeMaxScriptNumLen) if err != test.err { t.Errorf("makeScriptNum: did not received expected "+ "error for %x - got %v, want %v", diff --git a/txscript/sign.go b/txscript/sign.go index df8afeb3..ba653e43 100644 --- a/txscript/sign.go +++ b/txscript/sign.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,31 +9,72 @@ import ( "errors" "fmt" - "github.com/btcsuite/btcd/btcec" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainec" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) // RawTxInSignature returns the serialized ECDSA signature for the input idx of // the given transaction, with hashType appended to it. func RawTxInSignature(tx *wire.MsgTx, idx int, subScript []byte, - hashType SigHashType, key *btcec.PrivateKey) ([]byte, error) { + hashType SigHashType, key chainec.PrivateKey) ([]byte, error) { parsedScript, err := parseScript(subScript) if err != nil { return nil, fmt.Errorf("cannot parse output script: %v", err) } - hash := calcSignatureHash(parsedScript, hashType, tx, idx) - signature, err := key.Sign(hash) + hash, err := calcSignatureHash(parsedScript, hashType, tx, idx, nil) + if err != nil { + return nil, err + } + + r, s, err := chainec.Secp256k1.Sign(key, hash) if err != nil { return nil, fmt.Errorf("cannot sign tx input: %s", err) } + sig := chainec.Secp256k1.NewSignature(r, s) - return append(signature.Serialize(), byte(hashType)), nil + return append(sig.Serialize(), byte(hashType)), nil } -// SignatureScript creates an input signature script for tx to spend BTC sent +// RawTxInSignatureAlt returns the serialized signature for the input idx of +// the given transaction, with hashType appended to it, using the requested signature type.
+func RawTxInSignatureAlt(tx *wire.MsgTx, idx int, subScript []byte, + hashType SigHashType, key chainec.PrivateKey, sigType sigTypes) ([]byte, + error) { + + parsedScript, err := parseScript(subScript) + if err != nil { + return nil, fmt.Errorf("cannot parse output script: %v", err) + } + hash, err := calcSignatureHash(parsedScript, hashType, tx, idx, nil) + if err != nil { + return nil, err + } + + var sig chainec.Signature + switch sigType { + case edwards: + r, s, err := chainec.Edwards.Sign(key, hash) + if err != nil { + return nil, fmt.Errorf("cannot sign tx input: %s", err) + } + sig = chainec.Edwards.NewSignature(r, s) + case secSchnorr: + r, s, err := chainec.SecSchnorr.Sign(key, hash) + if err != nil { + return nil, fmt.Errorf("cannot sign tx input: %s", err) + } + sig = chainec.SecSchnorr.NewSignature(r, s) + default: + return nil, fmt.Errorf("unknown alt sig type %v", sigType) + } + + return append(sig.Serialize(), byte(hashType)), nil +} + +// SignatureScript creates an input signature script for tx to spend coins sent // from a previous output to the owner of privKey. tx must include all // transaction inputs and outputs, however txin scripts are allowed to be filled // or empty. The returned script is calculated to be used as the idx'th txin @@ -40,24 +82,60 @@ func RawTxInSignature(tx *wire.MsgTx, idx int, subScript []byte, // as the idx'th input. privKey is serialized in either a compressed or // uncompressed format based on compress. This format must match the same format // used to generate the payment address, or the script validation will fail. -func SignatureScript(tx *wire.MsgTx, idx int, subscript []byte, hashType SigHashType, privKey *btcec.PrivateKey, compress bool) ([]byte, error) { +func SignatureScript(tx *wire.MsgTx, idx int, subscript []byte, + hashType SigHashType, privKey chainec.PrivateKey, compress bool) ([]byte, + error) { sig, err := RawTxInSignature(tx, idx, subscript, hashType, privKey) if err != nil { return nil, err } - pk := (*btcec.PublicKey)(&privKey.PublicKey) + pubx, puby := privKey.Public() + pub := chainec.Secp256k1.NewPublicKey(pubx, puby) var pkData []byte if compress { - pkData = pk.SerializeCompressed() + pkData = pub.SerializeCompressed() } else { - pkData = pk.SerializeUncompressed() + pkData = pub.SerializeUncompressed() } return NewScriptBuilder().AddData(sig).AddData(pkData).Script() } -func p2pkSignatureScript(tx *wire.MsgTx, idx int, subScript []byte, hashType SigHashType, privKey *btcec.PrivateKey) ([]byte, error) { +// SignatureScriptAlt creates an input signature script for tx to spend coins sent +// from a previous output to the owner of privKey. tx must include all +// transaction inputs and outputs, however txin scripts are allowed to be filled +// or empty. The returned script is calculated to be used as the idx'th txin +// sigscript for tx. subscript is the PkScript of the previous output being used +// as the idx'th input. privKey is serialized in the respective format for the +// ECDSA type. This format must match the same format used to generate the payment +// address, or the script validation will fail. 
+func SignatureScriptAlt(tx *wire.MsgTx, idx int, subscript []byte, + hashType SigHashType, privKey chainec.PrivateKey, compress bool, + sigType int) ([]byte, + error) { + sig, err := RawTxInSignatureAlt(tx, idx, subscript, hashType, privKey, + sigTypes(sigType)) + if err != nil { + return nil, err + } + + pubx, puby := privKey.Public() + var pub chainec.PublicKey + switch sigTypes(sigType) { + case edwards: + pub = chainec.Edwards.NewPublicKey(pubx, puby) + case secSchnorr: + pub = chainec.SecSchnorr.NewPublicKey(pubx, puby) + } + pkData := pub.Serialize() + + return NewScriptBuilder().AddData(sig).AddData(pkData).Script() +} + +// p2pkSignatureScript constructs a pay-to-pubkey signature script. +func p2pkSignatureScript(tx *wire.MsgTx, idx int, subScript []byte, + hashType SigHashType, privKey chainec.PrivateKey) ([]byte, error) { sig, err := RawTxInSignature(tx, idx, subScript, hashType, privKey) if err != nil { return nil, err @@ -66,16 +144,28 @@ func p2pkSignatureScript(tx *wire.MsgTx, idx int, subScript []byte, hashType Sig return NewScriptBuilder().AddData(sig).Script() } +// p2pkSignatureScriptAlt constructs a pay-to-pubkey signature script for the +// alternative signature suites (edwards, secSchnorr). +func p2pkSignatureScriptAlt(tx *wire.MsgTx, idx int, subScript []byte, + hashType SigHashType, privKey chainec.PrivateKey, sigType sigTypes) ([]byte, + error) { + sig, err := RawTxInSignatureAlt(tx, idx, subScript, hashType, privKey, + sigType) + if err != nil { + return nil, err + } + + return NewScriptBuilder().AddData(sig).Script() +} + // signMultiSig signs as many of the outputs in the provided multisig script as // possible. It returns the generated script and a boolean if the script fulfils // the contract (i.e. nrequired signatures are provided). Since it is arguably // legal to not be able to sign any of the outputs, no error is returned. func signMultiSig(tx *wire.MsgTx, idx int, subScript []byte, hashType SigHashType, - addresses []btcutil.Address, nRequired int, kdb KeyDB) ([]byte, bool) { - // We start with a single OP_FALSE to work around the (now standard) - // but in the reference implementation that causes a spurious pop at - // the end of OP_CHECKMULTISIG. - builder := NewScriptBuilder().AddOp(OP_FALSE) + addresses []dcrutil.Address, nRequired int, kdb KeyDB) ([]byte, bool) { + // No need to add dummy in Decred. + builder := NewScriptBuilder() signed := 0 for _, addr := range addresses { key, _, err := kdb.GetKey(addr) @@ -92,23 +182,73 @@ func signMultiSig(tx *wire.MsgTx, idx int, subScript []byte, hashType SigHashTyp if signed == nRequired { break } - } script, _ := builder.Script() return script, signed == nRequired } -func sign(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int, - subScript []byte, hashType SigHashType, kdb KeyDB, sdb ScriptDB) ([]byte, - ScriptClass, []btcutil.Address, int, error) { +// handleStakeOutSign is a convenience function for reducing code clutter in +// sign. It handles the signing of stake outputs.
+func handleStakeOutSign(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int, + subScript []byte, hashType SigHashType, kdb KeyDB, sdb ScriptDB, + addresses []dcrutil.Address, class ScriptClass, subClass ScriptClass, + nrequired int) ([]byte, ScriptClass, []dcrutil.Address, int, error) { - class, addresses, nrequired, err := ExtractPkScriptAddrs(subScript, - chainParams) + // look up key for address + switch subClass { + case PubKeyHashTy: + key, compressed, err := kdb.GetKey(addresses[0]) + if err != nil { + return nil, class, nil, 0, err + } + txscript, err := SignatureScript(tx, idx, subScript, hashType, + key, compressed) + if err != nil { + return nil, class, nil, 0, err + } + return txscript, class, addresses, nrequired, nil + case ScriptHashTy: + script, err := sdb.GetScript(addresses[0]) + if err != nil { + return nil, class, nil, 0, err + } + + return script, class, addresses, nrequired, nil + } + + return nil, class, nil, 0, fmt.Errorf("unknown subclass for stake output " + + "to sign") +} + +// sign is the main signing workhorse. It takes a script, its input transaction, +// its input index, a database of keys, a database of scripts, and information +// about the type of signature and returns a signature, script class, the +// addresses involved, and the number of signatures required. +func sign(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int, + subScript []byte, hashType SigHashType, kdb KeyDB, sdb ScriptDB, + sigType sigTypes) ([]byte, + ScriptClass, []dcrutil.Address, int, error) { + + class, addresses, nrequired, err := ExtractPkScriptAddrs(DefaultScriptVersion, + subScript, chainParams) if err != nil { return nil, NonStandardTy, nil, 0, err } + subClass := class + isStakeType := class == StakeSubmissionTy || + class == StakeSubChangeTy || + class == StakeGenTy || + class == StakeRevocationTy + if isStakeType { + subClass, err = GetStakeOutSubclass(subScript) + if err != nil { + return nil, 0, nil, 0, + fmt.Errorf("unknown stake output subclass encountered") + } + } + switch class { case PubKeyTy: // look up key for address @@ -124,6 +264,22 @@ func sign(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int, } return script, class, addresses, nrequired, nil + + case PubkeyAltTy: + // look up key for address + key, _, err := kdb.GetKey(addresses[0]) + if err != nil { + return nil, class, nil, 0, err + } + + script, err := p2pkSignatureScriptAlt(tx, idx, subScript, hashType, + key, sigType) + if err != nil { + return nil, class, nil, 0, err + } + + return script, class, addresses, nrequired, nil + case PubKeyHashTy: // look up key for address key, compressed, err := kdb.GetKey(addresses[0]) @@ -138,6 +294,22 @@ func sign(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int, } return script, class, addresses, nrequired, nil + + case PubkeyHashAltTy: + // look up key for address + key, compressed, err := kdb.GetKey(addresses[0]) + if err != nil { + return nil, class, nil, 0, err + } + + script, err := SignatureScriptAlt(tx, idx, subScript, hashType, + key, compressed, int(sigType)) + if err != nil { + return nil, class, nil, 0, err + } + + return script, class, addresses, nrequired, nil + case ScriptHashTy: script, err := sdb.GetScript(addresses[0]) if err != nil { @@ -145,13 +317,32 @@ func sign(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int, } return script, class, addresses, nrequired, nil + case MultiSigTy: script, _ := signMultiSig(tx, idx, subScript, hashType, addresses, nrequired, kdb) return script, class, addresses, nrequired, nil + + case 
StakeSubmissionTy: + return handleStakeOutSign(chainParams, tx, idx, subScript, hashType, kdb, + sdb, addresses, class, subClass, nrequired) + + case StakeGenTy: + return handleStakeOutSign(chainParams, tx, idx, subScript, hashType, kdb, + sdb, addresses, class, subClass, nrequired) + + case StakeRevocationTy: + return handleStakeOutSign(chainParams, tx, idx, subScript, hashType, kdb, + sdb, addresses, class, subClass, nrequired) + + case StakeSubChangeTy: + return handleStakeOutSign(chainParams, tx, idx, subScript, hashType, kdb, + sdb, addresses, class, subClass, nrequired) + case NullDataTy: return nil, class, nil, 0, errors.New("can't sign NULLDATA transactions") + default: return nil, class, nil, 0, errors.New("can't sign unknown transactions") @@ -165,7 +356,7 @@ func sign(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int, // function with addresses, class and nrequired that do not match pkScript is // an error and results in undefined behaviour. func mergeScripts(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int, - pkScript []byte, class ScriptClass, addresses []btcutil.Address, + pkScript []byte, class ScriptClass, addresses []dcrutil.Address, nRequired int, sigScript, prevScript []byte) []byte { // TODO(oga) the scripthash and multisig paths here are overly @@ -191,7 +382,7 @@ func mergeScripts(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int, // We already know this information somewhere up the stack. class, addresses, nrequired, err := - ExtractPkScriptAddrs(script, chainParams) + ExtractPkScriptAddrs(DefaultScriptVersion, script, chainParams) // regenerate scripts. sigScript, _ := unparseScript(sigPops) @@ -231,7 +422,7 @@ func mergeScripts(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int, // pkScript. Since this function is internal only we assume that the arguments // have come from other functions internally and thus are all consistent with // each other, behaviour is undefined if this contract is broken. -func mergeMultiSig(tx *wire.MsgTx, idx int, addresses []btcutil.Address, +func mergeMultiSig(tx *wire.MsgTx, idx int, addresses []dcrutil.Address, nRequired int, pkScript, sigScript, prevScript []byte) []byte { // This is an internal only function and we already parsed this script @@ -281,7 +472,7 @@ sigLoop: tSig := sig[:len(sig)-1] hashType := SigHashType(sig[len(sig)-1]) - pSig, err := btcec.ParseDERSignature(tSig, btcec.S256()) + pSig, err := chainec.Secp256k1.ParseDERSignature(tSig) if err != nil { continue } @@ -291,20 +482,27 @@ sigLoop: // however, assume no sigs etc are in the script since that // would make the transaction nonstandard and thus not // MultiSigTy, so we just need to hash the full thing. - hash := calcSignatureHash(pkPops, hashType, tx, idx) + hash, err := calcSignatureHash(pkPops, hashType, tx, idx, nil) + if err != nil { + // Decred -- is this the right handling for SIGHASH_SINGLE error ? + // TODO make sure this doesn't break anything. + continue + } for _, addr := range addresses { // All multisig addresses should be pubkey addreses // it is an error to call this internal function with // bad input. - pkaddr := addr.(*btcutil.AddressPubKey) + pkaddr := addr.(*dcrutil.AddressSecpPubKey) pubKey := pkaddr.PubKey() // If it matches we put it in the map. We only // can take one signature per public key so if we // already have one, we can throw this away. 
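Editor's aside: the loop above tries each parsed signature against the candidate public-key addresses and records at most one signature per address. A self-contained sketch of that bookkeeping follows; collectMultisigSigs and the verify callback are hypothetical stand-ins for the patch's address loop and its chainec.Secp256k1.Verify call.

```go
package main

import "fmt"

// collectMultisigSigs keeps at most one verifying signature per address,
// mirroring the map-based bookkeeping in mergeMultiSig.
func collectMultisigSigs(addrs []string, sigs [][]byte,
	verify func(addr string, sig []byte) bool) map[string][]byte {

	addrToSig := make(map[string][]byte)
	for _, sig := range sigs {
		for _, addr := range addrs {
			if !verify(addr, sig) {
				continue
			}
			// Only one signature per public key; a later match for
			// the same address is thrown away.
			if _, ok := addrToSig[addr]; !ok {
				addrToSig[addr] = sig
			}
			break
		}
	}
	return addrToSig
}

func main() {
	addrs := []string{"A", "B"}
	sigs := [][]byte{{0x01}, {0x02}, {0x03}}
	// Pretend sigs 0x01 and 0x03 both verify for A, and 0x02 verifies for B.
	verify := func(addr string, sig []byte) bool {
		return (addr == "A" && sig[0] != 0x02) || (addr == "B" && sig[0] == 0x02)
	}
	got := collectMultisigSigs(addrs, sigs, verify)
	fmt.Printf("A kept %x, B kept %x\n", got["A"], got["B"]) // A kept 01, B kept 02
}
```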
- if pSig.Verify(hash, pubKey) { + r := pSig.GetR() + s := pSig.GetS() + if chainec.Secp256k1.Verify(pubKey, hash, r, s) { aStr := addr.EncodeAddress() if _, ok := addrToSig[aStr]; !ok { addrToSig[aStr] = sig @@ -316,7 +514,7 @@ sigLoop: // Extra opcode to handle the extra arg consumed (due to previous bugs // in the reference implementation). - builder := NewScriptBuilder().AddOp(OP_FALSE) + builder := NewScriptBuilder() //.AddOp(OP_FALSE) doneSigs := 0 // This assumes that addresses are in the same order as in the script. for _, addr := range addresses { @@ -343,14 +541,14 @@ sigLoop: // KeyDB is an interface type provided to SignTxOutput, it encapsulates // any user state required to get the private keys for an address. type KeyDB interface { - GetKey(btcutil.Address) (*btcec.PrivateKey, bool, error) + GetKey(dcrutil.Address) (chainec.PrivateKey, bool, error) } // KeyClosure implements ScriptDB with a closure -type KeyClosure func(btcutil.Address) (*btcec.PrivateKey, bool, error) +type KeyClosure func(dcrutil.Address) (chainec.PrivateKey, bool, error) // GetKey implements KeyDB by returning the result of calling the closure -func (kc KeyClosure) GetKey(address btcutil.Address) (*btcec.PrivateKey, +func (kc KeyClosure) GetKey(address dcrutil.Address) (chainec.PrivateKey, bool, error) { return kc(address) } @@ -358,14 +556,14 @@ func (kc KeyClosure) GetKey(address btcutil.Address) (*btcec.PrivateKey, // ScriptDB is an interface type provided to SignTxOutput, it encapsulates any // user state required to get the scripts for an pay-to-script-hash address. type ScriptDB interface { - GetScript(btcutil.Address) ([]byte, error) + GetScript(dcrutil.Address) ([]byte, error) } // ScriptClosure implements ScriptDB with a closure -type ScriptClosure func(btcutil.Address) ([]byte, error) +type ScriptClosure func(dcrutil.Address) ([]byte, error) // GetScript implements ScriptDB by returning the result of calling the closure -func (sc ScriptClosure) GetScript(address btcutil.Address) ([]byte, error) { +func (sc ScriptClosure) GetScript(address dcrutil.Address) ([]byte, error) { return sc(address) } @@ -378,24 +576,33 @@ func (sc ScriptClosure) GetScript(address btcutil.Address) ([]byte, error) { // signature script. func SignTxOutput(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int, pkScript []byte, hashType SigHashType, kdb KeyDB, sdb ScriptDB, - previousScript []byte) ([]byte, error) { - + previousScript []byte, sigType int) ([]byte, error) { sigScript, class, addresses, nrequired, err := sign(chainParams, tx, - idx, pkScript, hashType, kdb, sdb) + idx, pkScript, hashType, kdb, sdb, sigTypes(sigType)) if err != nil { return nil, err } + isStakeType := class == StakeSubmissionTy || + class == StakeSubChangeTy || + class == StakeGenTy || + class == StakeRevocationTy + if isStakeType { + class, err = GetStakeOutSubclass(pkScript) + if err != nil { + return nil, fmt.Errorf("unknown stake output subclass encountered") + } + } + if class == ScriptHashTy { // TODO keep the sub addressed and pass down to merge. realSigScript, _, _, _, err := sign(chainParams, tx, idx, - sigScript, hashType, kdb, sdb) + sigScript, hashType, kdb, sdb, sigTypes(sigType)) if err != nil { return nil, err } - // This is a bad thing. Append the p2sh script as the last - // push in the script. + // Append the p2sh script as the last push in the script. 
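Editor's aside: with this patch, SignTxOutput takes a trailing signature-suite argument in addition to the optional previousScript to merge with. The sketch below shows how a caller might wire it up through the KeyClosure/ScriptClosure helpers; it assumes the dcrd/dcrutil packages exactly as they appear in this patch, assumes the output being spent is an ordinary secp256k1 P2PKH script, and assumes a suite value of 0 selects the default secp256k1 type (the named suite constants are unexported in txscript here).

```go
package example

import (
	"github.com/decred/dcrd/chaincfg"
	"github.com/decred/dcrd/chaincfg/chainec"
	"github.com/decred/dcrd/txscript"
	"github.com/decred/dcrd/wire"
	"github.com/decred/dcrutil"
)

// signInput fills in the signature script for one input of tx.  The closures
// stand in for a wallet's key and redeem-script lookups.
func signInput(tx *wire.MsgTx, idx int, pkScript []byte,
	key chainec.PrivateKey) error {

	kdb := txscript.KeyClosure(func(dcrutil.Address) (chainec.PrivateKey, bool, error) {
		return key, true, nil // same key for every address; compressed pubkey
	})
	sdb := txscript.ScriptClosure(func(dcrutil.Address) ([]byte, error) {
		return nil, nil // no P2SH redeem scripts in this sketch
	})

	// nil previousScript: nothing to merge with; 0: assumed secp256k1 suite.
	sigScript, err := txscript.SignTxOutput(&chaincfg.TestNetParams, tx, idx,
		pkScript, txscript.SigHashAll, kdb, sdb, nil, 0)
	if err != nil {
		return err
	}
	tx.TxIn[idx].SignatureScript = sigScript
	return nil
}
```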
builder := NewScriptBuilder() builder.script = realSigScript builder.AddData(sigScript) diff --git a/txscript/sign_test.go b/txscript/sign_test.go index 1da4cba5..e3cad8a5 100644 --- a/txscript/sign_test.go +++ b/txscript/sign_test.go @@ -1,55 +1,78 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package txscript_test import ( + "crypto/rand" "errors" "fmt" + mrand "math/rand" "testing" - "github.com/btcsuite/btcd/btcec" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainec" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) +const testValueIn = 12345 + type addressToKey struct { - key *btcec.PrivateKey + key *chainec.PrivateKey compressed bool } func mkGetKey(keys map[string]addressToKey) txscript.KeyDB { if keys == nil { - return txscript.KeyClosure(func(addr btcutil.Address) (*btcec.PrivateKey, + return txscript.KeyClosure(func(addr dcrutil.Address) (chainec.PrivateKey, bool, error) { - return nil, false, errors.New("nope") + return nil, false, errors.New("nope 1") }) } - return txscript.KeyClosure(func(addr btcutil.Address) (*btcec.PrivateKey, + return txscript.KeyClosure(func(addr dcrutil.Address) (chainec.PrivateKey, bool, error) { a2k, ok := keys[addr.EncodeAddress()] if !ok { - return nil, false, errors.New("nope") + return nil, false, errors.New("nope 2") } - return a2k.key, a2k.compressed, nil + return *a2k.key, a2k.compressed, nil + }) +} + +func mkGetKeyPub(keys map[string]addressToKey) txscript.KeyDB { + if keys == nil { + return txscript.KeyClosure(func(addr dcrutil.Address) (chainec.PrivateKey, + bool, error) { + return nil, false, errors.New("nope 1") + }) + } + return txscript.KeyClosure(func(addr dcrutil.Address) (chainec.PrivateKey, + bool, error) { + a2k, ok := keys[addr.String()] + if !ok { + return nil, false, errors.New("nope 2") + } + return *a2k.key, a2k.compressed, nil }) } func mkGetScript(scripts map[string][]byte) txscript.ScriptDB { if scripts == nil { - return txscript.ScriptClosure(func(addr btcutil.Address) ( + return txscript.ScriptClosure(func(addr dcrutil.Address) ( []byte, error) { - return nil, errors.New("nope") + return nil, errors.New("nope 3") }) } - return txscript.ScriptClosure(func(addr btcutil.Address) ([]byte, + return txscript.ScriptClosure(func(addr dcrutil.Address) ([]byte, error) { script, ok := scripts[addr.EncodeAddress()] if !ok { - return nil, errors.New("nope") + return nil, errors.New("nope 4") } return script, nil }) @@ -58,7 +81,7 @@ func mkGetScript(scripts map[string][]byte) txscript.ScriptDB { func checkScripts(msg string, tx *wire.MsgTx, idx int, sigScript, pkScript []byte) error { tx.TxIn[idx].SignatureScript = sigScript vm, err := txscript.NewEngine(pkScript, tx, idx, - txscript.ScriptBip16|txscript.ScriptVerifyDERSignatures) + txscript.ScriptBip16|txscript.ScriptVerifyDERSignatures, 0) if err != nil { return fmt.Errorf("failed to make script engine for %s: %v", msg, err) @@ -75,10 +98,10 @@ func checkScripts(msg string, tx *wire.MsgTx, idx int, sigScript, pkScript []byt func signAndCheck(msg string, tx *wire.MsgTx, idx int, pkScript []byte, hashType txscript.SigHashType, kdb txscript.KeyDB, sdb txscript.ScriptDB, - 
previousScript []byte) error { + previousScript []byte, suite int) error { - sigScript, err := txscript.SignTxOutput(&chaincfg.TestNet3Params, tx, - idx, pkScript, hashType, kdb, sdb, nil) + sigScript, err := txscript.SignTxOutput(&chaincfg.TestNetParams, tx, + idx, pkScript, hashType, kdb, sdb, nil, suite) if err != nil { return fmt.Errorf("failed to sign output %s: %v", msg, err) } @@ -86,6 +109,39 @@ func signAndCheck(msg string, tx *wire.MsgTx, idx int, pkScript []byte, return checkScripts(msg, tx, idx, sigScript, pkScript) } +func signBadAndCheck(msg string, tx *wire.MsgTx, idx int, pkScript []byte, + hashType txscript.SigHashType, kdb txscript.KeyDB, sdb txscript.ScriptDB, + previousScript []byte, suite int) error { + // Setup a PRNG. + randScriptHash := chainhash.HashFuncB(pkScript) + tRand := mrand.New(mrand.NewSource(int64(randScriptHash[0]))) + + // Test SigHashAllValue by corrupting the transaction's ValueIn so that + // the signature becomes invalid. + if hashType == txscript.SigHashAllValue { + tx.TxIn[0].ValueIn = 1 + } + + sigScript, err := txscript.SignTxOutput(&chaincfg.TestNetParams, tx, + idx, pkScript, hashType, kdb, sdb, nil, suite) + if err != nil { + return fmt.Errorf("failed to sign output %s: %v", msg, err) + } + + // Be sure to reset the value in when we're done creating the + // corrupted signature for that flag. + tx.TxIn[0].ValueIn = testValueIn + + // Corrupt a random bit in the signature. + if hashType != txscript.SigHashAllValue { + pos := tRand.Intn(len(sigScript) - 1) + bitPos := tRand.Intn(7) + sigScript[pos] ^= 1 << uint8(bitPos) + } + + return checkScripts(msg, tx, idx, sigScript, pkScript) +} + func TestSignTxOutput(t *testing.T) { t.Parallel() @@ -97,268 +153,362 @@ func TestSignTxOutput(t *testing.T) { txscript.SigHashAll, txscript.SigHashNone, txscript.SigHashSingle, + txscript.SigHashAllValue, txscript.SigHashAll | txscript.SigHashAnyOneCanPay, txscript.SigHashNone | txscript.SigHashAnyOneCanPay, txscript.SigHashSingle | txscript.SigHashAnyOneCanPay, + txscript.SigHashAllValue | txscript.SigHashAnyOneCanPay, + } + signatureSuites := []int{ + secp, + edwards, + secSchnorr, } tx := &wire.MsgTx{ Version: 1, TxIn: []*wire.TxIn{ &wire.TxIn{ PreviousOutPoint: wire.OutPoint{ - Hash: wire.ShaHash{}, + Hash: chainhash.Hash{}, Index: 0, + Tree: 0, }, - Sequence: 4294967295, + Sequence: 4294967295, + ValueIn: testValueIn, + BlockHeight: 78901, + BlockIndex: 23456, }, &wire.TxIn{ PreviousOutPoint: wire.OutPoint{ - Hash: wire.ShaHash{}, + Hash: chainhash.Hash{}, Index: 1, + Tree: 0, }, - Sequence: 4294967295, + Sequence: 4294967295, + ValueIn: testValueIn, + BlockHeight: 78901, + BlockIndex: 23456, }, &wire.TxIn{ PreviousOutPoint: wire.OutPoint{ - Hash: wire.ShaHash{}, + Hash: chainhash.Hash{}, Index: 2, + Tree: 0, }, - Sequence: 4294967295, + Sequence: 4294967295, + ValueIn: testValueIn, + BlockHeight: 78901, + BlockIndex: 23456, }, }, TxOut: []*wire.TxOut{ &wire.TxOut{ - Value: 1, + Version: wire.DefaultPkScriptVersion, + Value: 1, }, &wire.TxOut{ - Value: 2, + Version: wire.DefaultPkScriptVersion, + Value: 2, }, &wire.TxOut{ - Value: 3, + Version: wire.DefaultPkScriptVersion, + Value: 3, }, }, LockTime: 0, + Expiry: 0, } // Pay to Pubkey Hash (uncompressed) + secp256k1 := chainec.Secp256k1 for _, hashType := range hashTypes { - for i := range tx.TxIn { - msg := fmt.Sprintf("%d:%d", hashType, i) - key, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break - } + for _, suite := range 
signatureSuites { + for i := range tx.TxIn { + var keyDB, pkBytes []byte + var key chainec.PrivateKey + var pk chainec.PublicKey - pk := (*btcec.PublicKey)(&key.PublicKey). - SerializeUncompressed() - address, err := btcutil.NewAddressPubKeyHash( - btcutil.Hash160(pk), &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make address for %s: %v", - msg, err) - break - } + switch suite { + case secp: + keyDB, _, _, _ = secp256k1.GenerateKey(rand.Reader) + key, pk = secp256k1.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeUncompressed() + case edwards: + keyDB, _, _, _ = chainec.Edwards.GenerateKey(rand.Reader) + key, pk = chainec.Edwards.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeUncompressed() + case secSchnorr: + keyDB, _, _, _ = chainec.SecSchnorr.GenerateKey( + rand.Reader) + key, pk = chainec.SecSchnorr.PrivKeyFromBytes(keyDB) + pkBytes = pk.Serialize() + } - pkScript, err := txscript.PayToAddrScript(address) - if err != nil { - t.Errorf("failed to make pkscript "+ - "for %s: %v", msg, err) - } + msg := fmt.Sprintf("%d:%d:%d", hashType, i, suite) - if err := signAndCheck(msg, tx, i, pkScript, hashType, - mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, false}, - }), mkGetScript(nil), nil); err != nil { - t.Error(err) - break + address, err := dcrutil.NewAddressPubKeyHash( + dcrutil.Hash160(pkBytes), &chaincfg.TestNetParams, + suite) + + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } + + pkScript, err := txscript.PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + } + + if err := signAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, false}, + }), mkGetScript(nil), nil, suite); err != nil { + t.Error(err) + break + } + + if err := signBadAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, false}, + }), mkGetScript(nil), nil, suite); err == nil { + t.Errorf("corrupted signature validated %s: %v", + msg, err) + break + } } } } // Pay to Pubkey Hash (uncompressed) (merging with correct) for _, hashType := range hashTypes { - for i := range tx.TxIn { - msg := fmt.Sprintf("%d:%d", hashType, i) - key, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break - } + for _, suite := range signatureSuites { + for i := range tx.TxIn { + var keyDB, pkBytes []byte + var key chainec.PrivateKey + var pk chainec.PublicKey - pk := (*btcec.PublicKey)(&key.PublicKey). 
- SerializeUncompressed() - address, err := btcutil.NewAddressPubKeyHash( - btcutil.Hash160(pk), &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make address for %s: %v", - msg, err) - break - } + switch suite { + case secp: + keyDB, _, _, _ = secp256k1.GenerateKey(rand.Reader) + key, pk = secp256k1.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeUncompressed() + case edwards: + keyDB, _, _, _ = chainec.Edwards.GenerateKey(rand.Reader) + key, pk = chainec.Edwards.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeUncompressed() + case secSchnorr: + keyDB, _, _, _ = chainec.SecSchnorr.GenerateKey(rand.Reader) + key, pk = chainec.SecSchnorr.PrivKeyFromBytes(keyDB) + pkBytes = pk.Serialize() + } - pkScript, err := txscript.PayToAddrScript(address) - if err != nil { - t.Errorf("failed to make pkscript "+ - "for %s: %v", msg, err) - } + msg := fmt.Sprintf("%d:%d:%d", hashType, i, suite) - sigScript, err := txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, pkScript, - hashType, mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, false}, - }), mkGetScript(nil), nil) - if err != nil { - t.Errorf("failed to sign output %s: %v", msg, - err) - break - } + address, err := dcrutil.NewAddressPubKeyHash( + dcrutil.Hash160(pkBytes), &chaincfg.TestNetParams, + suite) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - // by the above loop, this should be valid, now sign - // again and merge. - sigScript, err = txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, pkScript, - hashType, mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, false}, - }), mkGetScript(nil), sigScript) - if err != nil { - t.Errorf("failed to sign output %s a "+ - "second time: %v", msg, err) - break - } + pkScript, err := txscript.PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + } - err = checkScripts(msg, tx, i, sigScript, pkScript) - if err != nil { - t.Errorf("twice signed script invalid for "+ - "%s: %v", msg, err) - break + sigScript, err := txscript.SignTxOutput( + &chaincfg.TestNetParams, tx, i, pkScript, + hashType, mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, false}, + }), mkGetScript(nil), nil, suite) + if err != nil { + t.Errorf("failed to sign output %s: %v", msg, + err) + break + } + + // by the above loop, this should be valid, now sign + // again and merge. 
+ sigScript, err = txscript.SignTxOutput( + &chaincfg.TestNetParams, tx, i, pkScript, + hashType, mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, false}, + }), mkGetScript(nil), sigScript, suite) + if err != nil { + t.Errorf("failed to sign output %s a "+ + "second time: %v", msg, err) + break + } + + err = checkScripts(msg, tx, i, sigScript, pkScript) + if err != nil { + t.Errorf("twice signed script invalid for "+ + "%s: %v", msg, err) + break + } } } } // Pay to Pubkey Hash (compressed) for _, hashType := range hashTypes { - for i := range tx.TxIn { - msg := fmt.Sprintf("%d:%d", hashType, i) + for _, suite := range signatureSuites { + for i := range tx.TxIn { + var keyDB, pkBytes []byte + var key chainec.PrivateKey + var pk chainec.PublicKey - key, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break - } + switch suite { + case secp: + keyDB, _, _, _ = secp256k1.GenerateKey(rand.Reader) + key, pk = secp256k1.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + case edwards: + keyDB, _, _, _ = chainec.Edwards.GenerateKey(rand.Reader) + key, pk = chainec.Edwards.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + case secSchnorr: + keyDB, _, _, _ = chainec.SecSchnorr.GenerateKey(rand.Reader) + key, pk = chainec.SecSchnorr.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + } - pk := (*btcec.PublicKey)(&key.PublicKey). - SerializeCompressed() - address, err := btcutil.NewAddressPubKeyHash( - btcutil.Hash160(pk), &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make address for %s: %v", - msg, err) - break - } + msg := fmt.Sprintf("%d:%d:%d", hashType, i, suite) - pkScript, err := txscript.PayToAddrScript(address) - if err != nil { - t.Errorf("failed to make pkscript "+ - "for %s: %v", msg, err) - } + address, err := dcrutil.NewAddressPubKeyHash( + dcrutil.Hash160(pkBytes), &chaincfg.TestNetParams, + suite) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - if err := signAndCheck(msg, tx, i, pkScript, hashType, - mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, true}, - }), mkGetScript(nil), nil); err != nil { - t.Error(err) - break + pkScript, err := txscript.PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + } + + if err := signAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(nil), nil, suite); err != nil { + t.Error(err) + break + } + + if err := signBadAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(nil), nil, suite); err == nil { + t.Errorf("corrupted signature validated %s: %v", + msg, err) + break + } } } } // Pay to Pubkey Hash (compressed) with duplicate merge for _, hashType := range hashTypes { - for i := range tx.TxIn { - msg := fmt.Sprintf("%d:%d", hashType, i) + for _, suite := range signatureSuites { + for i := range tx.TxIn { + var keyDB, pkBytes []byte + var key chainec.PrivateKey + var pk chainec.PublicKey - key, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break - } + switch suite { + case secp: + keyDB, _, _, _ = secp256k1.GenerateKey(rand.Reader) + key, pk = secp256k1.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + case edwards: + keyDB, _, _, _ = 
chainec.Edwards.GenerateKey(rand.Reader) + key, pk = chainec.Edwards.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + case secSchnorr: + keyDB, _, _, _ = chainec.SecSchnorr.GenerateKey(rand.Reader) + key, pk = chainec.SecSchnorr.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + } - pk := (*btcec.PublicKey)(&key.PublicKey). - SerializeCompressed() - address, err := btcutil.NewAddressPubKeyHash( - btcutil.Hash160(pk), &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make address for %s: %v", - msg, err) - break - } + msg := fmt.Sprintf("%d:%d:%d", hashType, i, suite) - pkScript, err := txscript.PayToAddrScript(address) - if err != nil { - t.Errorf("failed to make pkscript "+ - "for %s: %v", msg, err) - } + address, err := dcrutil.NewAddressPubKeyHash( + dcrutil.Hash160(pkBytes), &chaincfg.TestNetParams, + suite) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - sigScript, err := txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, pkScript, - hashType, mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, true}, - }), mkGetScript(nil), nil) - if err != nil { - t.Errorf("failed to sign output %s: %v", msg, - err) - break - } + pkScript, err := txscript.PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + } - // by the above loop, this should be valid, now sign - // again and merge. - sigScript, err = txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, pkScript, - hashType, mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, true}, - }), mkGetScript(nil), sigScript) - if err != nil { - t.Errorf("failed to sign output %s a "+ - "second time: %v", msg, err) - break - } + sigScript, err := txscript.SignTxOutput( + &chaincfg.TestNetParams, tx, i, pkScript, + hashType, mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(nil), nil, suite) + if err != nil { + t.Errorf("failed to sign output %s: %v", msg, + err) + break + } - err = checkScripts(msg, tx, i, sigScript, pkScript) - if err != nil { - t.Errorf("twice signed script invalid for "+ - "%s: %v", msg, err) - break + // by the above loop, this should be valid, now sign + // again and merge. + sigScript, err = txscript.SignTxOutput( + &chaincfg.TestNetParams, tx, i, pkScript, + hashType, mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(nil), sigScript, suite) + if err != nil { + t.Errorf("failed to sign output %s a "+ + "second time: %v", msg, err) + break + } + + err = checkScripts(msg, tx, i, sigScript, pkScript) + if err != nil { + t.Errorf("twice signed script invalid for "+ + "%s: %v", msg, err) + break + } } } } - // Pay to PubKey (uncompressed) + // Pay to Pubkey Hash for a ticket(SStx) (compressed) for _, hashType := range hashTypes { for i := range tx.TxIn { msg := fmt.Sprintf("%d:%d", hashType, i) - key, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break - } + keyDB, _, _, err := secp256k1.GenerateKey(rand.Reader) + key, pk := secp256k1.PrivKeyFromBytes(keyDB) + pkBytes := pk.SerializeCompressed() - pk := (*btcec.PublicKey)(&key.PublicKey). 
- SerializeUncompressed() - address, err := btcutil.NewAddressPubKey(pk, - &chaincfg.TestNet3Params) + address, err := dcrutil.NewAddressPubKeyHash( + dcrutil.Hash160(pkBytes), &chaincfg.TestNetParams, + secp) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) break } - pkScript, err := txscript.PayToAddrScript(address) + pkScript, err := txscript.PayToSStx(address) if err != nil { t.Errorf("failed to make pkscript "+ "for %s: %v", msg, err) @@ -366,170 +516,449 @@ func TestSignTxOutput(t *testing.T) { if err := signAndCheck(msg, tx, i, pkScript, hashType, mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, false}, - }), mkGetScript(nil), nil); err != nil { + address.EncodeAddress(): {&key, true}, + }), mkGetScript(nil), nil, secp); err != nil { t.Error(err) break } + + if err := signBadAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(nil), nil, secp); err == nil { + t.Errorf("corrupted signature validated %s: %v", + msg, err) + break + } + } + } + + // Pay to Pubkey Hash for a ticket change (SStx change) (compressed) + for _, hashType := range hashTypes { + for i := range tx.TxIn { + msg := fmt.Sprintf("%d:%d", hashType, i) + + keyDB, _, _, err := secp256k1.GenerateKey(rand.Reader) + key, pk := secp256k1.PrivKeyFromBytes(keyDB) + pkBytes := pk.SerializeCompressed() + + address, err := dcrutil.NewAddressPubKeyHash( + dcrutil.Hash160(pkBytes), &chaincfg.TestNetParams, + secp) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } + + pkScript, err := txscript.PayToSStxChange(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + } + + if err := signAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(nil), nil, secp); err != nil { + t.Error(err) + break + } + + if err := signBadAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(nil), nil, secp); err == nil { + t.Errorf("corrupted signature validated %s: %v", + msg, err) + break + } + } + } + + // Pay to Pubkey Hash for a ticket spending (SSGen) (compressed) + for _, hashType := range hashTypes { + for i := range tx.TxIn { + msg := fmt.Sprintf("%d:%d", hashType, i) + + keyDB, _, _, err := secp256k1.GenerateKey(rand.Reader) + key, pk := secp256k1.PrivKeyFromBytes(keyDB) + pkBytes := pk.SerializeCompressed() + + address, err := dcrutil.NewAddressPubKeyHash( + dcrutil.Hash160(pkBytes), &chaincfg.TestNetParams, + secp) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } + + pkScript, err := txscript.PayToSSGen(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + } + + if err := signAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(nil), nil, secp); err != nil { + t.Error(err) + break + } + + if err := signBadAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(nil), nil, secp); err == nil { + t.Errorf("corrupted signature validated %s: %v", + msg, err) + break + } + } + } + + // Pay to Pubkey Hash for a ticket revocation (SSRtx) (compressed) + for _, hashType := range hashTypes { + for i := range tx.TxIn { + msg := fmt.Sprintf("%d:%d", 
hashType, i) + + keyDB, _, _, err := secp256k1.GenerateKey(rand.Reader) + key, pk := secp256k1.PrivKeyFromBytes(keyDB) + pkBytes := pk.SerializeCompressed() + + address, err := dcrutil.NewAddressPubKeyHash( + dcrutil.Hash160(pkBytes), &chaincfg.TestNetParams, + secp) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } + + pkScript, err := txscript.PayToSSRtx(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + } + + if err := signAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(nil), nil, secp); err != nil { + t.Error(err) + break + } + + if err := signBadAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(nil), nil, secp); err == nil { + t.Errorf("corrupted signature validated %s: %v", + msg, err) + break + } } } // Pay to PubKey (uncompressed) for _, hashType := range hashTypes { - for i := range tx.TxIn { - msg := fmt.Sprintf("%d:%d", hashType, i) + for _, suite := range signatureSuites { + for i := range tx.TxIn { + msg := fmt.Sprintf("%d:%d:%d", hashType, i, suite) - key, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break + keyDB, _, _, err := secp256k1.GenerateKey(rand.Reader) + key, pk := secp256k1.PrivKeyFromBytes(keyDB) + pkBytes := pk.SerializeUncompressed() + + address, err := dcrutil.NewAddressSecpPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } + + pkScript, err := txscript.PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + } + + if err := signAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, false}, + }), mkGetScript(nil), nil, suite); err != nil { + t.Error(err) + break + } + + if err := signBadAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, false}, + }), mkGetScript(nil), nil, suite); err == nil { + t.Errorf("corrupted signature validated %s: %v", + msg, err) + break + } } + } + } - pk := (*btcec.PublicKey)(&key.PublicKey). 
- SerializeUncompressed() - address, err := btcutil.NewAddressPubKey(pk, - &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make address for %s: %v", - msg, err) - break - } + // Pay to PubKey (uncompressed) + for _, hashType := range hashTypes { + for _, suite := range signatureSuites { + for i := range tx.TxIn { + var keyDB, pkBytes []byte + var key chainec.PrivateKey + var pk chainec.PublicKey + var address dcrutil.Address + var err error - pkScript, err := txscript.PayToAddrScript(address) - if err != nil { - t.Errorf("failed to make pkscript "+ - "for %s: %v", msg, err) - } + msg := fmt.Sprintf("%d:%d:%d", hashType, i, suite) - sigScript, err := txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, pkScript, - hashType, mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, false}, - }), mkGetScript(nil), nil) - if err != nil { - t.Errorf("failed to sign output %s: %v", msg, - err) - break - } + switch suite { + case secp: + keyDB, _, _, _ = secp256k1.GenerateKey(rand.Reader) + key, pk = secp256k1.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeUncompressed() + address, err = dcrutil.NewAddressSecpPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - // by the above loop, this should be valid, now sign - // again and merge. - sigScript, err = txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, pkScript, - hashType, mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, false}, - }), mkGetScript(nil), sigScript) - if err != nil { - t.Errorf("failed to sign output %s a "+ - "second time: %v", msg, err) - break - } + case edwards: + keyDB, _, _, _ = chainec.Edwards.GenerateKey(rand.Reader) + key, pk = chainec.Edwards.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeUncompressed() + address, err = dcrutil.NewAddressEdwardsPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - err = checkScripts(msg, tx, i, sigScript, pkScript) - if err != nil { - t.Errorf("twice signed script invalid for "+ - "%s: %v", msg, err) - break + case secSchnorr: + keyDB, _, _, _ = chainec.SecSchnorr.GenerateKey(rand.Reader) + key, pk = chainec.SecSchnorr.PrivKeyFromBytes(keyDB) + pkBytes = pk.Serialize() + address, err = dcrutil.NewAddressSecSchnorrPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } + } + + pkScript, err := txscript.PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + + } + + sigScript, err := txscript.SignTxOutput( + &chaincfg.TestNetParams, tx, i, pkScript, + hashType, mkGetKeyPub(map[string]addressToKey{ + address.String(): {&key, false}, + }), mkGetScript(nil), nil, suite) + if err != nil { + t.Errorf("failed to sign output %s: %v", msg, + err) + break + } + + // by the above loop, this should be valid, now sign + // again and merge. 
+ sigScript, err = txscript.SignTxOutput( + &chaincfg.TestNetParams, tx, i, pkScript, + hashType, mkGetKeyPub(map[string]addressToKey{ + address.String(): {&key, false}, + }), mkGetScript(nil), sigScript, suite) + if err != nil { + t.Errorf("failed to sign output %s a "+ + "second time: %v", msg, err) + break + } + + err = checkScripts(msg, tx, i, sigScript, pkScript) + if err != nil { + t.Errorf("twice signed script invalid for "+ + "%s: %v", msg, err) + break + } } } } // Pay to PubKey (compressed) for _, hashType := range hashTypes { - for i := range tx.TxIn { - msg := fmt.Sprintf("%d:%d", hashType, i) + for _, suite := range signatureSuites { + for i := range tx.TxIn { + var keyDB, pkBytes []byte + var key chainec.PrivateKey + var pk chainec.PublicKey + var address dcrutil.Address + var err error - key, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break - } + msg := fmt.Sprintf("%d:%d:%d", hashType, i, suite) - pk := (*btcec.PublicKey)(&key.PublicKey). - SerializeCompressed() - address, err := btcutil.NewAddressPubKey(pk, - &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make address for %s: %v", - msg, err) - break - } + switch suite { + case secp: + keyDB, _, _, _ = secp256k1.GenerateKey(rand.Reader) + key, pk = secp256k1.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + address, err = dcrutil.NewAddressSecpPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - pkScript, err := txscript.PayToAddrScript(address) - if err != nil { - t.Errorf("failed to make pkscript "+ - "for %s: %v", msg, err) - } + case edwards: + keyDB, _, _, _ = chainec.Edwards.GenerateKey(rand.Reader) + key, pk = chainec.Edwards.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + address, err = dcrutil.NewAddressEdwardsPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - if err := signAndCheck(msg, tx, i, pkScript, hashType, - mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, true}, - }), mkGetScript(nil), nil); err != nil { - t.Error(err) - break + case secSchnorr: + keyDB, _, _, _ = chainec.SecSchnorr.GenerateKey(rand.Reader) + key, pk = chainec.SecSchnorr.PrivKeyFromBytes(keyDB) + pkBytes = pk.Serialize() + address, err = dcrutil.NewAddressSecSchnorrPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } + } + + pkScript, err := txscript.PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + } + + if err := signAndCheck(msg, tx, i, pkScript, hashType, + mkGetKeyPub(map[string]addressToKey{ + address.String(): {&key, true}, + }), mkGetScript(nil), nil, suite); err != nil { + t.Error(err) + break + } + + if err := signBadAndCheck(msg, tx, i, pkScript, hashType, + mkGetKeyPub(map[string]addressToKey{ + address.String(): {&key, true}, + }), mkGetScript(nil), nil, suite); err == nil { + t.Errorf("corrupted signature validated %s: %v", + msg, err) + break + } } } } // Pay to PubKey (compressed) with duplicate merge for _, hashType := range hashTypes { - for i := range tx.TxIn { - msg := fmt.Sprintf("%d:%d", hashType, i) + for _, suite := range signatureSuites { + for i := range tx.TxIn { + var keyDB, pkBytes []byte + var key chainec.PrivateKey + var pk chainec.PublicKey + var address 
dcrutil.Address + var err error - key, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break - } + msg := fmt.Sprintf("%d:%d:%d", hashType, i, suite) - pk := (*btcec.PublicKey)(&key.PublicKey). - SerializeCompressed() - address, err := btcutil.NewAddressPubKey(pk, - &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make address for %s: %v", - msg, err) - break - } + switch suite { + case secp: + keyDB, _, _, _ = secp256k1.GenerateKey(rand.Reader) + key, pk = secp256k1.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + address, err = dcrutil.NewAddressSecpPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - pkScript, err := txscript.PayToAddrScript(address) - if err != nil { - t.Errorf("failed to make pkscript "+ - "for %s: %v", msg, err) - } + case edwards: + keyDB, _, _, _ = chainec.Edwards.GenerateKey(rand.Reader) + key, pk = chainec.Edwards.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + address, err = dcrutil.NewAddressEdwardsPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - sigScript, err := txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, pkScript, - hashType, mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, true}, - }), mkGetScript(nil), nil) - if err != nil { - t.Errorf("failed to sign output %s: %v", msg, - err) - break - } + case secSchnorr: + keyDB, _, _, _ = chainec.SecSchnorr.GenerateKey(rand.Reader) + key, pk = chainec.SecSchnorr.PrivKeyFromBytes(keyDB) + pkBytes = pk.Serialize() + address, err = dcrutil.NewAddressSecSchnorrPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } + } - // by the above loop, this should be valid, now sign - // again and merge. - sigScript, err = txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, pkScript, - hashType, mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, true}, - }), mkGetScript(nil), sigScript) - if err != nil { - t.Errorf("failed to sign output %s a "+ - "second time: %v", msg, err) - break - } + pkScript, err := txscript.PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + } - err = checkScripts(msg, tx, i, sigScript, pkScript) - if err != nil { - t.Errorf("twice signed script invalid for "+ - "%s: %v", msg, err) - break + sigScript, err := txscript.SignTxOutput( + &chaincfg.TestNetParams, tx, i, pkScript, + hashType, mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(nil), nil, suite) + if err != nil { + t.Errorf("failed to sign output %s: %v", msg, + err) + break + } + + // by the above loop, this should be valid, now sign + // again and merge. + sigScript, err = txscript.SignTxOutput( + &chaincfg.TestNetParams, tx, i, pkScript, + hashType, mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(nil), sigScript, suite) + if err != nil { + t.Errorf("failed to sign output %s a "+ + "second time: %v", msg, err) + break + } + + err = checkScripts(msg, tx, i, sigScript, pkScript) + if err != nil { + t.Errorf("twice signed script invalid for "+ + "%s: %v", msg, err) + break + } } } } @@ -537,552 +966,754 @@ func TestSignTxOutput(t *testing.T) { // As before, but with p2sh now. 
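Each of the p2sh cases that follow repeats one pattern per signature suite: generate a key through the chainec abstraction, derive the matching dcrutil address, build the pay-to-address script, wrap that script in a pay-to-script-hash address, and sign against the wrapping script. A minimal sketch of that flow for the secp256k1 suite is shown here; it is not part of the patch, and the import paths and exact constructor signatures are assumptions inferred from the calls the patch itself makes (chainec, dcrutil, txscript).

// Illustrative sketch only, not part of this patch. Import paths and
// signatures are assumed from the calls used in the diff above.
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/decred/dcrd/chaincfg"
	"github.com/decred/dcrd/chaincfg/chainec"
	"github.com/decred/dcrd/txscript"
	"github.com/decred/dcrutil"
)

// p2shForSecpPubKey builds a pay-to-script-hash script whose redeem script
// pays to a freshly generated secp256k1 public key, mirroring the test setup.
func p2shForSecpPubKey() ([]byte, error) {
	// Generate a key via the chainec abstraction, as the tests do.
	keyDB, _, _, err := chainec.Secp256k1.GenerateKey(rand.Reader)
	if err != nil {
		return nil, err
	}
	_, pk := chainec.Secp256k1.PrivKeyFromBytes(keyDB)

	// Derive the suite-specific address from the serialized public key.
	addr, err := dcrutil.NewAddressSecpPubKey(pk.SerializeCompressed(),
		&chaincfg.TestNetParams)
	if err != nil {
		return nil, err
	}

	// Build the pay-to-address script, then wrap it in a P2SH address and
	// return the script paying to that script hash.
	pkScript, err := txscript.PayToAddrScript(addr)
	if err != nil {
		return nil, err
	}
	scriptAddr, err := dcrutil.NewAddressScriptHash(pkScript,
		&chaincfg.TestNetParams)
	if err != nil {
		return nil, err
	}
	return txscript.PayToAddrScript(scriptAddr)
}

func main() {
	script, err := p2shForSecpPubKey()
	fmt.Println(len(script), err)
}

The Edwards and secp-Schnorr suites exercised by the loops differ only in the key generator and the address constructor (NewAddressEdwardsPubKey and NewAddressSecSchnorrPubKey, respectively).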
// Pay to Pubkey Hash (uncompressed) for _, hashType := range hashTypes { - for i := range tx.TxIn { - msg := fmt.Sprintf("%d:%d", hashType, i) - key, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break - } + for _, suite := range signatureSuites { + for i := range tx.TxIn { + var keyDB, pkBytes []byte + var key chainec.PrivateKey + var pk chainec.PublicKey - pk := (*btcec.PublicKey)(&key.PublicKey). - SerializeUncompressed() - address, err := btcutil.NewAddressPubKeyHash( - btcutil.Hash160(pk), &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make address for %s: %v", - msg, err) - break - } + switch suite { + case secp: + keyDB, _, _, _ = secp256k1.GenerateKey(rand.Reader) + key, pk = secp256k1.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeUncompressed() + case edwards: + keyDB, _, _, _ = chainec.Edwards.GenerateKey(rand.Reader) + key, pk = chainec.Edwards.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeUncompressed() + case secSchnorr: + keyDB, _, _, _ = chainec.SecSchnorr.GenerateKey(rand.Reader) + key, pk = chainec.SecSchnorr.PrivKeyFromBytes(keyDB) + pkBytes = pk.Serialize() + } - pkScript, err := txscript.PayToAddrScript(address) - if err != nil { - t.Errorf("failed to make pkscript "+ - "for %s: %v", msg, err) - break - } + msg := fmt.Sprintf("%d:%d:%d", hashType, i, suite) - scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make p2sh addr for %s: %v", - msg, err) - break - } + address, err := dcrutil.NewAddressPubKeyHash( + dcrutil.Hash160(pkBytes), &chaincfg.TestNetParams, suite) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - scriptPkScript, err := txscript.PayToAddrScript( - scriptAddr) - if err != nil { - t.Errorf("failed to make script pkscript for "+ - "%s: %v", msg, err) - break - } + pkScript, err := txscript.PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + break + } - if err := signAndCheck(msg, tx, i, scriptPkScript, - hashType, - mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, false}, - }), mkGetScript(map[string][]byte{ - scriptAddr.EncodeAddress(): pkScript, - }), nil); err != nil { - t.Error(err) - break + scriptAddr, err := dcrutil.NewAddressScriptHash( + pkScript, &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make p2sh addr for %s: %v", + msg, err) + break + } + + scriptPkScript, err := txscript.PayToAddrScript( + scriptAddr) + if err != nil { + t.Errorf("failed to make script pkscript for "+ + "%s: %v", msg, err) + break + } + + if err := signAndCheck(msg, tx, i, scriptPkScript, + hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, false}, + }), mkGetScript(map[string][]byte{ + scriptAddr.EncodeAddress(): pkScript, + }), nil, suite); err != nil { + t.Error(err) + break + } + + if err := signBadAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, false}, + }), mkGetScript(nil), nil, suite); err == nil { + t.Errorf("corrupted signature validated %s: %v", + msg, err) + break + } } } } // Pay to Pubkey Hash (uncompressed) with duplicate merge for _, hashType := range hashTypes { - for i := range tx.TxIn { - msg := fmt.Sprintf("%d:%d", hashType, i) - key, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - 
break - } + for _, suite := range signatureSuites { + for i := range tx.TxIn { + var keyDB, pkBytes []byte + var key chainec.PrivateKey + var pk chainec.PublicKey - pk := (*btcec.PublicKey)(&key.PublicKey). - SerializeUncompressed() - address, err := btcutil.NewAddressPubKeyHash( - btcutil.Hash160(pk), &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make address for %s: %v", - msg, err) - break - } + switch suite { + case secp: + keyDB, _, _, _ = secp256k1.GenerateKey(rand.Reader) + key, pk = secp256k1.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeUncompressed() + case edwards: + keyDB, _, _, _ = chainec.Edwards.GenerateKey(rand.Reader) + key, pk = chainec.Edwards.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeUncompressed() + case secSchnorr: + keyDB, _, _, _ = chainec.SecSchnorr.GenerateKey(rand.Reader) + key, pk = chainec.SecSchnorr.PrivKeyFromBytes(keyDB) + pkBytes = pk.Serialize() + } - pkScript, err := txscript.PayToAddrScript(address) - if err != nil { - t.Errorf("failed to make pkscript "+ - "for %s: %v", msg, err) - break - } + msg := fmt.Sprintf("%d:%d:%d", hashType, i, suite) - scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make p2sh addr for %s: %v", - msg, err) - break - } + address, err := dcrutil.NewAddressPubKeyHash( + dcrutil.Hash160(pkBytes), &chaincfg.TestNetParams, suite) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - scriptPkScript, err := txscript.PayToAddrScript( - scriptAddr) - if err != nil { - t.Errorf("failed to make script pkscript for "+ - "%s: %v", msg, err) - break - } + pkScript, err := txscript.PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + break + } - sigScript, err := txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, scriptPkScript, - hashType, mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, false}, - }), mkGetScript(map[string][]byte{ - scriptAddr.EncodeAddress(): pkScript, - }), nil) - if err != nil { - t.Errorf("failed to sign output %s: %v", msg, - err) - break - } + scriptAddr, err := dcrutil.NewAddressScriptHash( + pkScript, &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make p2sh addr for %s: %v", + msg, err) + break + } - // by the above loop, this should be valid, now sign - // again and merge. 
- sigScript, err = txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, scriptPkScript, - hashType, mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, false}, - }), mkGetScript(map[string][]byte{ - scriptAddr.EncodeAddress(): pkScript, - }), nil) - if err != nil { - t.Errorf("failed to sign output %s a "+ - "second time: %v", msg, err) - break - } + scriptPkScript, err := txscript.PayToAddrScript( + scriptAddr) + if err != nil { + t.Errorf("failed to make script pkscript for "+ + "%s: %v", msg, err) + break + } - err = checkScripts(msg, tx, i, sigScript, scriptPkScript) - if err != nil { - t.Errorf("twice signed script invalid for "+ - "%s: %v", msg, err) - break + sigScript, err := txscript.SignTxOutput( + &chaincfg.TestNetParams, tx, i, scriptPkScript, + hashType, mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, false}, + }), mkGetScript(map[string][]byte{ + scriptAddr.EncodeAddress(): pkScript, + }), nil, suite) + if err != nil { + t.Errorf("failed to sign output %s: %v", msg, + err) + break + } + + // by the above loop, this should be valid, now sign + // again and merge. + sigScript, err = txscript.SignTxOutput( + &chaincfg.TestNetParams, tx, i, scriptPkScript, + hashType, mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, false}, + }), mkGetScript(map[string][]byte{ + scriptAddr.EncodeAddress(): pkScript, + }), nil, suite) + if err != nil { + t.Errorf("failed to sign output %s a "+ + "second time: %v", msg, err) + break + } + + err = checkScripts(msg, tx, i, sigScript, scriptPkScript) + if err != nil { + t.Errorf("twice signed script invalid for "+ + "%s: %v", msg, err) + break + } } } } // Pay to Pubkey Hash (compressed) for _, hashType := range hashTypes { - for i := range tx.TxIn { - msg := fmt.Sprintf("%d:%d", hashType, i) + for _, suite := range signatureSuites { + for i := range tx.TxIn { + var keyDB, pkBytes []byte + var key chainec.PrivateKey + var pk chainec.PublicKey - key, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break - } + switch suite { + case secp: + keyDB, _, _, _ = secp256k1.GenerateKey(rand.Reader) + key, pk = secp256k1.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + case edwards: + keyDB, _, _, _ = chainec.Edwards.GenerateKey(rand.Reader) + key, pk = chainec.Edwards.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + case secSchnorr: + keyDB, _, _, _ = chainec.SecSchnorr.GenerateKey(rand.Reader) + key, pk = chainec.SecSchnorr.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + } - pk := (*btcec.PublicKey)(&key.PublicKey). 
- SerializeCompressed() - address, err := btcutil.NewAddressPubKeyHash( - btcutil.Hash160(pk), &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make address for %s: %v", - msg, err) - break - } + msg := fmt.Sprintf("%d:%d:%d", hashType, i, suite) - pkScript, err := txscript.PayToAddrScript(address) - if err != nil { - t.Errorf("failed to make pkscript "+ - "for %s: %v", msg, err) - } + address, err := dcrutil.NewAddressPubKeyHash( + dcrutil.Hash160(pkBytes), &chaincfg.TestNetParams, suite) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make p2sh addr for %s: %v", - msg, err) - break - } + pkScript, err := txscript.PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + } - scriptPkScript, err := txscript.PayToAddrScript( - scriptAddr) - if err != nil { - t.Errorf("failed to make script pkscript for "+ - "%s: %v", msg, err) - break - } + scriptAddr, err := dcrutil.NewAddressScriptHash( + pkScript, &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make p2sh addr for %s: %v", + msg, err) + break + } - if err := signAndCheck(msg, tx, i, scriptPkScript, - hashType, - mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, true}, - }), mkGetScript(map[string][]byte{ - scriptAddr.EncodeAddress(): pkScript, - }), nil); err != nil { - t.Error(err) - break + scriptPkScript, err := txscript.PayToAddrScript( + scriptAddr) + if err != nil { + t.Errorf("failed to make script pkscript for "+ + "%s: %v", msg, err) + break + } + + if err := signAndCheck(msg, tx, i, scriptPkScript, + hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(map[string][]byte{ + scriptAddr.EncodeAddress(): pkScript, + }), nil, suite); err != nil { + t.Error(err) + break + } + + if err := signBadAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(nil), nil, suite); err == nil { + t.Errorf("corrupted signature validated %s: %v", + msg, err) + break + } } } } // Pay to Pubkey Hash (compressed) with duplicate merge for _, hashType := range hashTypes { - for i := range tx.TxIn { - msg := fmt.Sprintf("%d:%d", hashType, i) + for _, suite := range signatureSuites { + for i := range tx.TxIn { + var keyDB, pkBytes []byte + var key chainec.PrivateKey + var pk chainec.PublicKey - key, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break - } + switch suite { + case secp: + keyDB, _, _, _ = secp256k1.GenerateKey(rand.Reader) + key, pk = secp256k1.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + case edwards: + keyDB, _, _, _ = chainec.Edwards.GenerateKey(rand.Reader) + key, pk = chainec.Edwards.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + case secSchnorr: + keyDB, _, _, _ = chainec.SecSchnorr.GenerateKey(rand.Reader) + key, pk = chainec.SecSchnorr.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + } - pk := (*btcec.PublicKey)(&key.PublicKey). 
- SerializeCompressed() - address, err := btcutil.NewAddressPubKeyHash( - btcutil.Hash160(pk), &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make address for %s: %v", - msg, err) - break - } + msg := fmt.Sprintf("%d:%d:%d", hashType, i, suite) - pkScript, err := txscript.PayToAddrScript(address) - if err != nil { - t.Errorf("failed to make pkscript "+ - "for %s: %v", msg, err) - } + address, err := dcrutil.NewAddressPubKeyHash( + dcrutil.Hash160(pkBytes), &chaincfg.TestNetParams, suite) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make p2sh addr for %s: %v", - msg, err) - break - } + pkScript, err := txscript.PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + } - scriptPkScript, err := txscript.PayToAddrScript( - scriptAddr) - if err != nil { - t.Errorf("failed to make script pkscript for "+ - "%s: %v", msg, err) - break - } + scriptAddr, err := dcrutil.NewAddressScriptHash( + pkScript, &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make p2sh addr for %s: %v", + msg, err) + break + } - sigScript, err := txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, scriptPkScript, - hashType, mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, true}, - }), mkGetScript(map[string][]byte{ - scriptAddr.EncodeAddress(): pkScript, - }), nil) - if err != nil { - t.Errorf("failed to sign output %s: %v", msg, - err) - break - } + scriptPkScript, err := txscript.PayToAddrScript( + scriptAddr) + if err != nil { + t.Errorf("failed to make script pkscript for "+ + "%s: %v", msg, err) + break + } - // by the above loop, this should be valid, now sign - // again and merge. - sigScript, err = txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, scriptPkScript, - hashType, mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, true}, - }), mkGetScript(map[string][]byte{ - scriptAddr.EncodeAddress(): pkScript, - }), nil) - if err != nil { - t.Errorf("failed to sign output %s a "+ - "second time: %v", msg, err) - break - } + sigScript, err := txscript.SignTxOutput( + &chaincfg.TestNetParams, tx, i, scriptPkScript, + hashType, mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(map[string][]byte{ + scriptAddr.EncodeAddress(): pkScript, + }), nil, suite) + if err != nil { + t.Errorf("failed to sign output %s: %v", msg, + err) + break + } - err = checkScripts(msg, tx, i, sigScript, scriptPkScript) - if err != nil { - t.Errorf("twice signed script invalid for "+ - "%s: %v", msg, err) - break + // by the above loop, this should be valid, now sign + // again and merge. 
+ sigScript, err = txscript.SignTxOutput( + &chaincfg.TestNetParams, tx, i, scriptPkScript, + hashType, mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(map[string][]byte{ + scriptAddr.EncodeAddress(): pkScript, + }), nil, suite) + if err != nil { + t.Errorf("failed to sign output %s a "+ + "second time: %v", msg, err) + break + } + + err = checkScripts(msg, tx, i, sigScript, scriptPkScript) + if err != nil { + t.Errorf("twice signed script invalid for "+ + "%s: %v", msg, err) + break + } } } } // Pay to PubKey (uncompressed) for _, hashType := range hashTypes { - for i := range tx.TxIn { - msg := fmt.Sprintf("%d:%d", hashType, i) + for _, suite := range signatureSuites { + for i := range tx.TxIn { + var keyDB, pkBytes []byte + var key chainec.PrivateKey + var pk chainec.PublicKey + var address dcrutil.Address + var err error - key, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break - } + msg := fmt.Sprintf("%d:%d:%d", hashType, i, suite) - pk := (*btcec.PublicKey)(&key.PublicKey). - SerializeUncompressed() - address, err := btcutil.NewAddressPubKey(pk, - &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make address for %s: %v", - msg, err) - break - } + switch suite { + case secp: + keyDB, _, _, _ = secp256k1.GenerateKey(rand.Reader) + key, pk = secp256k1.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeUncompressed() + address, err = dcrutil.NewAddressSecpPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - pkScript, err := txscript.PayToAddrScript(address) - if err != nil { - t.Errorf("failed to make pkscript "+ - "for %s: %v", msg, err) - } + case edwards: + keyDB, _, _, _ = chainec.Edwards.GenerateKey(rand.Reader) + key, pk = chainec.Edwards.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeUncompressed() + address, err = dcrutil.NewAddressEdwardsPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make p2sh addr for %s: %v", - msg, err) - break - } + case secSchnorr: + keyDB, _, _, _ = chainec.SecSchnorr.GenerateKey(rand.Reader) + key, pk = chainec.SecSchnorr.PrivKeyFromBytes(keyDB) + pkBytes = pk.Serialize() + address, err = dcrutil.NewAddressSecSchnorrPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } + } - scriptPkScript, err := txscript.PayToAddrScript( - scriptAddr) - if err != nil { - t.Errorf("failed to make script pkscript for "+ - "%s: %v", msg, err) - break - } + pkScript, err := txscript.PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + } - if err := signAndCheck(msg, tx, i, scriptPkScript, - hashType, - mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, false}, - }), mkGetScript(map[string][]byte{ - scriptAddr.EncodeAddress(): pkScript, - }), nil); err != nil { - t.Error(err) - break + scriptAddr, err := dcrutil.NewAddressScriptHash( + pkScript, &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make p2sh addr for %s: %v", + msg, err) + break + } + + scriptPkScript, err := txscript.PayToAddrScript( + scriptAddr) + if err != nil { + t.Errorf("failed to make script pkscript for "+ + "%s: 
%v", msg, err) + break + } + + if err := signAndCheck(msg, tx, i, scriptPkScript, + hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, false}, + }), mkGetScript(map[string][]byte{ + scriptAddr.EncodeAddress(): pkScript, + }), nil, suite); err != nil { + t.Error(err) + break + } + + if err := signBadAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, false}, + }), mkGetScript(nil), nil, suite); err == nil { + t.Errorf("corrupted signature validated %s: %v", + msg, err) + break + } } } } // Pay to PubKey (uncompressed) with duplicate merge for _, hashType := range hashTypes { - for i := range tx.TxIn { - msg := fmt.Sprintf("%d:%d", hashType, i) + for _, suite := range signatureSuites { + for i := range tx.TxIn { + var keyDB, pkBytes []byte + var key chainec.PrivateKey + var pk chainec.PublicKey + var address dcrutil.Address + var err error - key, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break - } + msg := fmt.Sprintf("%d:%d:%d", hashType, i, suite) - pk := (*btcec.PublicKey)(&key.PublicKey). - SerializeUncompressed() - address, err := btcutil.NewAddressPubKey(pk, - &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make address for %s: %v", - msg, err) - break - } + switch suite { + case secp: + keyDB, _, _, _ = secp256k1.GenerateKey(rand.Reader) + key, pk = secp256k1.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeUncompressed() + address, err = dcrutil.NewAddressSecpPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - pkScript, err := txscript.PayToAddrScript(address) - if err != nil { - t.Errorf("failed to make pkscript "+ - "for %s: %v", msg, err) - } + case edwards: + keyDB, _, _, _ = chainec.Edwards.GenerateKey(rand.Reader) + key, pk = chainec.Edwards.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeUncompressed() + address, err = dcrutil.NewAddressEdwardsPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make p2sh addr for %s: %v", - msg, err) - break - } + case secSchnorr: + keyDB, _, _, _ = chainec.SecSchnorr.GenerateKey(rand.Reader) + key, pk = chainec.SecSchnorr.PrivKeyFromBytes(keyDB) + pkBytes = pk.Serialize() + address, err = dcrutil.NewAddressSecSchnorrPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } + } - scriptPkScript, err := txscript.PayToAddrScript( - scriptAddr) - if err != nil { - t.Errorf("failed to make script pkscript for "+ - "%s: %v", msg, err) - break - } + pkScript, err := txscript.PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + } - sigScript, err := txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, scriptPkScript, - hashType, mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, false}, - }), mkGetScript(map[string][]byte{ - scriptAddr.EncodeAddress(): pkScript, - }), nil) - if err != nil { - t.Errorf("failed to sign output %s: %v", msg, - err) - break - } + scriptAddr, err := dcrutil.NewAddressScriptHash( + pkScript, &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make p2sh addr for %s: %v", + msg, err) + break + } 
- // by the above loop, this should be valid, now sign - // again and merge. - sigScript, err = txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, scriptPkScript, - hashType, mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, false}, - }), mkGetScript(map[string][]byte{ - scriptAddr.EncodeAddress(): pkScript, - }), nil) - if err != nil { - t.Errorf("failed to sign output %s a "+ - "second time: %v", msg, err) - break - } + scriptPkScript, err := txscript.PayToAddrScript( + scriptAddr) + if err != nil { + t.Errorf("failed to make script pkscript for "+ + "%s: %v", msg, err) + break + } - err = checkScripts(msg, tx, i, sigScript, scriptPkScript) - if err != nil { - t.Errorf("twice signed script invalid for "+ - "%s: %v", msg, err) - break + sigScript, err := txscript.SignTxOutput( + &chaincfg.TestNetParams, tx, i, scriptPkScript, + hashType, mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, false}, + }), mkGetScript(map[string][]byte{ + scriptAddr.EncodeAddress(): pkScript, + }), nil, suite) + if err != nil { + t.Errorf("failed to sign output %s: %v", msg, + err) + break + } + + // by the above loop, this should be valid, now sign + // again and merge. + sigScript, err = txscript.SignTxOutput( + &chaincfg.TestNetParams, tx, i, scriptPkScript, + hashType, mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, false}, + }), mkGetScript(map[string][]byte{ + scriptAddr.EncodeAddress(): pkScript, + }), nil, suite) + if err != nil { + t.Errorf("failed to sign output %s a "+ + "second time: %v", msg, err) + break + } + + err = checkScripts(msg, tx, i, sigScript, scriptPkScript) + if err != nil { + t.Errorf("twice signed script invalid for "+ + "%s: %v", msg, err) + break + } } } } // Pay to PubKey (compressed) for _, hashType := range hashTypes { - for i := range tx.TxIn { - msg := fmt.Sprintf("%d:%d", hashType, i) + for _, suite := range signatureSuites { + for i := range tx.TxIn { + var keyDB, pkBytes []byte + var key chainec.PrivateKey + var pk chainec.PublicKey + var address dcrutil.Address + var err error - key, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break - } + msg := fmt.Sprintf("%d:%d:%d", hashType, i, suite) - pk := (*btcec.PublicKey)(&key.PublicKey). 
- SerializeCompressed() - address, err := btcutil.NewAddressPubKey(pk, - &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make address for %s: %v", - msg, err) - break - } + switch suite { + case secp: + keyDB, _, _, _ = secp256k1.GenerateKey(rand.Reader) + key, pk = secp256k1.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + address, err = dcrutil.NewAddressSecpPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - pkScript, err := txscript.PayToAddrScript(address) - if err != nil { - t.Errorf("failed to make pkscript "+ - "for %s: %v", msg, err) - } + case edwards: + keyDB, _, _, _ = chainec.Edwards.GenerateKey(rand.Reader) + key, pk = chainec.Edwards.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + address, err = dcrutil.NewAddressEdwardsPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make p2sh addr for %s: %v", - msg, err) - break - } + case secSchnorr: + keyDB, _, _, _ = chainec.SecSchnorr.GenerateKey(rand.Reader) + key, pk = chainec.SecSchnorr.PrivKeyFromBytes(keyDB) + pkBytes = pk.Serialize() + address, err = dcrutil.NewAddressSecSchnorrPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } + } - scriptPkScript, err := txscript.PayToAddrScript( - scriptAddr) - if err != nil { - t.Errorf("failed to make script pkscript for "+ - "%s: %v", msg, err) - break - } + pkScript, err := txscript.PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + } - if err := signAndCheck(msg, tx, i, scriptPkScript, - hashType, - mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, true}, - }), mkGetScript(map[string][]byte{ - scriptAddr.EncodeAddress(): pkScript, - }), nil); err != nil { - t.Error(err) - break + scriptAddr, err := dcrutil.NewAddressScriptHash( + pkScript, &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make p2sh addr for %s: %v", + msg, err) + break + } + + scriptPkScript, err := txscript.PayToAddrScript( + scriptAddr) + if err != nil { + t.Errorf("failed to make script pkscript for "+ + "%s: %v", msg, err) + break + } + + if err := signAndCheck(msg, tx, i, scriptPkScript, + hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(map[string][]byte{ + scriptAddr.EncodeAddress(): pkScript, + }), nil, suite); err != nil { + t.Error(err) + break + } + + if err := signBadAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, false}, + }), mkGetScript(nil), nil, suite); err == nil { + t.Errorf("corrupted signature validated %s: %v", + msg, err) + break + } } } } // Pay to PubKey (compressed) for _, hashType := range hashTypes { - for i := range tx.TxIn { - msg := fmt.Sprintf("%d:%d", hashType, i) + for _, suite := range signatureSuites { + for i := range tx.TxIn { + var keyDB, pkBytes []byte + var key chainec.PrivateKey + var pk chainec.PublicKey + var address dcrutil.Address + var err error - key, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break - } + msg := fmt.Sprintf("%d:%d:%d", hashType, i, suite) - pk := 
(*btcec.PublicKey)(&key.PublicKey). - SerializeCompressed() - address, err := btcutil.NewAddressPubKey(pk, - &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make address for %s: %v", - msg, err) - break - } + switch suite { + case secp: + keyDB, _, _, _ = secp256k1.GenerateKey(rand.Reader) + key, pk = secp256k1.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + address, err = dcrutil.NewAddressSecpPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - pkScript, err := txscript.PayToAddrScript(address) - if err != nil { - t.Errorf("failed to make pkscript "+ - "for %s: %v", msg, err) - } + case edwards: + keyDB, _, _, _ = chainec.Edwards.GenerateKey(rand.Reader) + key, pk = chainec.Edwards.PrivKeyFromBytes(keyDB) + pkBytes = pk.SerializeCompressed() + address, err = dcrutil.NewAddressEdwardsPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } - scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) - if err != nil { - t.Errorf("failed to make p2sh addr for %s: %v", - msg, err) - break - } + case secSchnorr: + keyDB, _, _, _ = chainec.SecSchnorr.GenerateKey(rand.Reader) + key, pk = chainec.SecSchnorr.PrivKeyFromBytes(keyDB) + pkBytes = pk.Serialize() + address, err = dcrutil.NewAddressSecSchnorrPubKey(pkBytes, + &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } + } - scriptPkScript, err := txscript.PayToAddrScript( - scriptAddr) - if err != nil { - t.Errorf("failed to make script pkscript for "+ - "%s: %v", msg, err) - break - } + pkScript, err := txscript.PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make pkscript "+ + "for %s: %v", msg, err) + } - sigScript, err := txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, scriptPkScript, - hashType, mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, true}, - }), mkGetScript(map[string][]byte{ - scriptAddr.EncodeAddress(): pkScript, - }), nil) - if err != nil { - t.Errorf("failed to sign output %s: %v", msg, - err) - break - } + scriptAddr, err := dcrutil.NewAddressScriptHash( + pkScript, &chaincfg.TestNetParams) + if err != nil { + t.Errorf("failed to make p2sh addr for %s: %v", + msg, err) + break + } - // by the above loop, this should be valid, now sign - // again and merge. 
- sigScript, err = txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, scriptPkScript, - hashType, mkGetKey(map[string]addressToKey{ - address.EncodeAddress(): {key, true}, - }), mkGetScript(map[string][]byte{ - scriptAddr.EncodeAddress(): pkScript, - }), nil) - if err != nil { - t.Errorf("failed to sign output %s a "+ - "second time: %v", msg, err) - break - } + scriptPkScript, err := txscript.PayToAddrScript( + scriptAddr) + if err != nil { + t.Errorf("failed to make script pkscript for "+ + "%s: %v", msg, err) + break + } - err = checkScripts(msg, tx, i, sigScript, scriptPkScript) - if err != nil { - t.Errorf("twice signed script invalid for "+ - "%s: %v", msg, err) - break + sigScript, err := txscript.SignTxOutput( + &chaincfg.TestNetParams, tx, i, scriptPkScript, + hashType, mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(map[string][]byte{ + scriptAddr.EncodeAddress(): pkScript, + }), nil, suite) + if err != nil { + t.Errorf("failed to sign output %s: %v", msg, + err) + break + } + + // by the above loop, this should be valid, now sign + // again and merge. + sigScript, err = txscript.SignTxOutput( + &chaincfg.TestNetParams, tx, i, scriptPkScript, + hashType, mkGetKey(map[string]addressToKey{ + address.EncodeAddress(): {&key, true}, + }), mkGetScript(map[string][]byte{ + scriptAddr.EncodeAddress(): pkScript, + }), nil, suite) + if err != nil { + t.Errorf("failed to sign output %s a "+ + "second time: %v", msg, err) + break + } + + err = checkScripts(msg, tx, i, sigScript, scriptPkScript) + if err != nil { + t.Errorf("twice signed script invalid for "+ + "%s: %v", msg, err) + break + } } } } @@ -1092,34 +1723,24 @@ func TestSignTxOutput(t *testing.T) { for i := range tx.TxIn { msg := fmt.Sprintf("%d:%d", hashType, i) - key1, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break - } + keyDB1, _, _, err := secp256k1.GenerateKey(rand.Reader) + key1, pk1 := secp256k1.PrivKeyFromBytes(keyDB1) + pk1Bytes := pk1.SerializeUncompressed() - pk1 := (*btcec.PublicKey)(&key1.PublicKey). - SerializeCompressed() - address1, err := btcutil.NewAddressPubKey(pk1, - &chaincfg.TestNet3Params) + address1, err := dcrutil.NewAddressSecpPubKey(pk1Bytes, + &chaincfg.TestNetParams) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) break } - key2, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey 2 for %s: %v", - msg, err) - break - } + keyDB2, _, _, err := secp256k1.GenerateKey(rand.Reader) + key2, pk2 := secp256k1.PrivKeyFromBytes(keyDB2) + pk2Bytes := pk2.SerializeUncompressed() - pk2 := (*btcec.PublicKey)(&key2.PublicKey). 
- SerializeCompressed() - address2, err := btcutil.NewAddressPubKey(pk2, - &chaincfg.TestNet3Params) + address2, err := dcrutil.NewAddressSecpPubKey(pk2Bytes, + &chaincfg.TestNetParams) if err != nil { t.Errorf("failed to make address 2 for %s: %v", msg, err) @@ -1127,15 +1748,15 @@ func TestSignTxOutput(t *testing.T) { } pkScript, err := txscript.MultiSigScript( - []*btcutil.AddressPubKey{address1, address2}, + []*dcrutil.AddressSecpPubKey{address1, address2}, 2) if err != nil { t.Errorf("failed to make pkscript "+ "for %s: %v", msg, err) } - scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) + scriptAddr, err := dcrutil.NewAddressScriptHash( + pkScript, &chaincfg.TestNetParams) if err != nil { t.Errorf("failed to make p2sh addr for %s: %v", msg, err) @@ -1153,14 +1774,24 @@ func TestSignTxOutput(t *testing.T) { if err := signAndCheck(msg, tx, i, scriptPkScript, hashType, mkGetKey(map[string]addressToKey{ - address1.EncodeAddress(): {key1, true}, - address2.EncodeAddress(): {key2, true}, + address1.EncodeAddress(): {&key1, true}, + address2.EncodeAddress(): {&key2, true}, }), mkGetScript(map[string][]byte{ scriptAddr.EncodeAddress(): pkScript, - }), nil); err != nil { + }), nil, secp); err != nil { t.Error(err) break } + + if err := signBadAndCheck(msg, tx, i, pkScript, hashType, + mkGetKey(map[string]addressToKey{ + address1.EncodeAddress(): {&key1, true}, + address2.EncodeAddress(): {&key2, true}, + }), mkGetScript(nil), nil, secp); err == nil { + t.Errorf("corrupted signature validated %s: %v", + msg, err) + break + } } } @@ -1169,34 +1800,24 @@ func TestSignTxOutput(t *testing.T) { for i := range tx.TxIn { msg := fmt.Sprintf("%d:%d", hashType, i) - key1, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break - } + keyDB1, _, _, err := secp256k1.GenerateKey(rand.Reader) + key1, pk1 := secp256k1.PrivKeyFromBytes(keyDB1) + pk1Bytes := pk1.SerializeUncompressed() - pk1 := (*btcec.PublicKey)(&key1.PublicKey). - SerializeCompressed() - address1, err := btcutil.NewAddressPubKey(pk1, - &chaincfg.TestNet3Params) + address1, err := dcrutil.NewAddressSecpPubKey(pk1Bytes, + &chaincfg.TestNetParams) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) break } - key2, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey 2 for %s: %v", - msg, err) - break - } + keyDB2, _, _, err := secp256k1.GenerateKey(rand.Reader) + key2, pk2 := secp256k1.PrivKeyFromBytes(keyDB2) + pk2Bytes := pk2.SerializeUncompressed() - pk2 := (*btcec.PublicKey)(&key2.PublicKey). 
- SerializeCompressed() - address2, err := btcutil.NewAddressPubKey(pk2, - &chaincfg.TestNet3Params) + address2, err := dcrutil.NewAddressSecpPubKey(pk2Bytes, + &chaincfg.TestNetParams) if err != nil { t.Errorf("failed to make address 2 for %s: %v", msg, err) @@ -1204,15 +1825,15 @@ func TestSignTxOutput(t *testing.T) { } pkScript, err := txscript.MultiSigScript( - []*btcutil.AddressPubKey{address1, address2}, + []*dcrutil.AddressSecpPubKey{address1, address2}, 2) if err != nil { t.Errorf("failed to make pkscript "+ "for %s: %v", msg, err) } - scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) + scriptAddr, err := dcrutil.NewAddressScriptHash( + pkScript, &chaincfg.TestNetParams) if err != nil { t.Errorf("failed to make p2sh addr for %s: %v", msg, err) @@ -1228,12 +1849,12 @@ func TestSignTxOutput(t *testing.T) { } sigScript, err := txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, scriptPkScript, + &chaincfg.TestNetParams, tx, i, scriptPkScript, hashType, mkGetKey(map[string]addressToKey{ - address1.EncodeAddress(): {key1, true}, + address1.EncodeAddress(): {&key1, true}, }), mkGetScript(map[string][]byte{ scriptAddr.EncodeAddress(): pkScript, - }), nil) + }), nil, secp) if err != nil { t.Errorf("failed to sign output %s: %v", msg, err) @@ -1249,12 +1870,12 @@ func TestSignTxOutput(t *testing.T) { // Sign with the other key and merge sigScript, err = txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, scriptPkScript, + &chaincfg.TestNetParams, tx, i, scriptPkScript, hashType, mkGetKey(map[string]addressToKey{ - address2.EncodeAddress(): {key2, true}, + address2.EncodeAddress(): {&key2, true}, }), mkGetScript(map[string][]byte{ scriptAddr.EncodeAddress(): pkScript, - }), sigScript) + }), sigScript, secp) if err != nil { t.Errorf("failed to sign output %s: %v", msg, err) break @@ -1276,34 +1897,24 @@ func TestSignTxOutput(t *testing.T) { for i := range tx.TxIn { msg := fmt.Sprintf("%d:%d", hashType, i) - key1, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey for %s: %v", - msg, err) - break - } + keyDB1, _, _, err := secp256k1.GenerateKey(rand.Reader) + key1, pk1 := secp256k1.PrivKeyFromBytes(keyDB1) + pk1Bytes := pk1.SerializeUncompressed() - pk1 := (*btcec.PublicKey)(&key1.PublicKey). - SerializeCompressed() - address1, err := btcutil.NewAddressPubKey(pk1, - &chaincfg.TestNet3Params) + address1, err := dcrutil.NewAddressSecpPubKey(pk1Bytes, + &chaincfg.TestNetParams) if err != nil { t.Errorf("failed to make address for %s: %v", msg, err) break } - key2, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Errorf("failed to make privKey 2 for %s: %v", - msg, err) - break - } + keyDB2, _, _, err := secp256k1.GenerateKey(rand.Reader) + key2, pk2 := secp256k1.PrivKeyFromBytes(keyDB2) + pk2Bytes := pk2.SerializeUncompressed() - pk2 := (*btcec.PublicKey)(&key2.PublicKey). 
- SerializeCompressed() - address2, err := btcutil.NewAddressPubKey(pk2, - &chaincfg.TestNet3Params) + address2, err := dcrutil.NewAddressSecpPubKey(pk2Bytes, + &chaincfg.TestNetParams) if err != nil { t.Errorf("failed to make address 2 for %s: %v", msg, err) @@ -1311,15 +1922,15 @@ func TestSignTxOutput(t *testing.T) { } pkScript, err := txscript.MultiSigScript( - []*btcutil.AddressPubKey{address1, address2}, + []*dcrutil.AddressSecpPubKey{address1, address2}, 2) if err != nil { t.Errorf("failed to make pkscript "+ "for %s: %v", msg, err) } - scriptAddr, err := btcutil.NewAddressScriptHash( - pkScript, &chaincfg.TestNet3Params) + scriptAddr, err := dcrutil.NewAddressScriptHash( + pkScript, &chaincfg.TestNetParams) if err != nil { t.Errorf("failed to make p2sh addr for %s: %v", msg, err) @@ -1335,12 +1946,12 @@ func TestSignTxOutput(t *testing.T) { } sigScript, err := txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, scriptPkScript, + &chaincfg.TestNetParams, tx, i, scriptPkScript, hashType, mkGetKey(map[string]addressToKey{ - address1.EncodeAddress(): {key1, true}, + address1.EncodeAddress(): {&key1, true}, }), mkGetScript(map[string][]byte{ scriptAddr.EncodeAddress(): pkScript, - }), nil) + }), nil, secp) if err != nil { t.Errorf("failed to sign output %s: %v", msg, err) @@ -1356,13 +1967,13 @@ func TestSignTxOutput(t *testing.T) { // Sign with the other key and merge sigScript, err = txscript.SignTxOutput( - &chaincfg.TestNet3Params, tx, i, scriptPkScript, + &chaincfg.TestNetParams, tx, i, scriptPkScript, hashType, mkGetKey(map[string]addressToKey{ - address1.EncodeAddress(): {key1, true}, - address2.EncodeAddress(): {key2, true}, + address1.EncodeAddress(): {&key1, true}, + address2.EncodeAddress(): {&key2, true}, }), mkGetScript(map[string][]byte{ scriptAddr.EncodeAddress(): pkScript, - }), sigScript) + }), sigScript, secp) if err != nil { t.Errorf("failed to sign output %s: %v", msg, err) break @@ -1414,13 +2025,16 @@ var ( 0x63, 0x32, 0x62, 0xaa, 0x60, 0xc6, 0x83, 0x30, 0xbd, 0x24, 0x7e, 0xef, 0xdb, 0x6f, 0x2e, 0x8d, 0x56, 0xf0, 0x3c, 0x9f, 0x6d, 0xb6, 0xf8} - uncompressedPkScript = []byte{0x76, 0xa9, 0x14, 0xd1, 0x7c, 0xb5, - 0xeb, 0xa4, 0x02, 0xcb, 0x68, 0xe0, 0x69, 0x56, 0xbf, 0x32, - 0x53, 0x90, 0x0e, 0x0a, 0x86, 0xc9, 0xfa, 0x88, 0xac} - compressedPkScript = []byte{0x76, 0xa9, 0x14, 0x27, 0x4d, 0x9f, 0x7f, - 0x61, 0x7e, 0x7c, 0x7a, 0x1c, 0x1f, 0xb2, 0x75, 0x79, 0x10, - 0x43, 0x65, 0x68, 0x27, 0x9d, 0x86, 0x88, 0xac} - shortPkScript = []byte{0x76, 0xa9, 0x14, 0xd1, 0x7c, 0xb5, + _, thisPubKey = chainec.Secp256k1.PrivKeyFromBytes(privKeyD) + thisAddressUnc, _ = dcrutil.NewAddressPubKeyHash( + dcrutil.Hash160(thisPubKey.SerializeUncompressed()), + &chaincfg.TestNetParams, secp) + uncompressedPkScript, _ = txscript.PayToAddrScript(thisAddressUnc) + thisAddressCom, _ = dcrutil.NewAddressPubKeyHash( + dcrutil.Hash160(thisPubKey.SerializeCompressed()), + &chaincfg.TestNetParams, secp) + compressedPkScript, _ = txscript.PayToAddrScript(thisAddressCom) + shortPkScript = []byte{0x76, 0xa9, 0x14, 0xd1, 0x7c, 0xb5, 0xeb, 0xa4, 0x02, 0xcb, 0x68, 0xe0, 0x69, 0x56, 0xbf, 0x32, 0x53, 0x90, 0x0e, 0x0a, 0x88, 0xac} uncompressedAddrStr = "1L6fd93zGmtzkK6CsZFVVoCwzZV3MUtJ4F" @@ -1632,8 +2246,7 @@ var sigScriptTests = []tstSigScript{ // and we don't have the private keys. 
func TestSignatureScript(t *testing.T) { t.Parallel() - - privKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), privKeyD) + privKey, _ := chainec.Secp256k1.PrivKeyFromBytes(privKeyD) nexttest: for i := range sigScriptTests { @@ -1692,7 +2305,7 @@ nexttest: scriptFlags := txscript.ScriptBip16 | txscript.ScriptVerifyDERSignatures for j := range tx.TxIn { vm, err := txscript.NewEngine(sigScriptTests[i]. - inputs[j].txout.PkScript, tx, j, scriptFlags) + inputs[j].txout.PkScript, tx, j, scriptFlags, 0) if err != nil { t.Errorf("cannot create script vm for test %v: %v", sigScriptTests[i].name, err) diff --git a/txscript/stack.go b/txscript/stack.go index 664c000d..191080bf 100644 --- a/txscript/stack.go +++ b/txscript/stack.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -28,7 +29,7 @@ func fromBool(v bool) []byte { return nil } -// stack represents a stack of immutable objects to be used with bitcoin +// stack represents a stack of immutable objects to be used with decred // scripts. Objects may be shared, therefore in usage if a value is to be // changed it *must* be deep-copied first to avoid changing other values on the // stack. @@ -77,13 +78,13 @@ func (s *stack) PopByteArray() ([]byte, error) { // consensus rules imposed on data interpreted as numbers. // // Stack transformation: [... x1 x2 x3] -> [... x1 x2] -func (s *stack) PopInt() (scriptNum, error) { +func (s *stack) PopInt(maxLen int) (scriptNum, error) { so, err := s.PopByteArray() if err != nil { return 0, err } - return makeScriptNum(so, s.verifyMinimalData) + return makeScriptNum(so, s.verifyMinimalData, maxLen) } // PopBool pops the value off the top of the stack, converts it into a bool, and @@ -118,7 +119,7 @@ func (s *stack) PeekInt(idx int32) (scriptNum, error) { return 0, err } - return makeScriptNum(so, s.verifyMinimalData) + return makeScriptNum(so, s.verifyMinimalData, mathOpCodeMaxScriptNumLen) } // PeekBool returns the Nth item on the stack as a bool without removing it. diff --git a/txscript/stack_test.go b/txscript/stack_test.go index efffc656..6a20dfba 100644 --- a/txscript/stack_test.go +++ b/txscript/stack_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
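The stack.go hunks above change PopInt to take an explicit maximum serialized length and forward it to makeScriptNum, while PeekInt pins the limit to mathOpCodeMaxScriptNumLen; the stack_test.go hunks below update every PopInt call site to pass that constant. makeScriptNum itself is not shown in this patch, so the following is only a simplified, hypothetical illustration of the kind of length gate such a parameter implies; the helper name and error text are invented for the example.

// Hypothetical illustration only: a length gate of the sort the new maxLen
// parameter suggests. The real decoding and minimal-encoding rules live in
// the txscript package and are not shown in this patch.
package main

import "fmt"

// checkScriptNumLen rejects serialized script numbers that exceed the
// caller-supplied byte limit before any interpretation happens.
func checkScriptNumLen(serialized []byte, maxLen int) error {
	if len(serialized) > maxLen {
		return fmt.Errorf("numeric value %x is %d bytes which exceeds the max allowed of %d",
			serialized, len(serialized), maxLen)
	}
	return nil
}

func main() {
	// A 5-byte operand fails a hypothetical 4-byte limit but passes a
	// wider, caller-chosen one.
	operand := []byte{0x01, 0x02, 0x03, 0x04, 0x05}
	fmt.Println(checkScriptNumLen(operand, 4))
	fmt.Println(checkScriptNumLen(operand, 8))
}

As far as these hunks show, the point of threading the limit through the call is that different callers can impose different bounds on numeric operands without changing the shared decoder.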
@@ -175,7 +176,7 @@ func TestStack(t *testing.T) { "popInt 0", [][]byte{{0x0}}, func(s *stack) error { - v, err := s.PopInt() + v, err := s.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -191,7 +192,7 @@ func TestStack(t *testing.T) { "popInt -0", [][]byte{{0x80}}, func(s *stack) error { - v, err := s.PopInt() + v, err := s.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -207,7 +208,7 @@ func TestStack(t *testing.T) { "popInt 1", [][]byte{{0x01}}, func(s *stack) error { - v, err := s.PopInt() + v, err := s.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -223,7 +224,7 @@ func TestStack(t *testing.T) { "popInt 1 leading 0", [][]byte{{0x01, 0x00, 0x00, 0x00}}, func(s *stack) error { - v, err := s.PopInt() + v, err := s.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -240,7 +241,7 @@ func TestStack(t *testing.T) { "popInt -1", [][]byte{{0x81}}, func(s *stack) error { - v, err := s.PopInt() + v, err := s.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -256,7 +257,7 @@ func TestStack(t *testing.T) { "popInt -1 leading 0", [][]byte{{0x01, 0x00, 0x00, 0x80}}, func(s *stack) error { - v, err := s.PopInt() + v, err := s.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -274,7 +275,7 @@ func TestStack(t *testing.T) { "popInt -513", [][]byte{{0x1, 0x82}}, func(s *stack) error { - v, err := s.PopInt() + v, err := s.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -926,7 +927,7 @@ func TestStack(t *testing.T) { s.PushInt(scriptNum(1)) // Peek int is otherwise pretty well tested, // just check it works. - val, err := s.PopInt() + val, err := s.PopInt(mathOpCodeMaxScriptNumLen) if err != nil { return err } @@ -944,7 +945,7 @@ func TestStack(t *testing.T) { func(s *stack) error { // Peek int is otherwise pretty well tested, // just check it works. - _, err := s.PopInt() + _, err := s.PopInt(mathOpCodeMaxScriptNumLen) return err }, ErrStackUnderflow, diff --git a/txscript/standard.go b/txscript/standard.go index 71349248..887d87b4 100644 --- a/txscript/standard.go +++ b/txscript/standard.go @@ -1,18 +1,24 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package txscript import ( - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcutil" + "encoding/binary" + "fmt" + + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/chaincfg/chainec" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrutil" ) const ( // maxDataCarrierSize is the maximum number of bytes allowed in pushed - // data to be considered a nulldata transaction - maxDataCarrierSize = 80 + // data to be considered a nulldata transaction. + maxDataCarrierSize = 256 // StandardVerifyFlags are the script flags which are used when // executing transaction scripts to enforce additional checks which @@ -28,9 +34,10 @@ const ( ScriptVerifyDERSignatures | ScriptVerifyStrictEncoding | ScriptVerifyMinimalData | - ScriptStrictMultiSig | ScriptDiscourageUpgradableNops | - ScriptVerifyCleanStack + ScriptVerifyCleanStack | + ScriptVerifyCheckLockTimeVerify | + ScriptVerifyLowS ) // ScriptClass is an enumeration for the list of standard types of script. @@ -38,23 +45,35 @@ type ScriptClass byte // Classes of script payment known about in the blockchain. const ( - NonStandardTy ScriptClass = iota // None of the recognized forms. - PubKeyTy // Pay pubkey. 
- PubKeyHashTy // Pay pubkey hash. - ScriptHashTy // Pay to script hash. - MultiSigTy // Multi signature. - NullDataTy // Empty data-only (provably prunable). + NonStandardTy ScriptClass = iota // None of the recognized forms. + PubKeyTy // Pay pubkey. + PubKeyHashTy // Pay pubkey hash. + ScriptHashTy // Pay to script hash. + MultiSigTy // Multi signature. + NullDataTy // Empty data-only (provably prunable). + StakeSubmissionTy // Stake submission. + StakeGenTy // Stake generation. + StakeRevocationTy // Stake revocation. + StakeSubChangeTy // Change for stake submission tx. + PubkeyAltTy // Alternative signature pubkey. + PubkeyHashAltTy // Alternative signature pubkey hash. ) // scriptClassToName houses the human-readable strings which describe each // script class. var scriptClassToName = []string{ - NonStandardTy: "nonstandard", - PubKeyTy: "pubkey", - PubKeyHashTy: "pubkeyhash", - ScriptHashTy: "scripthash", - MultiSigTy: "multisig", - NullDataTy: "nulldata", + NonStandardTy: "nonstandard", + PubKeyTy: "pubkey", + PubkeyAltTy: "pubkeyalt", + PubKeyHashTy: "pubkeyhash", + PubkeyHashAltTy: "pubkeyhashalt", + ScriptHashTy: "scripthash", + MultiSigTy: "multisig", + NullDataTy: "nulldata", + StakeSubmissionTy: "stakesubmission", + StakeGenTy: "stakegen", + StakeRevocationTy: "stakerevoke", + StakeSubChangeTy: "sstxchange", } // String implements the Stringer interface by returning the name of @@ -76,6 +95,38 @@ func isPubkey(pops []parsedOpcode) bool { pops[1].opcode.value == OP_CHECKSIG } +// isOneByteMaxDataPush returns true if the parsed opcode pushes exactly one +// byte to the stack. +func isOneByteMaxDataPush(po parsedOpcode) bool { + return po.opcode.value == OP_1 || + po.opcode.value == OP_2 || + po.opcode.value == OP_3 || + po.opcode.value == OP_4 || + po.opcode.value == OP_5 || + po.opcode.value == OP_6 || + po.opcode.value == OP_7 || + po.opcode.value == OP_8 || + po.opcode.value == OP_9 || + po.opcode.value == OP_10 || + po.opcode.value == OP_11 || + po.opcode.value == OP_12 || + po.opcode.value == OP_13 || + po.opcode.value == OP_14 || + po.opcode.value == OP_15 || + po.opcode.value == OP_16 || + po.opcode.value == OP_DATA_1 +} + +// isPubkeyAlt returns true if the script passed is an alternative pay-to-pubkey +// transaction, false otherwise. +func isPubkeyAlt(pops []parsedOpcode) bool { + // An alternative pubkey must be less than 512 bytes. + return len(pops) == 3 && + len(pops[0].data) < 512 && + isOneByteMaxDataPush(pops[1]) && + pops[2].opcode.value == OP_CHECKSIGALT +} + // isPubkeyHash returns true if the script passed is a pay-to-pubkey-hash // transaction, false otherwise. func isPubkeyHash(pops []parsedOpcode) bool { @@ -85,7 +136,50 @@ pops[2].opcode.value == OP_DATA_20 && pops[3].opcode.value == OP_EQUALVERIFY && pops[4].opcode.value == OP_CHECKSIG +} +// isPubkeyHashAlt returns true if the script passed is an alternative +// pay-to-pubkey-hash transaction, false otherwise. +func isPubkeyHashAlt(pops []parsedOpcode) bool { + return len(pops) == 6 && + pops[0].opcode.value == OP_DUP && + pops[1].opcode.value == OP_HASH160 && + pops[2].opcode.value == OP_DATA_20 && + pops[3].opcode.value == OP_EQUALVERIFY && + isOneByteMaxDataPush(pops[4]) && + pops[5].opcode.value == OP_CHECKSIGALT +} + +// isScriptHash returns true if the script passed is a pay-to-script-hash +// transaction, false otherwise.
+func isScriptHash(pops []parsedOpcode) bool { + return len(pops) == 3 && + pops[0].opcode.value == OP_HASH160 && + pops[1].opcode.value == OP_DATA_20 && + pops[2].opcode.value == OP_EQUAL +} + +// isAnyKindOfScriptHash returns true if the script passed is a pay-to-script-hash +// or stake pay-to-script-hash transaction, false otherwise. Used to make the +// engine have the correct behaviour. +func isAnyKindOfScriptHash(pops []parsedOpcode) bool { + standardP2SH := len(pops) == 3 && + pops[0].opcode.value == OP_HASH160 && + pops[1].opcode.value == OP_DATA_20 && + pops[2].opcode.value == OP_EQUAL + if standardP2SH { + return true + } + + stakeP2SH := len(pops) == 4 && + (pops[0].opcode.value >= 186 && pops[0].opcode.value <= 189) && + pops[1].opcode.value == OP_HASH160 && + pops[2].opcode.value == OP_DATA_20 && + pops[3].opcode.value == OP_EQUAL + if stakeP2SH { + return true + } + return false } // isMultiSig returns true if the passed script is a multisig transaction, false @@ -106,6 +200,13 @@ func isMultiSig(pops []parsedOpcode) bool { if pops[l-1].opcode.value != OP_CHECKMULTISIG { return false } + + // Verify the number of pubkeys specified matches the actual number + // of pubkeys provided. + if l-2-1 != asSmallInt(pops[l-2].opcode) { + return false + } + for _, pop := range pops[1 : l-2] { // Valid pubkeys are either 33 or 65 bytes. if len(pop.data) != 33 && len(pop.data) != 65 { @@ -115,6 +216,34 @@ func isMultiSig(pops []parsedOpcode) bool { return true } +// IsMultisigScript takes a script, parses it, then returns whether or +// not it is a multisignature script. +func IsMultisigScript(script []byte) (bool, error) { + pops, err := parseScript(script) + if err != nil { + return false, err + } + return isMultiSig(pops), nil +} + +// IsMultisigSigScript takes a script, parses it, then returns whether or +// not the final data push of the script is itself a multisignature script. +func IsMultisigSigScript(script []byte) bool { + if len(script) == 0 || script == nil { + return false + } + pops, err := parseScript(script) + if err != nil { + return false + } + subPops, err := parseScript(pops[len(pops)-1].data) + if err != nil { + return false + } + + return isMultiSig(subPops) +} + // isNullData returns true if the passed script is a null data transaction, // false otherwise. func isNullData(pops []parsedOpcode) bool { @@ -132,31 +261,147 @@ func isNullData(pops []parsedOpcode) bool { len(pops[1].data) <= maxDataCarrierSize } +// isStakeSubmission returns true if the script passed is a stake submission tx, +// false otherwise. +func isStakeSubmission(pops []parsedOpcode) bool { + if len(pops) == 6 && + pops[0].opcode.value == OP_SSTX && + pops[1].opcode.value == OP_DUP && + pops[2].opcode.value == OP_HASH160 && + pops[3].opcode.value == OP_DATA_20 && + pops[4].opcode.value == OP_EQUALVERIFY && + pops[5].opcode.value == OP_CHECKSIG { + return true + } + + if len(pops) == 4 && + pops[0].opcode.value == OP_SSTX && + pops[1].opcode.value == OP_HASH160 && + pops[2].opcode.value == OP_DATA_20 && + pops[3].opcode.value == OP_EQUAL { + return true + } + + return false +} + +// isStakeGen returns true if the script passed is a stake generation tx, +// false otherwise.
+func isStakeGen(pops []parsedOpcode) bool { + if len(pops) == 6 && + pops[0].opcode.value == OP_SSGEN && + pops[1].opcode.value == OP_DUP && + pops[2].opcode.value == OP_HASH160 && + pops[3].opcode.value == OP_DATA_20 && + pops[4].opcode.value == OP_EQUALVERIFY && + pops[5].opcode.value == OP_CHECKSIG { + return true + } + + if len(pops) == 4 && + pops[0].opcode.value == OP_SSGEN && + pops[1].opcode.value == OP_HASH160 && + pops[2].opcode.value == OP_DATA_20 && + pops[3].opcode.value == OP_EQUAL { + return true + } + + return false +} + +// isStakeRevocation returns true if the script passed is a stake submission +// revocation tx, false otherwise. +func isStakeRevocation(pops []parsedOpcode) bool { + if len(pops) == 6 && + pops[0].opcode.value == OP_SSRTX && + pops[1].opcode.value == OP_DUP && + pops[2].opcode.value == OP_HASH160 && + pops[3].opcode.value == OP_DATA_20 && + pops[4].opcode.value == OP_EQUALVERIFY && + pops[5].opcode.value == OP_CHECKSIG { + return true + } + + if len(pops) == 4 && + pops[0].opcode.value == OP_SSRTX && + pops[1].opcode.value == OP_HASH160 && + pops[2].opcode.value == OP_DATA_20 && + pops[3].opcode.value == OP_EQUAL { + return true + } + + return false +} + +// isSStxChange returns true if the script passed is a stake submission +// change tx, false otherwise. +func isSStxChange(pops []parsedOpcode) bool { + if len(pops) == 6 && + pops[0].opcode.value == OP_SSTXCHANGE && + pops[1].opcode.value == OP_DUP && + pops[2].opcode.value == OP_HASH160 && + pops[3].opcode.value == OP_DATA_20 && + pops[4].opcode.value == OP_EQUALVERIFY && + pops[5].opcode.value == OP_CHECKSIG { + return true + } + + if len(pops) == 4 && + pops[0].opcode.value == OP_SSTXCHANGE && + pops[1].opcode.value == OP_HASH160 && + pops[2].opcode.value == OP_DATA_20 && + pops[3].opcode.value == OP_EQUAL { + return true + } + + return false +} + // scriptType returns the type of the script being inspected from the known // standard types. func typeOfScript(pops []parsedOpcode) ScriptClass { if isPubkey(pops) { return PubKeyTy + } else if isPubkeyAlt(pops) { + return PubkeyAltTy } else if isPubkeyHash(pops) { return PubKeyHashTy + } else if isPubkeyHashAlt(pops) { + return PubkeyHashAltTy } else if isScriptHash(pops) { return ScriptHashTy } else if isMultiSig(pops) { return MultiSigTy } else if isNullData(pops) { return NullDataTy + } else if isStakeSubmission(pops) { + return StakeSubmissionTy + } else if isStakeGen(pops) { + return StakeGenTy + } else if isStakeRevocation(pops) { + return StakeRevocationTy + } else if isSStxChange(pops) { + return StakeSubChangeTy } + return NonStandardTy } // GetScriptClass returns the class of the script passed. // // NonStandardTy will be returned when the script does not parse. -func GetScriptClass(script []byte) ScriptClass { +func GetScriptClass(version uint16, script []byte) ScriptClass { + // NullDataTy outputs are allowed to have non-default script + // versions. However, other types are not. + if version != DefaultScriptVersion { + return NonStandardTy + } + pops, err := parseScript(script) if err != nil { return NonStandardTy } + return typeOfScript(pops) } @@ -165,7 +410,8 @@ func GetScriptClass(script []byte) ScriptClass { // then -1 is returned. We are an internal function and thus assume that class // is the real class of pops (and we can thus assume things that were determined // while finding out the type). 
-func expectedInputs(pops []parsedOpcode, class ScriptClass) int { +func expectedInputs(pops []parsedOpcode, class ScriptClass, + subclass ScriptClass) int { switch class { case PubKeyTy: return 1 @@ -173,8 +419,32 @@ case PubKeyHashTy: return 2 + case StakeSubmissionTy: + if subclass == PubKeyHashTy { + return 2 + } + return 1 // P2SH + + case StakeGenTy: + if subclass == PubKeyHashTy { + return 2 + } + return 1 // P2SH + + case StakeRevocationTy: + if subclass == PubKeyHashTy { + return 2 + } + return 1 // P2SH + + case StakeSubChangeTy: + if subclass == PubKeyHashTy { + return 2 + } + return 1 // P2SH + case ScriptHashTy: - // Not including script. That is handled by the caller. + // Not including script, handled below. return 1 case MultiSigTy: @@ -213,6 +483,88 @@ type ScriptInfo struct { SigOps int } +// IsStakeOutput returns true if a script output is a stake type. +func IsStakeOutput(pkScript []byte) bool { + pkPops, err := parseScript(pkScript) + if err != nil { + return false + } + + class := typeOfScript(pkPops) + return class == StakeSubmissionTy || + class == StakeGenTy || + class == StakeRevocationTy || + class == StakeSubChangeTy +} + +// GetStakeOutSubclass extracts the subclass (P2PKH or P2SH) +// from a stake output. +func GetStakeOutSubclass(pkScript []byte) (ScriptClass, error) { + pkPops, err := parseScript(pkScript) + if err != nil { + return 0, err + } + + class := typeOfScript(pkPops) + isStake := class == StakeSubmissionTy || + class == StakeGenTy || + class == StakeRevocationTy || + class == StakeSubChangeTy + + subClass := ScriptClass(0) + if isStake { + stakeSubscript := make([]parsedOpcode, 0) + for _, pop := range pkPops { + if pop.opcode.value >= 186 && pop.opcode.value <= 189 { + continue + } + stakeSubscript = append(stakeSubscript, pop) + } + + subClass = typeOfScript(stakeSubscript) + } else { + return 0, fmt.Errorf("not a stake output") + } + + return subClass, nil +} + +// getStakeOutSubscript extracts the subscript (P2PKH or P2SH) +// from a stake output. +func getStakeOutSubscript(pkScript []byte) []byte { + return pkScript[1:] +} + +// GetPkScriptFromP2SHSigScript returns the embedded pkScript from the signature +// script of a transaction spending a P2SH output. +func GetPkScriptFromP2SHSigScript(sigScript []byte) ([]byte, error) { + sigPops, err := parseScript(sigScript) + if err != nil { + return nil, err + } + + // The embedded pkScript is the final data push of the + // signature script. + return sigPops[len(sigPops)-1].data, nil +} + +// ContainsStakeOpCodes returns whether or not a pkScript contains stake tagging +// OP codes. +func ContainsStakeOpCodes(pkScript []byte) (bool, error) { + shPops, err := parseScript(pkScript) + if err != nil { + return false, err + } + + for _, pop := range shPops { + if pop.opcode.value >= 186 && pop.opcode.value <= 189 { + return true, nil + } + } + + return false, nil +} + // CalcScriptInfo returns a structure providing data about the provided script // pair. It will error if the pair is in someway invalid such that they can not be analysed, i.e.
if they do not parse or the pkScript is not a push-only @@ -237,7 +589,18 @@ func CalcScriptInfo(sigScript, pkScript []byte, bip16 bool) (*ScriptInfo, error) return nil, ErrStackNonPushOnly } - si.ExpectedInputs = expectedInputs(pkPops, si.PkScriptClass) + subClass := ScriptClass(0) + if si.PkScriptClass == StakeSubmissionTy || + si.PkScriptClass == StakeGenTy || + si.PkScriptClass == StakeRevocationTy || + si.PkScriptClass == StakeSubChangeTy { + subClass, err = GetStakeOutSubclass(pkScript) + if err != nil { + return nil, err + } + } + + si.ExpectedInputs = expectedInputs(pkPops, si.PkScriptClass, subClass) // All entries pushed to stack (or are OP_RESERVED and exec will fail). si.NumInputs = len(sigPops) @@ -252,7 +615,7 @@ func CalcScriptInfo(sigScript, pkScript []byte, bip16 bool) (*ScriptInfo, error) return nil, err } - shInputs := expectedInputs(shPops, typeOfScript(shPops)) + shInputs := expectedInputs(shPops, typeOfScript(shPops), 0) if shInputs == -1 { si.ExpectedInputs = -1 } else { @@ -291,6 +654,20 @@ func CalcMultiSigStats(script []byte) (int, int, error) { return numPubKeys, numSigs, nil } +// MultisigRedeemScriptFromScriptSig attempts to extract a multi- +// signature redeem script from a P2SH-redeeming input. It returns +// nil if the signature script is not a multisignature script. +func MultisigRedeemScriptFromScriptSig(script []byte) ([]byte, error) { + pops, err := parseScript(script) + if err != nil { + return nil, err + } + + // The redeemScript is always the last item on the stack of + // the script sig. + return pops[len(pops)-1].data, nil +} + // payToPubKeyHashScript creates a new script to pay a transaction // output to a 20-byte pubkey hash. It is expected that the input is a valid // hash. @@ -300,6 +677,27 @@ func payToPubKeyHashScript(pubKeyHash []byte) ([]byte, error) { Script() } +// payToPubKeyHashEdwardsScript creates a new script to pay a transaction +// output to a 20-byte pubkey hash of an Edwards public key. It is expected +// that the input is a valid hash. +func payToPubKeyHashEdwardsScript(pubKeyHash []byte) ([]byte, error) { + edwardsData := []byte{byte(edwards)} + return NewScriptBuilder().AddOp(OP_DUP).AddOp(OP_HASH160). + AddData(pubKeyHash).AddOp(OP_EQUALVERIFY).AddData(edwardsData). + AddOp(OP_CHECKSIGALT).Script() +} + +// payToPubKeyHashSchnorrScript creates a new script to pay a transaction +// output to a 20-byte pubkey hash of a secp256k1 public key, but expecting +// a schnorr signature instead of a classic secp256k1 signature. It is +// expected that the input is a valid hash. +func payToPubKeyHashSchnorrScript(pubKeyHash []byte) ([]byte, error) { + schnorrData := []byte{byte(secSchnorr)} + return NewScriptBuilder().AddOp(OP_DUP).AddOp(OP_HASH160). + AddData(pubKeyHash).AddOp(OP_EQUALVERIFY).AddData(schnorrData). + AddOp(OP_CHECKSIGALT).Script() +} + // payToScriptHashScript creates a new script to pay a transaction output to a // script hash. It is expected that the input is a valid hash. func payToScriptHashScript(scriptHash []byte) ([]byte, error) { @@ -307,6 +705,35 @@ func payToScriptHashScript(scriptHash []byte) ([]byte, error) { AddOp(OP_EQUAL).Script() } +// GetScriptHashFromP2SHScript extracts the script hash from a valid +// P2SH pkScript. 
+func GetScriptHashFromP2SHScript(pkScript []byte) ([]byte, error) { + pops, err := parseScript(pkScript) + if err != nil { + return nil, err + } + + var sh []byte + reachedHash160DataPush := false + for _, p := range pops { + if p.opcode.value == OP_HASH160 { + reachedHash160DataPush = true + continue + } + if reachedHash160DataPush { + sh = p.data + break + } + } + + return sh, nil +} + +// PayToScriptHashScript is the exported version of payToScriptHashScript. +func PayToScriptHashScript(scriptHash []byte) ([]byte, error) { + return payToScriptHashScript(scriptHash) +} + // payToPubkeyScript creates a new script to pay a transaction output to a // public key. It is expected that the input is a valid pubkey. func payToPubKeyScript(serializedPubKey []byte) ([]byte, error) { @@ -314,27 +741,354 @@ func payToPubKeyScript(serializedPubKey []byte) ([]byte, error) { AddOp(OP_CHECKSIG).Script() } +// payToEdwardsPubKeyScript creates a new script to pay a transaction output +// to an Ed25519 public key. It is expected that the input is a valid pubkey. +func payToEdwardsPubKeyScript(serializedPubKey []byte) ([]byte, error) { + edwardsData := []byte{byte(edwards)} + return NewScriptBuilder().AddData(serializedPubKey).AddData(edwardsData). + AddOp(OP_CHECKSIGALT).Script() +} + +// payToSchnorrPubKeyScript creates a new script to pay a transaction output +// to a secp256k1 public key, but to be signed by Schnorr type signature. It +// is expected that the input is a valid pubkey. +func payToSchnorrPubKeyScript(serializedPubKey []byte) ([]byte, error) { + schnorrData := []byte{byte(secSchnorr)} + return NewScriptBuilder().AddData(serializedPubKey).AddData(schnorrData). + AddOp(OP_CHECKSIGALT).Script() +} + +// PayToSStx creates a new script to pay a transaction output to a script hash or +// public key hash, but tags the output with OP_SSTX. For use in constructing +// valid SStxs. +func PayToSStx(addr dcrutil.Address) ([]byte, error) { + if addr == nil { + return nil, ErrUnsupportedAddress + } + + // Only pay to pubkey hash and pay to script hash are + // supported. + scriptType := PubKeyHashTy + switch addr := addr.(type) { + case *dcrutil.AddressPubKeyHash: + if addr.DSA(addr.Net()) != chainec.ECTypeSecp256k1 { + return nil, ErrUnsupportedAddress + } + break + case *dcrutil.AddressScriptHash: + scriptType = ScriptHashTy + break + default: + return nil, ErrUnsupportedAddress + } + + hash := addr.ScriptAddress() + + if scriptType == PubKeyHashTy { + return NewScriptBuilder().AddOp(OP_SSTX).AddOp(OP_DUP). + AddOp(OP_HASH160).AddData(hash).AddOp(OP_EQUALVERIFY). + AddOp(OP_CHECKSIG).Script() + } + return NewScriptBuilder().AddOp(OP_SSTX).AddOp(OP_HASH160). + AddData(hash).AddOp(OP_EQUAL).Script() +} + +// PayToSStxChange creates a new script to pay a transaction output to a +// public key hash, but tags the output with OP_SSTXCHANGE. For use in constructing +// valid SStxs. +func PayToSStxChange(addr dcrutil.Address) ([]byte, error) { + if addr == nil { + return nil, ErrUnsupportedAddress + } + + // Only pay to pubkey hash and pay to script hash are + // supported. 
+ scriptType := PubKeyHashTy + switch addr := addr.(type) { + case *dcrutil.AddressPubKeyHash: + if addr.DSA(addr.Net()) != chainec.ECTypeSecp256k1 { + return nil, ErrUnsupportedAddress + } + break + case *dcrutil.AddressScriptHash: + scriptType = ScriptHashTy + break + default: + return nil, ErrUnsupportedAddress + } + + hash := addr.ScriptAddress() + + if scriptType == PubKeyHashTy { + return NewScriptBuilder().AddOp(OP_SSTXCHANGE).AddOp(OP_DUP). + AddOp(OP_HASH160).AddData(hash).AddOp(OP_EQUALVERIFY). + AddOp(OP_CHECKSIG).Script() + } + return NewScriptBuilder().AddOp(OP_SSTXCHANGE).AddOp(OP_HASH160). + AddData(hash).AddOp(OP_EQUAL).Script() +} + +// PayToSSGen creates a new script to pay a transaction output to a public key +// hash or script hash, but tags the output with OP_SSGEN. For use in constructing +// valid SSGen txs. +func PayToSSGen(addr dcrutil.Address) ([]byte, error) { + if addr == nil { + return nil, ErrUnsupportedAddress + } + + // Only pay to pubkey hash and pay to script hash are + // supported. + scriptType := PubKeyHashTy + switch addr := addr.(type) { + case *dcrutil.AddressPubKeyHash: + if addr.DSA(addr.Net()) != chainec.ECTypeSecp256k1 { + return nil, ErrUnsupportedAddress + } + break + case *dcrutil.AddressScriptHash: + scriptType = ScriptHashTy + break + default: + return nil, ErrUnsupportedAddress + } + + hash := addr.ScriptAddress() + + if scriptType == PubKeyHashTy { + return NewScriptBuilder().AddOp(OP_SSGEN).AddOp(OP_DUP). + AddOp(OP_HASH160).AddData(hash).AddOp(OP_EQUALVERIFY). + AddOp(OP_CHECKSIG).Script() + } + return NewScriptBuilder().AddOp(OP_SSGEN).AddOp(OP_HASH160). + AddData(hash).AddOp(OP_EQUAL).Script() +} + +// PayToSSGenPKHDirect creates a new script to pay a transaction output to a +// public key hash, but tags the output with OP_SSGEN. For use in constructing +// valid SSGen txs. Unlike PayToSSGen, this function directly uses the HASH160 +// pubkeyhash (instead of an address). +func PayToSSGenPKHDirect(pkh []byte) ([]byte, error) { + if pkh == nil { + return nil, ErrUnsupportedAddress + } + + return NewScriptBuilder().AddOp(OP_SSGEN).AddOp(OP_DUP). + AddOp(OP_HASH160).AddData(pkh).AddOp(OP_EQUALVERIFY). + AddOp(OP_CHECKSIG).Script() +} + +// PayToSSGenSHDirect creates a new script to pay a transaction output to a +// script hash, but tags the output with OP_SSGEN. For use in constructing +// valid SSGen txs. Unlike PayToSSGen, this function directly uses the HASH160 +// script hash (instead of an address). +func PayToSSGenSHDirect(sh []byte) ([]byte, error) { + if sh == nil { + return nil, ErrUnsupportedAddress + } + + return NewScriptBuilder().AddOp(OP_SSGEN).AddOp(OP_HASH160). + AddData(sh).AddOp(OP_EQUAL).Script() +} + +// PayToSSRtx creates a new script to pay a transaction output to a +// public key hash, but tags the output with OP_SSRTX. For use in constructing +// valid SSRtx. +func PayToSSRtx(addr dcrutil.Address) ([]byte, error) { + if addr == nil { + return nil, ErrUnsupportedAddress + } + + // Only pay to pubkey hash and pay to script hash are + // supported. + scriptType := PubKeyHashTy + switch addr := addr.(type) { + case *dcrutil.AddressPubKeyHash: + if addr.DSA(addr.Net()) != chainec.ECTypeSecp256k1 { + return nil, ErrUnsupportedAddress + } + break + case *dcrutil.AddressScriptHash: + scriptType = ScriptHashTy + break + default: + return nil, ErrUnsupportedAddress + } + + hash := addr.ScriptAddress() + + if scriptType == PubKeyHashTy { + return NewScriptBuilder().AddOp(OP_SSRTX).AddOp(OP_DUP). 
+ AddOp(OP_HASH160).AddData(hash).AddOp(OP_EQUALVERIFY). + AddOp(OP_CHECKSIG).Script() + } + return NewScriptBuilder().AddOp(OP_SSRTX).AddOp(OP_HASH160). + AddData(hash).AddOp(OP_EQUAL).Script() +} + +// PayToSSRtxPKHDirect creates a new script to pay a transaction output to a +// public key hash, but tags the output with OP_SSRTX. For use in constructing +// valid SSRtx. Unlike PayToSSRtx, this function directly uses the HASH160 +// pubkeyhash (instead of an address). +func PayToSSRtxPKHDirect(pkh []byte) ([]byte, error) { + if pkh == nil { + return nil, ErrUnsupportedAddress + } + + return NewScriptBuilder().AddOp(OP_SSRTX).AddOp(OP_DUP). + AddOp(OP_HASH160).AddData(pkh).AddOp(OP_EQUALVERIFY). + AddOp(OP_CHECKSIG).Script() +} + +// PayToSSRtxSHDirect creates a new script to pay a transaction output to a +// script hash, but tags the output with OP_SSRTX. For use in constructing +// valid SSRtx. Unlike PayToSSRtx, this function directly uses the HASH160 +// script hash (instead of an address). +func PayToSSRtxSHDirect(sh []byte) ([]byte, error) { + if sh == nil { + return nil, ErrUnsupportedAddress + } + + return NewScriptBuilder().AddOp(OP_SSRTX).AddOp(OP_HASH160). + AddData(sh).AddOp(OP_EQUAL).Script() +} + +// GenerateSStxAddrPush generates an OP_RETURN push for SSGen payment addresses in +// an SStx. +func GenerateSStxAddrPush(addr dcrutil.Address, amount dcrutil.Amount, + limits uint16) ([]byte, error) { + if addr == nil { + return nil, ErrUnsupportedAddress + } + + // Only pay to pubkey hash and pay to script hash are + // supported. + scriptType := PubKeyHashTy + switch addr := addr.(type) { + case *dcrutil.AddressPubKeyHash: + if addr.DSA(addr.Net()) != chainec.ECTypeSecp256k1 { + return nil, ErrUnsupportedAddress + } + break + case *dcrutil.AddressScriptHash: + scriptType = ScriptHashTy + break + default: + return nil, ErrUnsupportedAddress + } + + // Prefix + dataPushes := []byte{ + 0x6a, // OP_RETURN + 0x1e, // OP_DATA_30 + } + + hash := addr.ScriptAddress() + + amountBuffer := make([]byte, 8) + binary.LittleEndian.PutUint64(amountBuffer, uint64(amount)) + + // Set the bit flag indicating pay to script hash. + if scriptType == ScriptHashTy { + amountBuffer[7] |= 1 << 7 + } + + limitsBuffer := make([]byte, 2) + binary.LittleEndian.PutUint16(limitsBuffer, limits) + + // Concatenate the prefix, pubkey hash, amount, and limits. + addrOut := append(dataPushes, hash...) + addrOut = append(addrOut, amountBuffer...) + addrOut = append(addrOut, limitsBuffer...) + + return addrOut, nil +} + +// GenerateSSGenBlockRef generates an OP_RETURN push for the block header hash and +// height which the block votes on. +func GenerateSSGenBlockRef(blockHash chainhash.Hash, height uint32) ([]byte, + error) { + // Prefix + dataPushes := []byte{ + 0x6a, // OP_RETURN + 0x24, // OP_DATA_36 + } + + // Serialize the block hash and height + blockHashBytes := blockHash.Bytes() + blockHeightBytes := make([]byte, 4) + binary.LittleEndian.PutUint32(blockHeightBytes, height) + + blockData := append(blockHashBytes, blockHeightBytes...) + + // Concatenate the prefix and block data + blockDataOut := append(dataPushes, blockData...) + + return blockDataOut, nil +} + +// GenerateSSGenVotes generates an OP_RETURN push for the vote bits in an SSGen tx.
+func GenerateSSGenVotes(votebits uint16) ([]byte, error) { + // Prefix + dataPushes := []byte{ + 0x6a, // OP_RETURN + 0x02, // OP_DATA_2 + } + + // Serialize the votebits + voteBitsBytes := make([]byte, 2) + binary.LittleEndian.PutUint16(voteBitsBytes, votebits) + + // Concatenate the prefix and vote bits + voteBitsOut := append(dataPushes, voteBitsBytes...) + + return voteBitsOut, nil +} + +// GenerateProvablyPruneableOut creates an OP_RETURN push of arbitrary data. +func GenerateProvablyPruneableOut(data []byte) ([]byte, error) { + return NewScriptBuilder().AddOp(OP_RETURN).AddData(data).Script() +} + // PayToAddrScript creates a new script to pay a transaction output to a the // specified address. -func PayToAddrScript(addr btcutil.Address) ([]byte, error) { +func PayToAddrScript(addr dcrutil.Address) ([]byte, error) { switch addr := addr.(type) { - case *btcutil.AddressPubKeyHash: + case *dcrutil.AddressPubKeyHash: if addr == nil { return nil, ErrUnsupportedAddress } - return payToPubKeyHashScript(addr.ScriptAddress()) + switch addr.DSA(addr.Net()) { + case chainec.ECTypeSecp256k1: + return payToPubKeyHashScript(addr.ScriptAddress()) + case chainec.ECTypeEdwards: + return payToPubKeyHashEdwardsScript(addr.ScriptAddress()) + case chainec.ECTypeSecSchnorr: + return payToPubKeyHashSchnorrScript(addr.ScriptAddress()) + } - case *btcutil.AddressScriptHash: + case *dcrutil.AddressScriptHash: if addr == nil { return nil, ErrUnsupportedAddress } return payToScriptHashScript(addr.ScriptAddress()) - case *btcutil.AddressPubKey: + case *dcrutil.AddressSecpPubKey: if addr == nil { return nil, ErrUnsupportedAddress } return payToPubKeyScript(addr.ScriptAddress()) + + case *dcrutil.AddressEdwardsPubKey: + if addr == nil { + return nil, ErrUnsupportedAddress + } + return payToEdwardsPubKeyScript(addr.ScriptAddress()) + + case *dcrutil.AddressSecSchnorrPubKey: + if addr == nil { + return nil, ErrUnsupportedAddress + } + return payToSchnorrPubKeyScript(addr.ScriptAddress()) } return nil, ErrUnsupportedAddress @@ -344,7 +1098,8 @@ func PayToAddrScript(addr btcutil.Address) ([]byte, error) { // nrequired of the keys in pubkeys are required to have signed the transaction // for success. An ErrBadNumRequired will be returned if nrequired is larger // than the number of keys provided. -func MultiSigScript(pubkeys []*btcutil.AddressPubKey, nrequired int) ([]byte, error) { +func MultiSigScript(pubkeys []*dcrutil.AddressSecpPubKey, nrequired int) ([]byte, + error) { if len(pubkeys) < nrequired { return nil, ErrBadNumRequired } @@ -378,12 +1133,33 @@ func PushedData(script []byte) ([][]byte, error) { return data, nil } +// GetMultisigMandN returns the number of public keys and the number of +// signatures required to redeem the multisignature script. +func GetMultisigMandN(script []byte) (uint8, uint8, error) { + // No valid addresses or required signatures if the script doesn't + // parse. + pops, err := parseScript(script) + if err != nil { + return 0, 0, err + } + + requiredSigs := uint8(asSmallInt(pops[0].opcode)) + numPubKeys := uint8(asSmallInt(pops[len(pops)-2].opcode)) + + return requiredSigs, numPubKeys, nil +} + // ExtractPkScriptAddrs returns the type of script, addresses and required // signatures associated with the passed PkScript. Note that it only works for // 'standard' transaction script types. Any data such as public keys which are // invalid are omitted from the results. 
-func ExtractPkScriptAddrs(pkScript []byte, chainParams *chaincfg.Params) (ScriptClass, []btcutil.Address, int, error) { - var addrs []btcutil.Address +func ExtractPkScriptAddrs(version uint16, pkScript []byte, + chainParams *chaincfg.Params) (ScriptClass, []dcrutil.Address, int, error) { + if version != DefaultScriptVersion { + return NonStandardTy, nil, 0, fmt.Errorf("invalid script version") + } + + var addrs []dcrutil.Address var requiredSigs int // No valid addresses or required signatures if the script doesn't @@ -394,6 +1170,7 @@ func ExtractPkScriptAddrs(pkScript []byte, chainParams *chaincfg.Params) (Script } scriptClass := typeOfScript(pops) + switch scriptClass { case PubKeyHashTy: // A pay-to-pubkey-hash script is of the form: @@ -401,8 +1178,21 @@ func ExtractPkScriptAddrs(pkScript []byte, chainParams *chaincfg.Params) (Script // Therefore the pubkey hash is the 3rd item on the stack. // Skip the pubkey hash if it's invalid for some reason. requiredSigs = 1 - addr, err := btcutil.NewAddressPubKeyHash(pops[2].data, - chainParams) + addr, err := dcrutil.NewAddressPubKeyHash(pops[2].data, + chainParams, chainec.ECTypeSecp256k1) + if err == nil { + addrs = append(addrs, addr) + } + + case PubkeyHashAltTy: + // A pay-to-pubkey-hash script is of the form: + // OP_DUP OP_HASH160 OP_EQUALVERIFY OP_CHECKSIGALT + // Therefore the pubkey hash is the 3rd item on the stack. + // Skip the pubkey hash if it's invalid for some reason. + requiredSigs = 1 + suite, _ := ExtractPkScriptAltSigType(pkScript) + addr, err := dcrutil.NewAddressPubKeyHash(pops[2].data, + chainParams, suite) if err == nil { addrs = append(addrs, addr) } @@ -413,18 +1203,82 @@ func ExtractPkScriptAddrs(pkScript []byte, chainParams *chaincfg.Params) (Script // Therefore the pubkey is the first item on the stack. // Skip the pubkey if it's invalid for some reason. requiredSigs = 1 - addr, err := btcutil.NewAddressPubKey(pops[0].data, chainParams) + addr, err := dcrutil.NewAddressSecpPubKey(pops[0].data, chainParams) if err == nil { addrs = append(addrs, addr) } + case PubkeyAltTy: + // A pay-to-pubkey alt script is of the form: + // OP_CHECKSIGALT + // Therefore the pubkey is the first item on the stack. + // Skip the pubkey if it's invalid for some reason. + requiredSigs = 1 + suite, _ := ExtractPkScriptAltSigType(pkScript) + var addr dcrutil.Address + err := fmt.Errorf("invalid signature suite for alt sig") + switch suite { + case chainec.ECTypeEdwards: + addr, err = dcrutil.NewAddressEdwardsPubKey(pops[0].data, + chainParams) + case chainec.ECTypeSecSchnorr: + addr, err = dcrutil.NewAddressSecSchnorrPubKey(pops[0].data, + chainParams) + } + if err == nil { + addrs = append(addrs, addr) + } + + case StakeSubmissionTy: + // A pay-to-stake-submission-hash script is of the form: + // OP_SSTX ... P2PKH or P2SH + var localAddrs []dcrutil.Address + _, localAddrs, requiredSigs, err = + ExtractPkScriptAddrs(version, getStakeOutSubscript(pkScript), + chainParams) + if err == nil { + addrs = append(addrs, localAddrs...) + } + + case StakeGenTy: + // A pay-to-stake-generation-hash script is of the form: + // OP_SSGEN ... P2PKH or P2SH + var localAddrs []dcrutil.Address + _, localAddrs, requiredSigs, err = ExtractPkScriptAddrs(version, + getStakeOutSubscript(pkScript), chainParams) + if err == nil { + addrs = append(addrs, localAddrs...) + } + + case StakeRevocationTy: + // A pay-to-stake-revocation-hash script is of the form: + // OP_SSRTX ... 
P2PKH or P2SH + var localAddrs []dcrutil.Address + _, localAddrs, requiredSigs, err = + ExtractPkScriptAddrs(version, getStakeOutSubscript(pkScript), + chainParams) + if err == nil { + addrs = append(addrs, localAddrs...) + } + + case StakeSubChangeTy: + // A pay-to-stake-submission-change-hash script is of the form: + // OP_SSTXCHANGE ... P2PKH or P2SH + var localAddrs []dcrutil.Address + _, localAddrs, requiredSigs, err = + ExtractPkScriptAddrs(version, getStakeOutSubscript(pkScript), + chainParams) + if err == nil { + addrs = append(addrs, localAddrs...) + } + case ScriptHashTy: // A pay-to-script-hash script is of the form: // OP_HASH160 OP_EQUAL // Therefore the script hash is the 2nd item on the stack. // Skip the script hash if it's invalid for some reason. requiredSigs = 1 - addr, err := btcutil.NewAddressScriptHashFromHash(pops[1].data, + addr, err := dcrutil.NewAddressScriptHashFromHash(pops[1].data, chainParams) if err == nil { addrs = append(addrs, addr) @@ -440,9 +1294,9 @@ func ExtractPkScriptAddrs(pkScript []byte, chainParams *chaincfg.Params) (Script numPubKeys := asSmallInt(pops[len(pops)-2].opcode) // Extract the public keys while skipping any that are invalid. - addrs = make([]btcutil.Address, 0, numPubKeys) + addrs = make([]dcrutil.Address, 0, numPubKeys) for i := 0; i < numPubKeys; i++ { - addr, err := btcutil.NewAddressPubKey(pops[i+1].data, + addr, err := dcrutil.NewAddressSecpPubKey(pops[i+1].data, chainParams) if err == nil { addrs = append(addrs, addr) @@ -460,3 +1314,83 @@ func ExtractPkScriptAddrs(pkScript []byte, chainParams *chaincfg.Params) (Script return scriptClass, addrs, requiredSigs, nil } + +// extractOneBytePush returns the value of a one byte push. +func extractOneBytePush(po parsedOpcode) int { + if !isOneByteMaxDataPush(po) { + return -1 + } + + if po.opcode.value == OP_1 || + po.opcode.value == OP_2 || + po.opcode.value == OP_3 || + po.opcode.value == OP_4 || + po.opcode.value == OP_5 || + po.opcode.value == OP_6 || + po.opcode.value == OP_7 || + po.opcode.value == OP_8 || + po.opcode.value == OP_9 || + po.opcode.value == OP_10 || + po.opcode.value == OP_11 || + po.opcode.value == OP_12 || + po.opcode.value == OP_13 || + po.opcode.value == OP_14 || + po.opcode.value == OP_15 || + po.opcode.value == OP_16 { + return int(po.opcode.value - 80) + } + + return int(po.data[0]) +} + +// ExtractPkScriptAltSigType returns the signature scheme to use for an +// alternative check signature script. +func ExtractPkScriptAltSigType(pkScript []byte) (int, error) { + pops, err := parseScript(pkScript) + if err != nil { + return 0, err + } + + isPKA := isPubkeyAlt(pops) + isPKHA := isPubkeyHashAlt(pops) + if !(isPKA || isPKHA) { + return -1, fmt.Errorf("wrong script type") + } + + sigTypeLoc := 1 + if isPKHA { + sigTypeLoc = 4 + } + + valInt := extractOneBytePush(pops[sigTypeLoc]) + if valInt < 0 { + return 0, fmt.Errorf("bad type push") + } + val := sigTypes(valInt) + switch val { + case edwards: + return int(val), nil + case secSchnorr: + return int(val), nil + default: + break + } + + return -1, fmt.Errorf("bad signature scheme type") +} + +// GetNullDataContent returns the content of a NullData (OP_RETURN) data push +// and an error if the script is not a NullData script. 
+func GetNullDataContent(version uint16, pkScript []byte) ([]byte, error) { + class := GetScriptClass(version, pkScript) + if class != NullDataTy { + return nil, fmt.Errorf("not nulldata script") + } + + pops, err := parseScript(pkScript) + if err != nil { + return nil, fmt.Errorf("script parse failure") + } + + return pops[1].data, nil +} diff --git a/txscript/standard_test.go b/txscript/standard_test.go index dafd3560..9639ad1c 100644 --- a/txscript/standard_test.go +++ b/txscript/standard_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -10,9 +11,9 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcutil" + "github.com/decred/dcrd/chaincfg" + "github.com/decred/dcrd/txscript" + "github.com/decred/dcrutil" ) // decodeHex decodes the passed hex string and returns the resulting bytes. It @@ -42,12 +43,12 @@ func mustParseShortForm(script string) []byte { return s } -// newAddressPubKey returns a new btcutil.AddressPubKey from the provided +// newAddressPubKey returns a new dcrutil.AddressPubKey from the provided // serialized public key. It panics if an error occurs. This is only used in // the tests as a helper since the only way it can fail is if there is an error // in the test source code. -func newAddressPubKey(serializedPubKey []byte) btcutil.Address { - addr, err := btcutil.NewAddressPubKey(serializedPubKey, +func newAddressPubKey(serializedPubKey []byte) dcrutil.Address { + addr, err := dcrutil.NewAddressSecpPubKey(serializedPubKey, &chaincfg.MainNetParams) if err != nil { panic("invalid public key in test source") @@ -56,12 +57,13 @@ func newAddressPubKey(serializedPubKey []byte) btcutil.Address { return addr } -// newAddressPubKeyHash returns a new btcutil.AddressPubKeyHash from the +// newAddressPubKeyHash returns a new dcrutil.AddressPubKeyHash from the // provided hash. It panics if an error occurs. This is only used in the tests // as a helper since the only way it can fail is if there is an error in the // test source code. -func newAddressPubKeyHash(pkHash []byte) btcutil.Address { - addr, err := btcutil.NewAddressPubKeyHash(pkHash, &chaincfg.MainNetParams) +func newAddressPubKeyHash(pkHash []byte) dcrutil.Address { + addr, err := dcrutil.NewAddressPubKeyHash(pkHash, &chaincfg.MainNetParams, + secp) if err != nil { panic("invalid public key hash in test source") } @@ -69,12 +71,12 @@ func newAddressPubKeyHash(pkHash []byte) btcutil.Address { return addr } -// newAddressScriptHash returns a new btcutil.AddressScriptHash from the +// newAddressScriptHash returns a new dcrutil.AddressScriptHash from the // provided hash. It panics if an error occurs. This is only used in the tests // as a helper since the only way it can fail is if there is an error in the // test source code. 
-func newAddressScriptHash(scriptHash []byte) btcutil.Address { - addr, err := btcutil.NewAddressScriptHashFromHash(scriptHash, +func newAddressScriptHash(scriptHash []byte) dcrutil.Address { + addr, err := dcrutil.NewAddressScriptHashFromHash(scriptHash, &chaincfg.MainNetParams) if err != nil { panic("invalid script hash in test source") @@ -91,7 +93,7 @@ func TestExtractPkScriptAddrs(t *testing.T) { tests := []struct { name string script []byte - addrs []btcutil.Address + addrs []dcrutil.Address reqSigs int class txscript.ScriptClass }{ @@ -99,7 +101,7 @@ func TestExtractPkScriptAddrs(t *testing.T) { name: "standard p2pk with compressed pubkey (0x02)", script: decodeHex("2102192d74d0cb94344c9569c2e7790157" + "3d8d7903c3ebec3a957724895dca52c6b4ac"), - addrs: []btcutil.Address{ + addrs: []dcrutil.Address{ newAddressPubKey(decodeHex("02192d74d0cb94344" + "c9569c2e77901573d8d7903c3ebec3a95772" + "4895dca52c6b4")), @@ -113,7 +115,7 @@ func TestExtractPkScriptAddrs(t *testing.T) { "1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb" + "84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643" + "f656b412a3ac"), - addrs: []btcutil.Address{ + addrs: []dcrutil.Address{ newAddressPubKey(decodeHex("0411db93e1dcdb8a0" + "16b49840f8c53bc1eb68a382e97b1482ecad" + "7b148a6909a5cb2e0eaddfb84ccf9744464f" + @@ -129,7 +131,7 @@ func TestExtractPkScriptAddrs(t *testing.T) { "3d8d7903c3ebec3a957724895dca52c6b40d45264838" + "c0bd96852662ce6a847b197376830160c6d2eb5e6a4c" + "44d33f453eac"), - addrs: []btcutil.Address{ + addrs: []dcrutil.Address{ newAddressPubKey(decodeHex("06192d74d0cb94344" + "c9569c2e77901573d8d7903c3ebec3a95772" + "4895dca52c6b40d45264838c0bd96852662c" + @@ -143,7 +145,7 @@ func TestExtractPkScriptAddrs(t *testing.T) { name: "standard p2pk with compressed pubkey (0x03)", script: decodeHex("2103b0bd634234abbb1ba1e986e884185c" + "61cf43e001f9137f23c2c409273eb16e65ac"), - addrs: []btcutil.Address{ + addrs: []dcrutil.Address{ newAddressPubKey(decodeHex("03b0bd634234abbb1" + "ba1e986e884185c61cf43e001f9137f23c2c" + "409273eb16e65")), @@ -157,7 +159,7 @@ func TestExtractPkScriptAddrs(t *testing.T) { "61cf43e001f9137f23c2c409273eb16e6537a576782e" + "ba668a7ef8bd3b3cfb1edb7117ab65129b8a2e681f3c" + "1e0908ef7bac"), - addrs: []btcutil.Address{ + addrs: []dcrutil.Address{ newAddressPubKey(decodeHex("04b0bd634234abbb1" + "ba1e986e884185c61cf43e001f9137f23c2c" + "409273eb16e6537a576782eba668a7ef8bd3" + @@ -173,7 +175,7 @@ func TestExtractPkScriptAddrs(t *testing.T) { "61cf43e001f9137f23c2c409273eb16e6537a576782e" + "ba668a7ef8bd3b3cfb1edb7117ab65129b8a2e681f3c" + "1e0908ef7bac"), - addrs: []btcutil.Address{ + addrs: []dcrutil.Address{ newAddressPubKey(decodeHex("07b0bd634234abbb1" + "ba1e986e884185c61cf43e001f9137f23c2c" + "409273eb16e6537a576782eba668a7ef8bd3" + @@ -187,7 +189,7 @@ func TestExtractPkScriptAddrs(t *testing.T) { name: "standard p2pkh", script: decodeHex("76a914ad06dd6ddee55cbca9a9e3713bd7" + "587509a3056488ac"), - addrs: []btcutil.Address{ + addrs: []dcrutil.Address{ newAddressPubKeyHash(decodeHex("ad06dd6ddee55" + "cbca9a9e3713bd7587509a30564")), }, @@ -198,7 +200,7 @@ func TestExtractPkScriptAddrs(t *testing.T) { name: "standard p2sh", script: decodeHex("a91463bcc565f9e68ee0189dd5cc67f1b0" + "e5f02f45cb87"), - addrs: []btcutil.Address{ + addrs: []dcrutil.Address{ newAddressScriptHash(decodeHex("63bcc565f9e68" + "ee0189dd5cc67f1b0e5f02f45cb")), }, @@ -215,7 +217,7 @@ func TestExtractPkScriptAddrs(t *testing.T) { "354d80e550078cb532a34bfa2fcfdeb7d76519aecc62" + "770f5b0e4ef8551946d8a540911abe3e7854a26f39f5" 
+ "8b25c15342af52ae"), - addrs: []btcutil.Address{ + addrs: []dcrutil.Address{ newAddressPubKey(decodeHex("04cc71eb30d653c0c" + "3163990c47b976f3fb3f37cccdcbedb169a1" + "dfef58bbfbfaff7d8a473e7e2e6d317b87ba" + @@ -243,7 +245,7 @@ func TestExtractPkScriptAddrs(t *testing.T) { "bbf781c5410d3f22a7a3a56ffefb2238af8627363bdf" + "2ed97c1f89784a1aecdb43384f11d2acc64443c7fc29" + "9cef0400421a53ae"), - addrs: []btcutil.Address{ + addrs: []dcrutil.Address{ newAddressPubKey(decodeHex("04cb9c3c222c5f7a7" + "d3b9bd152f363a0b6d54c9eb312c4d4f9af1" + "e8551b6c421a6a4ab0e29105f24de20ff463" + @@ -319,7 +321,7 @@ func TestExtractPkScriptAddrs(t *testing.T) { "6e20626520666f756e6420696e207472616e73616374" + "696f6e20366335336364393837313139656637393764" + "35616463636453ae"), - addrs: []btcutil.Address{}, + addrs: []dcrutil.Address{}, reqSigs: 1, class: txscript.MultiSigTy, }, @@ -335,7 +337,7 @@ func TestExtractPkScriptAddrs(t *testing.T) { "39636634633033633630396335393363336539316665" + "64653730323921313233646434326432353633396433" + "38613663663530616234636434340a00000053ae"), - addrs: []btcutil.Address{}, + addrs: []dcrutil.Address{}, reqSigs: 1, class: txscript.MultiSigTy, }, @@ -358,7 +360,7 @@ func TestExtractPkScriptAddrs(t *testing.T) { t.Logf("Running %d tests.", len(tests)) for i, test := range tests { class, addrs, reqSigs, err := txscript.ExtractPkScriptAddrs( - test.script, &chaincfg.MainNetParams) + txscript.DefaultScriptVersion, test.script, &chaincfg.MainNetParams) if err != nil { } @@ -500,41 +502,57 @@ func TestCalcScriptInfo(t *testing.T) { } } -// bogusAddress implements the btcutil.Address interface so the tests can ensure +// bogusAddress implements the dcrutil.Address interface so the tests can ensure // unsupported address types are handled properly. type bogusAddress struct{} // EncodeAddress simply returns an empty string. It exists to satsify the -// btcutil.Address interface. +// dcrutil.Address interface. func (b *bogusAddress) EncodeAddress() string { return "" } // ScriptAddress simply returns an empty byte slice. It exists to satsify the -// btcutil.Address interface. +// dcrutil.Address interface. func (b *bogusAddress) ScriptAddress() []byte { return nil } -// IsForNet lies blatantly to satisfy the btcutil.Address interface. +// Hash160 simply returns an empty byte slice. It exists to satsify the +// dcrutil.Address interface. +func (b *bogusAddress) Hash160() *[20]byte { + return nil +} + +// IsForNet lies blatantly to satisfy the dcrutil.Address interface. func (b *bogusAddress) IsForNet(chainParams *chaincfg.Params) bool { return true // why not? } // String simply returns an empty string. It exists to satsify the -// btcutil.Address interface. +// dcrutil.Address interface. func (b *bogusAddress) String() string { return "" } +// DSA returns -1. +func (b *bogusAddress) DSA(chainParams *chaincfg.Params) int { + return -1 +} + +// Net returns &chaincfg.TestNetParams. +func (b *bogusAddress) Net() *chaincfg.Params { + return &chaincfg.TestNetParams +} + // TestPayToAddrScript ensures the PayToAddrScript function generates the // correct scripts for the various types of addresses. 
func TestPayToAddrScript(t *testing.T) { t.Parallel() // 1MirQ9bwyQcGVJPwKUgapu5ouK2E2Ey4gX - p2pkhMain, err := btcutil.NewAddressPubKeyHash(decodeHex("e34cce70c863"+ - "73273efcc54ce7d2a491bb4a0e84"), &chaincfg.MainNetParams) + p2pkhMain, err := dcrutil.NewAddressPubKeyHash(decodeHex("e34cce70c863"+ + "73273efcc54ce7d2a491bb4a0e84"), &chaincfg.MainNetParams, secp) if err != nil { t.Errorf("Unable to create public key hash address: %v", err) return @@ -542,7 +560,7 @@ func TestPayToAddrScript(t *testing.T) { // Taken from transaction: // b0539a45de13b3e0403909b8bd1a555b8cbe45fd4e3f3fda76f3a5f52835c29d - p2shMain, _ := btcutil.NewAddressScriptHashFromHash(decodeHex("e8c300"+ + p2shMain, _ := dcrutil.NewAddressScriptHashFromHash(decodeHex("e8c300"+ "c87986efa84c37c0519929019ef86eb5b4"), &chaincfg.MainNetParams) if err != nil { t.Errorf("Unable to create script hash address: %v", err) @@ -550,7 +568,7 @@ func TestPayToAddrScript(t *testing.T) { } // mainnet p2pk 13CG6SJ3yHUXo4Cr2RY4THLLJrNFuG3gUg - p2pkCompressedMain, err := btcutil.NewAddressPubKey(decodeHex("02192d74"+ + p2pkCompressedMain, err := dcrutil.NewAddressSecpPubKey(decodeHex("02192d74"+ "d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"), &chaincfg.MainNetParams) if err != nil { @@ -558,7 +576,7 @@ func TestPayToAddrScript(t *testing.T) { err) return } - p2pkCompressed2Main, err := btcutil.NewAddressPubKey(decodeHex("03b0bd"+ + p2pkCompressed2Main, err := dcrutil.NewAddressSecpPubKey(decodeHex("03b0bd"+ "634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65"), &chaincfg.MainNetParams) if err != nil { @@ -567,7 +585,7 @@ func TestPayToAddrScript(t *testing.T) { return } - p2pkUncompressedMain, err := btcutil.NewAddressPubKey(decodeHex("0411db"+ + p2pkUncompressedMain, err := dcrutil.NewAddressSecpPubKey(decodeHex("0411db"+ "93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2"+ "e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3"), &chaincfg.MainNetParams) @@ -578,52 +596,52 @@ func TestPayToAddrScript(t *testing.T) { } tests := []struct { - in btcutil.Address + in dcrutil.Address expected string err error }{ - // pay-to-pubkey-hash address on mainnet + // pay-to-pubkey-hash address on mainnet 0 { p2pkhMain, "DUP HASH160 DATA_20 0xe34cce70c86373273efcc54ce7d2a4" + "91bb4a0e8488 CHECKSIG", nil, }, - // pay-to-script-hash address on mainnet + // pay-to-script-hash address on mainnet 1 { p2shMain, "HASH160 DATA_20 0xe8c300c87986efa84c37c0519929019ef8" + "6eb5b4 EQUAL", nil, }, - // pay-to-pubkey address on mainnet. compressed key. + // pay-to-pubkey address on mainnet. compressed key. 2 { p2pkCompressedMain, "DATA_33 0x02192d74d0cb94344c9569c2e77901573d8d7903c3" + "ebec3a957724895dca52c6b4 CHECKSIG", nil, }, - // pay-to-pubkey address on mainnet. compressed key (other way). + // pay-to-pubkey address on mainnet. compressed key (other way). 3 { p2pkCompressed2Main, "DATA_33 0x03b0bd634234abbb1ba1e986e884185c61cf43e001" + "f9137f23c2c409273eb16e65 CHECKSIG", nil, }, - // pay-to-pubkey address on mainnet. uncompressed key. + // pay-to-pubkey address on mainnet. for decred this would + // be uncompressed, but standard for decred is 33 byte + // compressed public keys. 
{ p2pkUncompressedMain, - "DATA_65 0x0411db93e1dcdb8a016b49840f8c53bc1eb68a382e" + - "97b1482ecad7b148a6909a5cb2e0eaddfb84ccf97444" + - "64f82e160bfa9b8b64f9d4c03f999b8643f656b412a3 " + - "CHECKSIG", + "DATA_33 0x0311db93e1dcdb8a016b49840f8c53bc1eb68a382e97b" + + "1482ecad7b148a6909a5cac", nil, }, // Supported address types with nil pointers. - {(*btcutil.AddressPubKeyHash)(nil), "", txscript.ErrUnsupportedAddress}, - {(*btcutil.AddressScriptHash)(nil), "", txscript.ErrUnsupportedAddress}, - {(*btcutil.AddressPubKey)(nil), "", txscript.ErrUnsupportedAddress}, + {(*dcrutil.AddressPubKeyHash)(nil), "", txscript.ErrUnsupportedAddress}, + {(*dcrutil.AddressScriptHash)(nil), "", txscript.ErrUnsupportedAddress}, + {(*dcrutil.AddressSecpPubKey)(nil), "", txscript.ErrUnsupportedAddress}, // Unsupported address type. {&bogusAddress{}, "", txscript.ErrUnsupportedAddress}, @@ -653,7 +671,7 @@ func TestMultiSigScript(t *testing.T) { t.Parallel() // mainnet p2pk 13CG6SJ3yHUXo4Cr2RY4THLLJrNFuG3gUg - p2pkCompressedMain, err := btcutil.NewAddressPubKey(decodeHex("02192d7"+ + p2pkCompressedMain, err := dcrutil.NewAddressSecpPubKey(decodeHex("02192d7"+ "4d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"), &chaincfg.MainNetParams) if err != nil { @@ -661,7 +679,7 @@ func TestMultiSigScript(t *testing.T) { err) return } - p2pkCompressed2Main, err := btcutil.NewAddressPubKey(decodeHex("03b0bd"+ + p2pkCompressed2Main, err := dcrutil.NewAddressSecpPubKey(decodeHex("03b0bd"+ "634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65"), &chaincfg.MainNetParams) if err != nil { @@ -670,7 +688,7 @@ func TestMultiSigScript(t *testing.T) { return } - p2pkUncompressedMain, err := btcutil.NewAddressPubKey(decodeHex("0411d"+ + p2pkUncompressedMain, err := dcrutil.NewAddressSecpPubKey(decodeHex("0411d"+ "b93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c"+ "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b41"+ "2a3"), &chaincfg.MainNetParams) @@ -681,13 +699,13 @@ func TestMultiSigScript(t *testing.T) { } tests := []struct { - keys []*btcutil.AddressPubKey + keys []*dcrutil.AddressSecpPubKey nrequired int expected string err error }{ { - []*btcutil.AddressPubKey{ + []*dcrutil.AddressSecpPubKey{ p2pkCompressedMain, p2pkCompressed2Main, }, @@ -699,7 +717,7 @@ func TestMultiSigScript(t *testing.T) { nil, }, { - []*btcutil.AddressPubKey{ + []*dcrutil.AddressSecpPubKey{ p2pkCompressedMain, p2pkCompressed2Main, }, @@ -711,7 +729,7 @@ func TestMultiSigScript(t *testing.T) { nil, }, { - []*btcutil.AddressPubKey{ + []*dcrutil.AddressSecpPubKey{ p2pkCompressedMain, p2pkCompressed2Main, }, @@ -720,18 +738,17 @@ func TestMultiSigScript(t *testing.T) { txscript.ErrBadNumRequired, }, { - []*btcutil.AddressPubKey{ + // By default compressed pubkeys are used in Decred. 
+ []*dcrutil.AddressSecpPubKey{ p2pkUncompressedMain, }, 1, - "1 DATA_65 0x0411db93e1dcdb8a016b49840f8c53bc1eb68a382" + - "e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf97444" + - "64f82e160bfa9b8b64f9d4c03f999b8643f656b412a3 " + - "1 CHECKMULTISIG", + "1 DATA_33 0x0311db93e1dcdb8a016b49840f8c53bc1eb68a3" + + "82e97b1482ecad7b148a6909a5c 1 CHECKMULTISIG", nil, }, { - []*btcutil.AddressPubKey{ + []*dcrutil.AddressSecpPubKey{ p2pkUncompressedMain, }, 2, @@ -872,10 +889,12 @@ var scriptClassTests = []scriptClassTest{ // Nulldata with more than max allowed data (so therefore // nonstandard) name: "nulldata4", - script: "RETURN PUSHDATA1 0x51 0x046708afdb0fe5548271967f1a67" + - "130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef3" + - "046708afdb0fe5548271967f1a67130b7105cd6a828e03909a67" + - "962e0ea1f61deb649f6bc3f4cef308", + script: "RETURN PUSHDATA2 0x1801 0x046708afdb0fe5548271967f1a670" + + "46708afdb0fe5548271967f1a67046708afdb0fe5548271967f1a670467" + + "08afdb0fe5548271967f1a67046708afdb0fe5548271967f1a67046708a" + + "fdb0fe5548271967f1a67046708afdb0fe5548271967f1a67046708afdb" + + "0fe5548271967f1a67046708afdb0fe5548271967f1a67046708afdb0fe" + + "5548271967f1a67", class: txscript.NonStandardTy, }, { @@ -928,6 +947,18 @@ var scriptClassTests = []scriptClassTest{ script: "DATA_5 0x01020304", class: txscript.NonStandardTy, }, + { + name: "multisig script with wrong number of pubkeys", + script: "2 " + + "DATA_33 " + + "0x027adf5df7c965a2d46203c781bd4dd8" + + "21f11844136f6673af7cc5a4a05cd29380 " + + "DATA_33 " + + "0x02c08f3de8ee2de9be7bd770f4c10eb0" + + "d6ff1dd81ee96eedd3a9d4aeaf86695e80 " + + "3 CHECKMULTISIG", + class: txscript.NonStandardTy, + }, } // TestScriptClass ensures all the scripts in scriptClassTests have the expected @@ -937,7 +968,7 @@ func TestScriptClass(t *testing.T) { for _, test := range scriptClassTests { script := mustParseShortForm(test.script) - class := txscript.GetScriptClass(script) + class := txscript.GetScriptClass(txscript.DefaultScriptVersion, script) if class != test.class { t.Errorf("%s: expected %s got %s", test.name, test.class, class) diff --git a/upgrade.go b/upgrade.go index 4140f41c..8f874795 100644 --- a/upgrade.go +++ b/upgrade.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -25,20 +26,20 @@ func dirEmpty(dirPath string) (bool, error) { return len(names) == 0, nil } -// oldBtcdHomeDir returns the OS specific home directory btcd used prior to -// version 0.3.3. This has since been replaced with btcutil.AppDataDir, but +// oldDcrdHomeDir returns the OS specific home directory dcrd used prior to +// version 0.3.3. This has since been replaced with dcrutil.AppDataDir, but // this function is still provided for the automatic upgrade path. -func oldBtcdHomeDir() string { +func oldDcrdHomeDir() string { // Search for Windows APPDATA first. This won't exist on POSIX OSes. appData := os.Getenv("APPDATA") if appData != "" { - return filepath.Join(appData, "btcd") + return filepath.Join(appData, "dcrd") } // Fall back to standard HOME directory that works for most POSIX OSes. home := os.Getenv("HOME") if home != "" { - return filepath.Join(home, ".btcd") + return filepath.Join(home, ".dcrd") } // In the worst case, use the current directory. 
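The hunk above keeps the legacy lookup order for the pre-0.3.3 home directory: Windows APPDATA first, then POSIX HOME, then the current directory as a last resort. A hypothetical POSIX call would therefore resolve as follows (illustrative only; the environment values are assumptions, not part of the patch):

```Go
	// With APPDATA unset and HOME set, the legacy location is $HOME/.dcrd;
	// on Windows (APPDATA set) it would be %APPDATA%\dcrd instead.
	os.Setenv("APPDATA", "")
	os.Setenv("HOME", "/home/alice")
	fmt.Println(oldDcrdHomeDir()) // "/home/alice/.dcrd"
```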
@@ -46,7 +47,7 @@ func oldBtcdHomeDir() string { } // upgradeDBPathNet moves the database for a specific network from its -// location prior to btcd version 0.2.0 and uses heuristics to ascertain the old +// location prior to version 0.2.0 and uses heuristics to ascertain the old // database type to rename to the new format. func upgradeDBPathNet(oldDbPath, netName string) error { // Prior to version 0.2.0, the database was named the same thing for @@ -61,7 +62,7 @@ func upgradeDBPathNet(oldDbPath, netName string) error { } // The new database name is based on the database type and - // resides in a directory named after the network type. + // resides in a directory named after the network type. newDbRoot := filepath.Join(filepath.Dir(cfg.DataDir), netName) newDbName := blockDbNamePrefix + "_" + oldDbType if oldDbType == "sqlite" { @@ -85,17 +86,17 @@ func upgradeDBPathNet(oldDbPath, netName string) error { return nil } -// upgradeDBPaths moves the databases from their locations prior to btcd +// upgradeDBPaths moves the databases from their locations prior to dcrd // version 0.2.0 to their new locations. func upgradeDBPaths() error { // Prior to version 0.2.0, the databases were in the "db" directory and // their names were suffixed by "testnet" and "regtest" for their // respective networks. Check for the old database and update it to the - // new path introduced with version 0.2.0 accordingly. - oldDbRoot := filepath.Join(oldBtcdHomeDir(), "db") - upgradeDBPathNet(filepath.Join(oldDbRoot, "btcd.db"), "mainnet") - upgradeDBPathNet(filepath.Join(oldDbRoot, "btcd_testnet.db"), "testnet") - upgradeDBPathNet(filepath.Join(oldDbRoot, "btcd_regtest.db"), "regtest") + // new path introduced with version 0.2.0 accordingly. + oldDbRoot := filepath.Join(oldDcrdHomeDir(), "db") + upgradeDBPathNet(filepath.Join(oldDbRoot, "dcrd.db"), "mainnet") + upgradeDBPathNet(filepath.Join(oldDbRoot, "dcrd_testnet.db"), "testnet") + upgradeDBPathNet(filepath.Join(oldDbRoot, "dcrd_regtest.db"), "regtest") // Remove the old db directory. err := os.RemoveAll(oldDbRoot) @@ -106,12 +107,12 @@ func upgradeDBPaths() error { return nil } -// upgradeDataPaths moves the application data from its location prior to btcd +// upgradeDataPaths moves the application data from its location prior to // version 0.3.3 to its new location. func upgradeDataPaths() error { // No need to migrate if the old and new home paths are the same. - oldHomePath := oldBtcdHomeDir() - newHomePath := btcdHomeDir + oldHomePath := oldDcrdHomeDir() + newHomePath := dcrdHomeDir if oldHomePath == newHomePath { return nil } @@ -119,14 +120,14 @@ func upgradeDataPaths() error { // Only migrate if the old path exists and the new one doesn't. if fileExists(oldHomePath) && !fileExists(newHomePath) { // Create the new path. - btcdLog.Infof("Migrating application home path from '%s' to '%s'", + dcrdLog.Infof("Migrating application home path from '%s' to '%s'", oldHomePath, newHomePath) err := os.MkdirAll(newHomePath, 0700) if err != nil { return err } - // Move old btcd.conf into new location if needed. + // Move old dcrd.conf into new location if needed.
oldConfPath := filepath.Join(oldHomePath, defaultConfigFilename) newConfPath := filepath.Join(newHomePath, defaultConfigFilename) if fileExists(oldConfPath) && !fileExists(newConfPath) { @@ -157,7 +158,7 @@ func upgradeDataPaths() error { return err } } else { - btcdLog.Warnf("Not removing '%s' since it contains files "+ + dcrdLog.Warnf("Not removing '%s' since it contains files "+ "not created by this application. You may "+ "want to manually move them or delete them.", oldHomePath) @@ -167,7 +168,7 @@ func upgradeDataPaths() error { return nil } -// doUpgrades performs upgrades to btcd as new versions require it. +// doUpgrades performs upgrades to dcrd as new versions require it. func doUpgrades() error { err := upgradeDBPaths() if err != nil { diff --git a/version.go b/version.go index c9a7a3d3..6eea8417 100644 --- a/version.go +++ b/version.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -17,7 +18,7 @@ const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqr // versioning 2.0.0 spec (http://semver.org/). const ( appMajor uint = 0 - appMinor uint = 11 + appMinor uint = 0 appPatch uint = 1 // appPreRelease MUST only contain characters from semanticAlphabet diff --git a/wire/README.md b/wire/README.md index 9bdd2b87..2df698f9 100644 --- a/wire/README.md +++ b/wire/README.md @@ -1,72 +1,71 @@ wire ==== -[![Build Status](http://img.shields.io/travis/btcsuite/btcd.svg)] -(https://travis-ci.org/btcsuite/btcd) [![ISC License] +[![ISC License] (http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) -Package wire implements the bitcoin wire protocol. A comprehensive suite of +Package wire implements the decred wire protocol. A comprehensive suite of tests with 100% test coverage is provided to ensure proper functionality. There is an associated blog post about the release of this package -[here](https://blog.conformal.com/btcwire-the-bitcoin-wire-protocol-package-from-btcd/). +[here](https://blog.conformal.com/btcwire-the-bitcoin-wire-protocol-package-from-dcrd/). This package has intentionally been designed so it can be used as a standalone -package for any projects needing to interface with bitcoin peers at the wire +package for any projects needing to interface with decred peers at the wire protocol level. ## Documentation [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)] -(http://godoc.org/github.com/btcsuite/btcd/wire) +(http://godoc.org/github.com/decred/dcrd/wire) Full `go doc` style documentation for the project can be viewed online without installing this package by using the GoDoc site here: -http://godoc.org/github.com/btcsuite/btcd/wire +http://godoc.org/github.com/decred/dcrd/wire You can also view the documentation locally once the package is installed with the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to -http://localhost:6060/pkg/github.com/btcsuite/btcd/wire +http://localhost:6060/pkg/github.com/decred/dcrd/wire ## Installation ```bash -$ go get github.com/btcsuite/btcd/wire +$ go get github.com/decred/dcrd/wire ``` -## Bitcoin Message Overview +## Decred Message Overview -The bitcoin protocol consists of exchanging messages between peers. Each message +The decred protocol consists of exchanging messages between peers. 
Each message is preceded by a header which identifies information about it such as which -bitcoin network it is a part of, its type, how big it is, and a checksum to +decred network it is a part of, its type, how big it is, and a checksum to verify validity. All encoding and decoding of message headers is handled by this package. -To accomplish this, there is a generic interface for bitcoin messages named +To accomplish this, there is a generic interface for decred messages named `Message` which allows messages of any type to be read, written, or passed around through channels, functions, etc. In addition, concrete implementations -of most of the currently supported bitcoin messages are provided. For these +of most of the currently supported decred messages are provided. For these supported messages, all of the details of marshalling and unmarshalling to and -from the wire using bitcoin encoding are handled so the caller doesn't have to +from the wire using decred encoding are handled so the caller doesn't have to concern themselves with the specifics. ## Reading Messages Example -In order to unmarshal bitcoin messages from the wire, use the `ReadMessage` +In order to unmarshal decred messages from the wire, use the `ReadMessage` function. It accepts any `io.Reader`, but typically this will be a `net.Conn` -to a remote node running a bitcoin peer. Example syntax is: +to a remote node running a decred peer. Example syntax is: ```Go // Use the most recent protocol version supported by the package and the - // main bitcoin network. + // main decred network. pver := wire.ProtocolVersion - btcnet := wire.MainNet + dcrnet := wire.MainNet - // Reads and validates the next bitcoin message from conn using the - // protocol version pver and the bitcoin network btcnet. The returns + // Reads and validates the next decred message from conn using the + // protocol version pver and the decred network dcrnet. The returns // are a wire.Message, a []byte which contains the unmarshalled // raw payload, and a possible error. - msg, rawPayload, err := wire.ReadMessage(conn, pver, btcnet) + msg, rawPayload, err := wire.ReadMessage(conn, pver, dcrnet) if err != nil { // Log and handle the error } @@ -76,49 +75,29 @@ See the package documentation for details on determining the message type. ## Writing Messages Example -In order to marshal bitcoin messages to the wire, use the `WriteMessage` +In order to marshal decred messages to the wire, use the `WriteMessage` function. It accepts any `io.Writer`, but typically this will be a `net.Conn` -to a remote node running a bitcoin peer. Example syntax to request addresses +to a remote node running a decred peer. Example syntax to request addresses from a remote peer is: ```Go // Use the most recent protocol version supported by the package and the - // main bitcoin network. + // main decred network. pver := wire.ProtocolVersion - btcnet := wire.MainNet + dcrnet := wire.MainNet - // Create a new getaddr bitcoin message. + // Create a new getaddr decred message. msg := wire.NewMsgGetAddr() - // Writes a bitcoin message msg to conn using the protocol version - // pver, and the bitcoin network btcnet. The return is a possible + // Writes a decred message msg to conn using the protocol version + // pver, and the decred network dcrnet. The return is a possible // error. 
- err := wire.WriteMessage(conn, msg, pver, btcnet) + err := wire.WriteMessage(conn, msg, pver, dcrnet) if err != nil { // Log and handle the error } ``` -## GPG Verification Key - -All official release tags are signed by Conformal so users can ensure the code -has not been tampered with and is coming from the btcsuite developers. To -verify the signature perform the following: - -- Download the public key from the Conformal website at - https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt - -- Import the public key into your GPG keyring: - ```bash - gpg --import GIT-GPG-KEY-conformal.txt - ``` - -- Verify the release tag with the following command where `TAG_NAME` is a - placeholder for the specific tag: - ```bash - git tag -v TAG_NAME - ``` - ## License Package wire is licensed under the [copyfree](http://copyfree.org) ISC diff --git a/wire/bench_test.go b/wire/bench_test.go index ffae564c..b5046ac6 100644 --- a/wire/bench_test.go +++ b/wire/bench_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -9,6 +10,8 @@ import ( "io/ioutil" "testing" "time" + + "github.com/decred/dcrd/chaincfg/chainhash" ) // genesisCoinbaseTx is the coinbase transaction for the genesis blocks for @@ -18,7 +21,7 @@ var genesisCoinbaseTx = MsgTx{ TxIn: []*TxIn{ { PreviousOutPoint: OutPoint{ - Hash: ShaHash{}, + Hash: chainhash.Hash{}, Index: 0xffffffff, }, SignatureScript: []byte{ @@ -59,13 +62,13 @@ var genesisCoinbaseTx = MsgTx{ var blockOne = MsgBlock{ Header: BlockHeader{ Version: 1, - PrevBlock: ShaHash([HashSize]byte{ // Make go vet happy. + PrevBlock: chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, }), - MerkleRoot: ShaHash([HashSize]byte{ // Make go vet happy. + MerkleRoot: chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, @@ -82,7 +85,7 @@ var blockOne = MsgBlock{ TxIn: []*TxIn{ { PreviousOutPoint: OutPoint{ - Hash: ShaHash{}, + Hash: chainhash.Hash{}, Index: 0xffffffff, }, SignatureScript: []byte{ @@ -228,7 +231,7 @@ func BenchmarkReadOutPoint(b *testing.B) { } var op OutPoint for i := 0; i < b.N; i++ { - readOutPoint(bytes.NewReader(buf), 0, 0, &op) + ReadOutPoint(bytes.NewReader(buf), 0, 0, &op) } } @@ -236,11 +239,11 @@ func BenchmarkReadOutPoint(b *testing.B) { // transaction output point. 
func BenchmarkWriteOutPoint(b *testing.B) { op := &OutPoint{ - Hash: ShaHash{}, + Hash: chainhash.Hash{}, Index: 0, } for i := 0; i < b.N; i++ { - writeOutPoint(ioutil.Discard, 0, 0, op) + WriteOutPoint(ioutil.Discard, 0, 0, op) } } @@ -292,7 +295,7 @@ func BenchmarkReadTxIn(b *testing.B) { } var txIn TxIn for i := 0; i < b.N; i++ { - readTxIn(bytes.NewReader(buf), 0, 0, &txIn) + readTxInPrefix(bytes.NewReader(buf), 0, 0, &txIn) } } @@ -301,7 +304,7 @@ func BenchmarkReadTxIn(b *testing.B) { func BenchmarkWriteTxIn(b *testing.B) { txIn := blockOne.Transactions[0].TxIn[0] for i := 0; i < b.N; i++ { - writeTxIn(ioutil.Discard, 0, 0, txIn) + writeTxInPrefix(ioutil.Discard, 0, 0, txIn) } } @@ -393,9 +396,9 @@ func BenchmarkTxSha(b *testing.B) { } } -// BenchmarkDoubleSha256 performs a benchmark on how long it takes to perform a -// double sha 256 returning a byte slice. -func BenchmarkDoubleSha256(b *testing.B) { +// BenchmarkHashFuncB performs a benchmark on how long it takes to perform a +// hash returning a byte slice. +func BenchmarkHashFuncB(b *testing.B) { b.StopTimer() var buf bytes.Buffer if err := genesisCoinbaseTx.Serialize(&buf); err != nil { @@ -406,13 +409,13 @@ func BenchmarkDoubleSha256(b *testing.B) { b.StartTimer() for i := 0; i < b.N; i++ { - _ = DoubleSha256(txBytes) + _ = chainhash.HashFuncB(txBytes) } } -// BenchmarkDoubleSha256SH performs a benchmark on how long it takes to perform -// a double sha 256 returning a ShaHash. -func BenchmarkDoubleSha256SH(b *testing.B) { +// BenchmarkHashFuncH performs a benchmark on how long it takes to perform +// a hash returning a Hash. +func BenchmarkHashFuncH(b *testing.B) { b.StopTimer() var buf bytes.Buffer if err := genesisCoinbaseTx.Serialize(&buf); err != nil { @@ -423,6 +426,6 @@ func BenchmarkDoubleSha256SH(b *testing.B) { b.StartTimer() for i := 0; i < b.N; i++ { - _ = DoubleSha256SH(txBytes) + _ = chainhash.HashFuncH(txBytes) } } diff --git a/wire/blockheader.go b/wire/blockheader.go index 145ab452..6dff9af9 100644 --- a/wire/blockheader.go +++ b/wire/blockheader.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,52 +9,89 @@ import ( "bytes" "io" "time" + + "github.com/decred/dcrd/chaincfg/chainhash" ) -// BlockVersion is the current latest supported block version. -const BlockVersion = 3 +// Version 4 bytes + Bits 4 bytes + PrevBlock and MerkleRoot hashes + 32 StakeRoot +// bytes + 2 VoteBits bytes + 6 FinalState bytes + 2 Voters bytes + +// 1 FreshStake byte + 1 Revocations byte + 8 SBits bytes + 4 PoolSize bytes + +// 4 Height bytes + 4 Size bytes + Timestamp 4 bytes + 4 bytes nonce. +// --> Total 180 bytes. +const MaxBlockHeaderPayload = 16 + (chainhash.HashSize * 2) + 64 + 36 -// Version 4 bytes + Timestamp 4 bytes + Bits 4 bytes + Nonce 4 bytes + -// PrevBlock and MerkleRoot hashes. -const MaxBlockHeaderPayload = 16 + (HashSize * 2) - -// BlockHeader defines information about a block and is used in the bitcoin +// BlockHeader defines information about a block and is used in the decred // block (MsgBlock) and headers (MsgHeaders) messages. type BlockHeader struct { // Version of the block. This is not the same as the protocol version. Version int32 // Hash of the previous block in the block chain. - PrevBlock ShaHash + PrevBlock chainhash.Hash // Merkle tree reference to hash of all transactions for the block. 
- MerkleRoot ShaHash + MerkleRoot chainhash.Hash + + // Merkle tree reference to hash of all stake transactions for the block. + StakeRoot chainhash.Hash + + // Votes on the previous merkleroot and yet undecided parameters. (TODO) + VoteBits uint16 + + // Final state of the PRNG used for ticket selection in the lottery. + FinalState [6]byte + + // Number of participating voters for this block. + Voters uint16 + + // Number of new sstx in this block. + FreshStake uint8 + + // Number of ssrtx present in this block. + Revocations uint8 + + // Size of the ticket pool. + PoolSize uint32 + + // Difficulty target for the block. + Bits uint32 + + // Stake difficulty target. + SBits int64 + + // Height is the block height in the block chain. + Height uint32 + + // Size is the size of the serialized block in its entirety. + Size uint32 // Time the block was created. This is, unfortunately, encoded as a // uint32 on the wire and therefore is limited to 2106. Timestamp time.Time - // Difficulty target for the block. - Bits uint32 - - // Nonce used to generate the block. + // Nonce is technically a part of ExtraData, but we use it as the + // classical 4-byte nonce here. Nonce uint32 + + // ExtraData is used to encode the nonce or any other extra data + // that might be used later on in consensus. + ExtraData [36]byte } // blockHeaderLen is a constant that represents the number of bytes for a block // header. -const blockHeaderLen = 80 +const blockHeaderLen = 180 // BlockSha computes the block identifier hash for the given block header. -func (h *BlockHeader) BlockSha() ShaHash { - // Encode the header and double sha256 everything prior to the number of +func (h *BlockHeader) BlockSha() chainhash.Hash { + // Encode the header and hash256 everything prior to the number of // transactions. Ignore the error returns since there is no way the // encode could fail except being out of memory which would cause a // run-time panic. var buf bytes.Buffer _ = writeBlockHeader(&buf, 0, h) - return DoubleSha256SH(buf.Bytes()) + return chainhash.HashFuncH(buf.Bytes()) } // Deserialize decodes a block header from r into the receiver using a format @@ -76,31 +114,77 @@ func (h *BlockHeader) Serialize(w io.Writer) error { return writeBlockHeader(w, 0, h) } +// Bytes returns a byte slice containing the serialized contents of the block +// header. +func (h *BlockHeader) Bytes() ([]byte, error) { + // Serialize the MsgBlock. + var w bytes.Buffer + err := h.Serialize(&w) + if err != nil { + return nil, err + } + serializedBlockHeader := w.Bytes() + + // Cache the serialized bytes and return them. + return serializedBlockHeader, nil +} + // NewBlockHeader returns a new BlockHeader using the provided previous block // hash, merkle root hash, difficulty bits, and nonce used to generate the // block with defaults for the remaining fields. -func NewBlockHeader(prevHash *ShaHash, merkleRootHash *ShaHash, bits uint32, - nonce uint32) *BlockHeader { +func NewBlockHeader(version int32, prevHash *chainhash.Hash, + merkleRootHash *chainhash.Hash, stakeRoot *chainhash.Hash, voteBits uint16, + finalState [6]byte, voters uint16, freshStake uint8, revocations uint8, + poolsize uint32, bits uint32, sbits int64, height uint32, size uint32, + nonce uint32, extraData [36]byte) *BlockHeader { // Limit the timestamp to one second precision since the protocol // doesn't support better. 
return &BlockHeader{ - Version: BlockVersion, - PrevBlock: *prevHash, - MerkleRoot: *merkleRootHash, - Timestamp: time.Unix(time.Now().Unix(), 0), - Bits: bits, - Nonce: nonce, + Version: version, + PrevBlock: *prevHash, + MerkleRoot: *merkleRootHash, + StakeRoot: *stakeRoot, + VoteBits: voteBits, + FinalState: finalState, + Voters: voters, + FreshStake: freshStake, + Revocations: revocations, + PoolSize: poolsize, + Bits: bits, + SBits: sbits, + Height: height, + Size: size, + Timestamp: time.Unix(time.Now().Unix(), 0), + Nonce: nonce, + ExtraData: extraData, } } -// readBlockHeader reads a bitcoin block header from r. See Deserialize for +// readBlockHeader reads a decred block header from r. See Deserialize for // decoding block headers stored to disk, such as in a database, as opposed to // decoding from the wire. func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error { var sec uint32 - err := readElements(r, &bh.Version, &bh.PrevBlock, &bh.MerkleRoot, &sec, - &bh.Bits, &bh.Nonce) + err := readElements( + r, + &bh.Version, + &bh.PrevBlock, + &bh.MerkleRoot, + &bh.StakeRoot, + &bh.VoteBits, + &bh.FinalState, + &bh.Voters, + &bh.FreshStake, + &bh.Revocations, + &bh.PoolSize, + &bh.Bits, + &bh.SBits, + &bh.Height, + &bh.Size, + &sec, + &bh.Nonce, + &bh.ExtraData) if err != nil { return err } @@ -109,13 +193,31 @@ func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error { return nil } -// writeBlockHeader writes a bitcoin block header to w. See Serialize for +// writeBlockHeader writes a decred block header to w. See Serialize for // encoding block headers to be stored to disk, such as in a database, as // opposed to encoding for the wire. +// TODO: make sure serializing/writing is actually correct w/r/t dereferencing func writeBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error { sec := uint32(bh.Timestamp.Unix()) - err := writeElements(w, bh.Version, &bh.PrevBlock, &bh.MerkleRoot, - sec, bh.Bits, bh.Nonce) + err := writeElements( + w, + bh.Version, + &bh.PrevBlock, + &bh.MerkleRoot, + &bh.StakeRoot, + bh.VoteBits, + bh.FinalState, + bh.Voters, + bh.FreshStake, + bh.Revocations, + bh.PoolSize, + bh.Bits, + bh.SBits, + bh.Height, + bh.Size, + sec, + bh.Nonce, + bh.ExtraData) if err != nil { return err } diff --git a/wire/blockheader_test.go b/wire/blockheader_test.go index dfbcb1a8..7476c9b0 100644 --- a/wire/blockheader_test.go +++ b/wire/blockheader_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -6,12 +7,14 @@ package wire_test import ( "bytes" + "encoding/hex" "reflect" "testing" "time" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" ) // TestBlockHeader tests the BlockHeader API. 
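For reference, the serialized layout read and written by readBlockHeader/writeBlockHeader above sums to the 180 bytes given by MaxBlockHeaderPayload and blockHeaderLen; note that the 36 ExtraData bytes are part of that total even though the prose comment above does not enumerate them. A small sketch that adds up the field widths in wire order and compares them against the exported constant:

```Go
package main

import (
	"fmt"

	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/wire"
)

func main() {
	// Field widths in the order written by writeBlockHeader.
	size := 4 + // Version
		3*chainhash.HashSize + // PrevBlock, MerkleRoot, StakeRoot (32 bytes each)
		2 + // VoteBits
		6 + // FinalState
		2 + // Voters
		1 + // FreshStake
		1 + // Revocations
		4 + // PoolSize
		4 + // Bits
		8 + // SBits
		4 + // Height
		4 + // Size
		4 + // Timestamp (uint32 on the wire)
		4 + // Nonce
		36 // ExtraData

	fmt.Println(size, wire.MaxBlockHeaderPayload) // expected: 180 180
}
```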
@@ -24,8 +27,36 @@ func TestBlockHeader(t *testing.T) { hash := mainNetGenesisHash merkleHash := mainNetGenesisMerkleRoot + votebits := uint16(0x0000) bits := uint32(0x1d00ffff) - bh := wire.NewBlockHeader(&hash, &merkleHash, bits, nonce) + finalState := [6]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + voters := uint16(0x0000) + freshstake := uint8(0x00) + revocations := uint8(0x00) + poolsize := uint32(0) + sbits := int64(0x0000000000000000) + blockHeight := uint32(0) + blockSize := uint32(0) + extraData := [36]byte{} + + bh := wire.NewBlockHeader( + 1, // verision + &hash, + &merkleHash, + &merkleHash, // stakeRoot + votebits, + finalState, + voters, + freshstake, + revocations, + poolsize, + bits, + sbits, + blockHeight, + blockSize, + nonce, + extraData, + ) // Ensure we get the same data back out. if !bh.PrevBlock.IsEqual(&hash) { @@ -36,10 +67,42 @@ func TestBlockHeader(t *testing.T) { t.Errorf("NewBlockHeader: wrong merkle root - got %v, want %v", spew.Sprint(bh.MerkleRoot), spew.Sprint(merkleHash)) } + if !bh.StakeRoot.IsEqual(&merkleHash) { + t.Errorf("NewBlockHeader: wrong merkle root - got %v, want %v", + spew.Sprint(bh.MerkleRoot), spew.Sprint(merkleHash)) + } + if bh.VoteBits != votebits { + t.Errorf("NewBlockHeader: wrong bits - got %v, want %v", + bh.Bits, bits) + } + if bh.FinalState != finalState { + t.Errorf("NewBlockHeader: wrong bits - got %v, want %v", + bh.Bits, bits) + } + if bh.Voters != voters { + t.Errorf("NewBlockHeader: wrong bits - got %v, want %v", + bh.Bits, bits) + } + if bh.FreshStake != freshstake { + t.Errorf("NewBlockHeader: wrong bits - got %v, want %v", + bh.Bits, bits) + } + if bh.Revocations != revocations { + t.Errorf("NewBlockHeader: wrong bits - got %v, want %v", + bh.Bits, bits) + } + if bh.PoolSize != poolsize { + t.Errorf("NewBlockHeader: wrong PoolSize - got %v, want %v", + bh.PoolSize, poolsize) + } if bh.Bits != bits { t.Errorf("NewBlockHeader: wrong bits - got %v, want %v", bh.Bits, bits) } + if bh.SBits != sbits { + t.Errorf("NewBlockHeader: wrong bits - got %v, want %v", + bh.Bits, bits) + } if bh.Nonce != nonce { t.Errorf("NewBlockHeader: wrong nonce - got %v, want %v", bh.Nonce, nonce) @@ -51,15 +114,41 @@ func TestBlockHeader(t *testing.T) { func TestBlockHeaderWire(t *testing.T) { nonce := uint32(123123) // 0x1e0f3 + /*bh := dcrwire.NewBlockHeader( + &hash, + &merkleHash, + &merkleHash, // stakeRoot + votebits, + winner, + overflow, + voters, + freshstake, + revocations, + bits, + sbits, + nonce, + height, + size)*/ + // baseBlockHdr is used in the various tests as a baseline BlockHeader. bits := uint32(0x1d00ffff) baseBlockHdr := &wire.BlockHeader{ - Version: 1, - PrevBlock: mainNetGenesisHash, - MerkleRoot: mainNetGenesisMerkleRoot, - Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST - Bits: bits, - Nonce: nonce, + Version: 1, + PrevBlock: mainNetGenesisHash, + MerkleRoot: mainNetGenesisMerkleRoot, + StakeRoot: mainNetGenesisMerkleRoot, + VoteBits: uint16(0x0000), + FinalState: [6]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + Voters: uint16(0x0000), + FreshStake: uint8(0x00), + Revocations: uint8(0x00), + PoolSize: uint32(0x00000000), + Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST + Bits: bits, + SBits: int64(0x0000000000000000), + Nonce: nonce, + Height: uint32(0), + Size: uint32(0), } // baseBlockHdrEncoded is the wire encoded bytes of baseBlockHdr. 
@@ -73,9 +162,27 @@ func TestBlockHeaderWire(t *testing.T) { 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, // MerkleRoot - 0x29, 0xab, 0x5f, 0x49, // Timestamp + 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, + 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, + 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, + 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, // StakeRoot + 0x00, 0x00, // VoteBits + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // FinalState + 0x00, 0x00, // Voters + 0x00, // FreshStake + 0x00, // Revocations + 0x00, 0x00, 0x00, 0x00, //Poolsize 0xff, 0xff, 0x00, 0x1d, // Bits + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // SBits + 0x00, 0x00, 0x00, 0x00, // Height + 0x00, 0x00, 0x00, 0x00, // Size + 0x29, 0xab, 0x5f, 0x49, // Timestamp 0xf3, 0xe0, 0x01, 0x00, // Nonce + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ExtraData + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, } tests := []struct { @@ -91,43 +198,12 @@ func TestBlockHeaderWire(t *testing.T) { baseBlockHdrEncoded, wire.ProtocolVersion, }, - - // Protocol version BIP0035Version. - { - baseBlockHdr, - baseBlockHdr, - baseBlockHdrEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0031Version. - { - baseBlockHdr, - baseBlockHdr, - baseBlockHdrEncoded, - wire.BIP0031Version, - }, - - // Protocol version NetAddressTimeVersion. - { - baseBlockHdr, - baseBlockHdr, - baseBlockHdrEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version MultipleAddressVersion. - { - baseBlockHdr, - baseBlockHdr, - baseBlockHdrEncoded, - wire.MultipleAddressVersion, - }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { // Encode to wire format. + // Former test (doesn't work because of capacity error) var buf bytes.Buffer err := wire.TstWriteBlockHeader(&buf, test.pver, test.in) if err != nil { @@ -140,6 +216,17 @@ func TestBlockHeaderWire(t *testing.T) { continue } + b, err := wire.TstBytesBlockHeader(test.in) + if err != nil { + t.Errorf("writeBlockHeader #%d error %v", i, err) + continue + } + if !bytes.Equal(b, test.buf) { + t.Errorf("writeBlockHeader #%d\n got: %s want: %s", i, + spew.Sdump(b), spew.Sdump(test.buf)) + continue + } + // Decode the block header from wire format. var bh wire.BlockHeader rbuf := bytes.NewReader(test.buf) @@ -163,12 +250,21 @@ func TestBlockHeaderSerialize(t *testing.T) { // baseBlockHdr is used in the various tests as a baseline BlockHeader. bits := uint32(0x1d00ffff) baseBlockHdr := &wire.BlockHeader{ - Version: 1, - PrevBlock: mainNetGenesisHash, - MerkleRoot: mainNetGenesisMerkleRoot, - Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST - Bits: bits, - Nonce: nonce, + Version: 1, + PrevBlock: mainNetGenesisHash, + MerkleRoot: mainNetGenesisMerkleRoot, + StakeRoot: mainNetGenesisMerkleRoot, + VoteBits: uint16(0x0000), + FinalState: [6]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + Voters: uint16(0x0000), + FreshStake: uint8(0x00), + Revocations: uint8(0x00), + Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST + Bits: bits, + SBits: int64(0x0000000000000000), + Nonce: nonce, + Height: uint32(0), + Size: uint32(0), } // baseBlockHdrEncoded is the wire encoded bytes of baseBlockHdr. 
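The Serialize/Deserialize/BlockSha round trip that these tests exercise can also be sketched outside the test package. The field values below are arbitrary; only the method signatures and the 180-byte serialized length are taken from the diff above:

```Go
package main

import (
	"bytes"
	"fmt"
	"time"

	"github.com/decred/dcrd/wire"
)

func main() {
	hdr := wire.BlockHeader{
		Version:   1,
		Timestamp: time.Unix(0x495fab29, 0),
		Bits:      0x1d00ffff,
		Nonce:     123123,
	}

	// Serialize to raw bytes; the encoding is the same 180-byte layout the
	// wire tests above check against.
	var buf bytes.Buffer
	if err := hdr.Serialize(&buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len()) // expected: 180

	// Deserialize back and confirm the block hash is unchanged.
	var decoded wire.BlockHeader
	if err := decoded.Deserialize(bytes.NewReader(buf.Bytes())); err != nil {
		panic(err)
	}
	fmt.Println(decoded.BlockSha() == hdr.BlockSha()) // expected: true
}
```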
@@ -182,9 +278,27 @@ func TestBlockHeaderSerialize(t *testing.T) { 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, // MerkleRoot - 0x29, 0xab, 0x5f, 0x49, // Timestamp + 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, + 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, + 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, + 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, // StakeRoot + 0x00, 0x00, // VoteBits + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // FinalState + 0x00, 0x00, // Voters + 0x00, // FreshStake + 0x00, // Revocations + 0x00, 0x00, 0x00, 0x00, //Poolsize 0xff, 0xff, 0x00, 0x1d, // Bits + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // SBits + 0x00, 0x00, 0x00, 0x00, // Height + 0x00, 0x00, 0x00, 0x00, // Size + 0x29, 0xab, 0x5f, 0x49, // Timestamp 0xf3, 0xe0, 0x01, 0x00, // Nonce + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ExtraData + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, } tests := []struct { @@ -229,3 +343,28 @@ func TestBlockHeaderSerialize(t *testing.T) { } } } + +func TestBlockHeaderHashing(t *testing.T) { + dummyHeader := "0000000049e0b48ade043f729d60095ed92642d96096fe6aba42f2eda" + + "632d461591a152267dc840ff27602ce1968a81eb30a43423517207617a0150b56c4f72" + + "b803e497f00000000000000000000000000000000000000000000000000000000000000" + + "00010000000000000000000000b7000000ffff7f20204e0000000000005800000060010" + + "0008b990956000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000ABCD" + // This hash has reversed endianness compared to what chainhash spits out. + hashStr := "0d40d58703482d81d711be0ffc1b313788d3c3937e1617e4876661d33a8c4c41" + hashB, _ := hex.DecodeString(hashStr) + hash, _ := chainhash.NewHash(hashB) + + vecH, _ := hex.DecodeString(dummyHeader) + r := bytes.NewReader(vecH) + var bh wire.BlockHeader + bh.Deserialize(r) + hash2 := bh.BlockSha() + + if !hash2.IsEqual(hash) { + t.Errorf("wrong block sha returned (want %v, got %v)", + hash, + hash2) + } +} diff --git a/wire/common.go b/wire/common.go index b988c3df..98c0b224 100644 --- a/wire/common.go +++ b/wire/common.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -11,21 +12,25 @@ import ( "io" "math" - "github.com/btcsuite/fastsha256" + "github.com/decred/dcrd/chaincfg/chainhash" ) // Maximum payload size for a variable length integer. const MaxVarIntPayload = 9 +// errNonCanonicalVarInt is the common format string used for non-canonically +// encoded variable length integer errors. +var errNonCanonicalVarInt = "non-canonical varint %x - discriminant %x must " + + "encode a value greater than %x" + // readElement reads the next sequence of bytes from r using little endian // depending on the concrete type of element pointed to. func readElement(r io.Reader, element interface{}) error { - var scratch [8]byte - // Attempt to read the element based on the concrete type via fast // type assertions first. 
switch e := element.(type) { case *int32: + var scratch [4]byte b := scratch[0:4] _, err := io.ReadFull(r, b) if err != nil { @@ -35,6 +40,7 @@ func readElement(r io.Reader, element interface{}) error { return nil case *uint32: + var scratch [4]byte b := scratch[0:4] _, err := io.ReadFull(r, b) if err != nil { @@ -44,6 +50,7 @@ func readElement(r io.Reader, element interface{}) error { return nil case *int64: + var scratch [8]byte b := scratch[0:8] _, err := io.ReadFull(r, b) if err != nil { @@ -53,6 +60,7 @@ func readElement(r io.Reader, element interface{}) error { return nil case *uint64: + var scratch [8]byte b := scratch[0:8] _, err := io.ReadFull(r, b) if err != nil { @@ -62,6 +70,7 @@ func readElement(r io.Reader, element interface{}) error { return nil case *bool: + var scratch [1]byte b := scratch[0:1] _, err := io.ReadFull(r, b) if err != nil { @@ -98,7 +107,7 @@ func readElement(r io.Reader, element interface{}) error { } return nil - case *ShaHash: + case *chainhash.Hash: _, err := io.ReadFull(r, e[:]) if err != nil { return err @@ -106,6 +115,7 @@ func readElement(r io.Reader, element interface{}) error { return nil case *ServiceFlag: + var scratch [8]byte b := scratch[0:8] _, err := io.ReadFull(r, b) if err != nil { @@ -115,6 +125,7 @@ func readElement(r io.Reader, element interface{}) error { return nil case *InvType: + var scratch [4]byte b := scratch[0:4] _, err := io.ReadFull(r, b) if err != nil { @@ -123,16 +134,18 @@ func readElement(r io.Reader, element interface{}) error { *e = InvType(binary.LittleEndian.Uint32(b)) return nil - case *BitcoinNet: + case *CurrencyNet: + var scratch [4]byte b := scratch[0:4] _, err := io.ReadFull(r, b) if err != nil { return err } - *e = BitcoinNet(binary.LittleEndian.Uint32(b)) + *e = CurrencyNet(binary.LittleEndian.Uint32(b)) return nil case *BloomUpdateType: + var scratch [1]byte b := scratch[0:1] _, err := io.ReadFull(r, b) if err != nil { @@ -142,6 +155,7 @@ func readElement(r io.Reader, element interface{}) error { return nil case *RejectCode: + var scratch [1]byte b := scratch[0:1] _, err := io.ReadFull(r, b) if err != nil { @@ -170,12 +184,11 @@ func readElements(r io.Reader, elements ...interface{}) error { // writeElement writes the little endian representation of element to w. func writeElement(w io.Writer, element interface{}) error { - var scratch [8]byte - // Attempt to write the element based on the concrete type via fast // type assertions first. 
switch e := element.(type) { case int32: + var scratch [4]byte b := scratch[0:4] binary.LittleEndian.PutUint32(b, uint32(e)) _, err := w.Write(b) @@ -185,6 +198,7 @@ func writeElement(w io.Writer, element interface{}) error { return nil case uint32: + var scratch [4]byte b := scratch[0:4] binary.LittleEndian.PutUint32(b, e) _, err := w.Write(b) @@ -194,6 +208,7 @@ func writeElement(w io.Writer, element interface{}) error { return nil case int64: + var scratch [8]byte b := scratch[0:8] binary.LittleEndian.PutUint64(b, uint64(e)) _, err := w.Write(b) @@ -203,6 +218,7 @@ func writeElement(w io.Writer, element interface{}) error { return nil case uint64: + var scratch [8]byte b := scratch[0:8] binary.LittleEndian.PutUint64(b, e) _, err := w.Write(b) @@ -212,6 +228,7 @@ func writeElement(w io.Writer, element interface{}) error { return nil case bool: + var scratch [1]byte b := scratch[0:1] if e == true { b[0] = 0x01 @@ -248,7 +265,7 @@ func writeElement(w io.Writer, element interface{}) error { } return nil - case *ShaHash: + case *chainhash.Hash: _, err := w.Write(e[:]) if err != nil { return err @@ -256,6 +273,7 @@ func writeElement(w io.Writer, element interface{}) error { return nil case ServiceFlag: + var scratch [8]byte b := scratch[0:8] binary.LittleEndian.PutUint64(b, uint64(e)) _, err := w.Write(b) @@ -265,6 +283,7 @@ func writeElement(w io.Writer, element interface{}) error { return nil case InvType: + var scratch [4]byte b := scratch[0:4] binary.LittleEndian.PutUint32(b, uint32(e)) _, err := w.Write(b) @@ -273,7 +292,8 @@ func writeElement(w io.Writer, element interface{}) error { } return nil - case BitcoinNet: + case CurrencyNet: + var scratch [4]byte b := scratch[0:4] binary.LittleEndian.PutUint32(b, uint32(e)) _, err := w.Write(b) @@ -283,6 +303,7 @@ func writeElement(w io.Writer, element interface{}) error { return nil case BloomUpdateType: + var scratch [1]byte b := scratch[0:1] b[0] = uint8(e) _, err := w.Write(b) @@ -292,6 +313,7 @@ func writeElement(w io.Writer, element interface{}) error { return nil case RejectCode: + var scratch [1]byte b := scratch[0:1] b[0] = uint8(e) _, err := w.Write(b) @@ -336,6 +358,14 @@ func readVarInt(r io.Reader, pver uint32) (uint64, error) { } rv = binary.LittleEndian.Uint64(b[:]) + // The encoding is not canonical if the value could have been + // encoded using fewer bytes. + min := uint64(0x100000000) + if rv < min { + return 0, messageError("readVarInt", fmt.Sprintf( + errNonCanonicalVarInt, rv, discriminant, min)) + } + case 0xfe: _, err := io.ReadFull(r, b[0:4]) if err != nil { @@ -343,6 +373,14 @@ func readVarInt(r io.Reader, pver uint32) (uint64, error) { } rv = uint64(binary.LittleEndian.Uint32(b[:])) + // The encoding is not canonical if the value could have been + // encoded using fewer bytes. + min := uint64(0x10000) + if rv < min { + return 0, messageError("readVarInt", fmt.Sprintf( + errNonCanonicalVarInt, rv, discriminant, min)) + } + case 0xfd: _, err := io.ReadFull(r, b[0:2]) if err != nil { @@ -350,6 +388,14 @@ func readVarInt(r io.Reader, pver uint32) (uint64, error) { } rv = uint64(binary.LittleEndian.Uint16(b[:])) + // The encoding is not canonical if the value could have been + // encoded using fewer bytes. 
+ min := uint64(0xfd) + if rv < min { + return 0, messageError("readVarInt", fmt.Sprintf( + errNonCanonicalVarInt, rv, discriminant, min)) + } + default: rv = uint64(discriminant) } @@ -519,17 +565,3 @@ func randomUint64(r io.Reader) (uint64, error) { func RandomUint64() (uint64, error) { return randomUint64(rand.Reader) } - -// DoubleSha256 calculates sha256(sha256(b)) and returns the resulting bytes. -func DoubleSha256(b []byte) []byte { - first := fastsha256.Sum256(b) - second := fastsha256.Sum256(first[:]) - return second[:] -} - -// DoubleSha256SH calculates sha256(sha256(b)) and returns the resulting bytes -// as a ShaHash. -func DoubleSha256SH(b []byte) ShaHash { - first := fastsha256.Sum256(b) - return ShaHash(fastsha256.Sum256(first[:])) -} diff --git a/wire/common_test.go b/wire/common_test.go index 3d955705..cdeee173 100644 --- a/wire/common_test.go +++ b/wire/common_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -12,13 +13,15 @@ import ( "strings" "testing" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" ) // mainNetGenesisHash is the hash of the first block in the block chain for the // main network (genesis block). -var mainNetGenesisHash = wire.ShaHash([wire.HashSize]byte{ // Make go vet happy. +var mainNetGenesisHash = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, @@ -27,7 +30,7 @@ var mainNetGenesisHash = wire.ShaHash([wire.HashSize]byte{ // Make go vet happy. // mainNetGenesisMerkleRoot is the hash of the first transaction in the genesis // block for the main network. -var mainNetGenesisMerkleRoot = wire.ShaHash([wire.HashSize]byte{ // Make go vet happy. +var mainNetGenesisMerkleRoot = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, @@ -104,7 +107,7 @@ func TestElementWire(t *testing.T) { }, }, { - (*wire.ShaHash)(&[wire.HashSize]byte{ // Make go vet happy. + (*chainhash.Hash)(&[chainhash.HashSize]byte{ // Make go vet happy. 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, @@ -126,8 +129,8 @@ func TestElementWire(t *testing.T) { []byte{0x01, 0x00, 0x00, 0x00}, }, { - wire.BitcoinNet(wire.MainNet), - []byte{0xf9, 0xbe, 0xb4, 0xd9}, + wire.CurrencyNet(wire.MainNet), + []byte{0xf9, 0x00, 0xb4, 0xd9}, }, // Type not supported by the "fast" path and requires reflection. { @@ -203,7 +206,7 @@ func TestElementWireErrors(t *testing.T) { 0, io.ErrShortWrite, io.EOF, }, { - (*wire.ShaHash)(&[wire.HashSize]byte{ // Make go vet happy. + (*chainhash.Hash)(&[chainhash.HashSize]byte{ // Make go vet happy. 
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, @@ -213,7 +216,7 @@ func TestElementWireErrors(t *testing.T) { }, {wire.ServiceFlag(wire.SFNodeNetwork), 0, io.ErrShortWrite, io.EOF}, {wire.InvType(wire.InvTypeTx), 0, io.ErrShortWrite, io.EOF}, - {wire.BitcoinNet(wire.MainNet), 0, io.ErrShortWrite, io.EOF}, + {wire.CurrencyNet(wire.MainNet), 0, io.ErrShortWrite, io.EOF}, } t.Logf("Running %d tests", len(tests)) @@ -354,6 +357,62 @@ func TestVarIntWireErrors(t *testing.T) { } } +// TestVarIntNonCanonical ensures variable length integers that are not encoded +// canonically return the expected error. +func TestVarIntNonCanonical(t *testing.T) { + pver := wire.ProtocolVersion + + tests := []struct { + name string // Test name for easier identification + in []byte // Value to decode + pver uint32 // Protocol version for wire encoding + }{ + { + "0 encoded with 3 bytes", []byte{0xfd, 0x00, 0x00}, + pver, + }, + { + "max single-byte value encoded with 3 bytes", + []byte{0xfd, 0xfc, 0x00}, pver, + }, + { + "0 encoded with 5 bytes", + []byte{0xfe, 0x00, 0x00, 0x00, 0x00}, pver, + }, + { + "max three-byte value encoded with 5 bytes", + []byte{0xfe, 0xff, 0xff, 0x00, 0x00}, pver, + }, + { + "0 encoded with 9 bytes", + []byte{0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + pver, + }, + { + "max five-byte value encoded with 9 bytes", + []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00}, + pver, + }, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + // Decode from wire format. + rbuf := bytes.NewReader(test.in) + val, err := wire.TstReadVarInt(rbuf, test.pver) + if _, ok := err.(*wire.MessageError); !ok { + t.Errorf("readVarInt #%d (%s) unexpected error %v", i, + test.name, err) + continue + } + if val != 0 { + t.Errorf("readVarInt #%d (%s)\n got: %d want: 0", i, + test.name, val) + continue + } + } +} + // TestVarIntWire tests the serialize size for variable length integers. func TestVarIntSerializeSize(t *testing.T) { tests := []struct { diff --git a/wire/doc.go b/wire/doc.go index 8ae4e361..084634c7 100644 --- a/wire/doc.go +++ b/wire/doc.go @@ -1,41 +1,42 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. /* -Package wire implements the bitcoin wire protocol. +Package wire implements the decred wire protocol. -For the complete details of the bitcoin protocol, see the official wiki entry +For the complete details of the decred protocol, see the official wiki entry at https://en.bitcoin.it/wiki/Protocol_specification. The following only serves as a quick overview to provide information on how to use the package. At a high level, this package provides support for marshalling and unmarshalling -supported bitcoin messages to and from the wire. This package does not deal +supported decred messages to and from the wire. This package does not deal with the specifics of message handling such as what to do when a message is received. This provides the caller with a high level of flexibility. -Bitcoin Message Overview +Decred Message Overview -The bitcoin protocol consists of exchanging messages between peers. Each +The decred protocol consists of exchanging messages between peers. 
Each message is preceded by a header which identifies information about it such as -which bitcoin network it is a part of, its type, how big it is, and a checksum +which decred network it is a part of, its type, how big it is, and a checksum to verify validity. All encoding and decoding of message headers is handled by this package. -To accomplish this, there is a generic interface for bitcoin messages named +To accomplish this, there is a generic interface for decred messages named Message which allows messages of any type to be read, written, or passed around through channels, functions, etc. In addition, concrete implementations of most -of the currently supported bitcoin messages are provided. For these supported +of the currently supported decred messages are provided. For these supported messages, all of the details of marshalling and unmarshalling to and from the -wire using bitcoin encoding are handled so the caller doesn't have to concern +wire using decred encoding are handled so the caller doesn't have to concern themselves with the specifics. Message Interaction -The following provides a quick summary of how the bitcoin messages are intended +The following provides a quick summary of how the decred messages are intended to interact with one another. As stated above, these interactions are not directly handled by this package. For more in-depth details about the -appropriate interactions, see the official bitcoin protocol wiki entry at +appropriate interactions, see the official decred protocol wiki entry at https://en.bitcoin.it/wiki/Protocol_specification. The initial handshake consists of two peers sending each other a version message @@ -65,7 +66,7 @@ interactions in no particular order. Common Parameters There are several common parameters that arise when using this package to read -and write bitcoin messages. The following sections provide a quick overview of +and write decred messages. The following sections provide a quick overview of these parameters so the next sections can build on them. Protocol Version @@ -77,21 +78,20 @@ latest protocol version this package supports and is typically the value to use for all outbound connections before a potentially lower protocol version is negotiated. -Bitcoin Network +Decred Network -The bitcoin network is a magic number which is used to identify the start of a -message and which bitcoin network the message applies to. This package provides +The decred network is a magic number which is used to identify the start of a +message and which decred network the message applies to. This package provides the following constants: wire.MainNet - wire.TestNet (Regression test network) - wire.TestNet3 (Test network version 3) + wire.TestNet (Test network version 3) wire.SimNet (Simulation test network) Determining Message Type -As discussed in the bitcoin message overview section, this package reads -and writes bitcoin messages using a generic interface named Message. In +As discussed in the decred message overview section, this package reads +and writes decred messages using a generic interface named Message. In order to determine the actual concrete type of the message, use a type switch or type assertion. An example of a type switch follows: @@ -108,12 +108,12 @@ switch or type assertion. An example of a type switch follows: Reading Messages -In order to unmarshall bitcoin messages from the wire, use the ReadMessage +In order to unmarshall decred messages from the wire, use the ReadMessage function. 
It accepts any io.Reader, but typically this will be a net.Conn to -a remote node running a bitcoin peer. Example syntax is: +a remote node running a decred peer. Example syntax is: - // Reads and validates the next bitcoin message from conn using the - // protocol version pver and the bitcoin network btcnet. The returns + // Reads and validates the next decred message from conn using the + // protocol version pver and the decred network btcnet. The returns // are a wire.Message, a []byte which contains the unmarshalled // raw payload, and a possible error. msg, rawPayload, err := wire.ReadMessage(conn, pver, btcnet) @@ -123,16 +123,16 @@ a remote node running a bitcoin peer. Example syntax is: Writing Messages -In order to marshall bitcoin messages to the wire, use the WriteMessage +In order to marshall decred messages to the wire, use the WriteMessage function. It accepts any io.Writer, but typically this will be a net.Conn to -a remote node running a bitcoin peer. Example syntax to request addresses +a remote node running a decred peer. Example syntax to request addresses from a remote peer is: - // Create a new getaddr bitcoin message. + // Create a new getaddr decred message. msg := wire.NewMsgGetAddr() - // Writes a bitcoin message msg to conn using the protocol version - // pver, and the bitcoin network btcnet. The return is a possible + // Writes a decred message msg to conn using the protocol version + // pver, and the decred network btcnet. The return is a possible // error. err := wire.WriteMessage(conn, msg, pver, btcnet) if err != nil { diff --git a/wire/error.go b/wire/error.go index 755c2db4..3b27269f 100644 --- a/wire/error.go +++ b/wire/error.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -9,7 +10,7 @@ import ( ) // MessageError describes an issue with a message. -// An example of some potential issues are messages from the wrong bitcoin +// An example of some potential issues are messages from the wrong decred // network, invalid commands, mismatched checksums, and exceeding max payloads. // // This provides a mechanism for the caller to type assert the error to diff --git a/wire/fakeconn_test.go b/wire/fakeconn_test.go index b8536718..e7ad6267 100644 --- a/wire/fakeconn_test.go +++ b/wire/fakeconn_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. diff --git a/wire/fakemessage_test.go b/wire/fakemessage_test.go index a3ed4d6d..3fe28616 100644 --- a/wire/fakemessage_test.go +++ b/wire/fakemessage_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
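The reading and writing flows described in the doc.go and README changes above can be combined into one short round trip. This sketch substitutes a bytes.Buffer for the net.Conn used in those examples so it runs without a live peer; everything else uses only functions shown above:

```Go
package main

import (
	"bytes"
	"fmt"

	"github.com/decred/dcrd/wire"
)

func main() {
	pver := wire.ProtocolVersion
	dcrnet := wire.MainNet

	// Write a getaddr message into a buffer instead of a net.Conn.
	var buf bytes.Buffer
	if err := wire.WriteMessage(&buf, wire.NewMsgGetAddr(), pver, dcrnet); err != nil {
		panic(err)
	}

	// Read it back and use a type assertion to recover the concrete type,
	// as described in the "Determining Message Type" section above.
	msg, _, err := wire.ReadMessage(&buf, pver, dcrnet)
	if err != nil {
		panic(err)
	}
	if _, ok := msg.(*wire.MsgGetAddr); ok {
		fmt.Println("round-tripped a getaddr message")
	}
}
```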
@@ -7,7 +8,7 @@ package wire_test import ( "io" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/wire" ) // fakeMessage implements the wire.Message interface and is used to force diff --git a/wire/fixedIO_test.go b/wire/fixedIO_test.go index 77811f96..96a29425 100644 --- a/wire/fixedIO_test.go +++ b/wire/fixedIO_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. diff --git a/wire/shahash_test.go b/wire/hash_test.go similarity index 85% rename from wire/shahash_test.go rename to wire/hash_test.go index 908bfc4e..ed218c6a 100644 --- a/wire/shahash_test.go +++ b/wire/hash_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -9,7 +10,7 @@ import ( "encoding/hex" "testing" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/chaincfg/chainhash" ) // TestShaHash tests the ShaHash API. @@ -17,7 +18,7 @@ func TestShaHash(t *testing.T) { // Hash of block 234439. blockHashStr := "14a0810ac680a3eb3f82edc878cea25ec41d6b790744e5daeef" - blockHash, err := wire.NewShaHashFromStr(blockHashStr) + blockHash, err := chainhash.NewHashFromStr(blockHashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } @@ -30,15 +31,15 @@ func TestShaHash(t *testing.T) { 0xa6, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, } - hash, err := wire.NewShaHash(buf) + hash, err := chainhash.NewHash(buf) if err != nil { t.Errorf("NewShaHash: unexpected error %v", err) } // Ensure proper size. - if len(hash) != wire.HashSize { + if len(hash) != chainhash.HashSize { t.Errorf("NewShaHash: hash length mismatch - got: %v, want: %v", - len(hash), wire.HashSize) + len(hash), chainhash.HashSize) } // Ensure contents match. @@ -70,8 +71,8 @@ func TestShaHash(t *testing.T) { } // Invalid size for NewShaHash. - invalidHash := make([]byte, wire.HashSize+1) - _, err = wire.NewShaHash(invalidHash) + invalidHash := make([]byte, chainhash.HashSize+1) + _, err = chainhash.NewHash(invalidHash) if err == nil { t.Errorf("NewShaHash: failed to received expected err - got: nil") } @@ -81,7 +82,7 @@ func TestShaHash(t *testing.T) { func TestShaHashString(t *testing.T) { // Block 100000 hash. wantStr := "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" - hash := wire.ShaHash([wire.HashSize]byte{ // Make go vet happy. + hash := chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 0x06, 0xe5, 0x33, 0xfd, 0x1a, 0xda, 0x86, 0x39, 0x1f, 0x3f, 0x6c, 0x34, 0x32, 0x04, 0xb0, 0xd2, 0x78, 0xd4, 0xaa, 0xec, 0x1c, 0x0b, 0x20, 0xaa, @@ -99,7 +100,7 @@ func TestShaHashString(t *testing.T) { func TestNewShaHashFromStr(t *testing.T) { tests := []struct { in string - want wire.ShaHash + want chainhash.Hash err error }{ // Genesis hash. @@ -119,14 +120,14 @@ func TestNewShaHashFromStr(t *testing.T) { // Empty string. { "", - wire.ShaHash{}, + chainhash.Hash{}, nil, }, // Single digit hash. { "1", - wire.ShaHash([wire.HashSize]byte{ // Make go vet happy. + chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -138,7 +139,7 @@ func TestNewShaHashFromStr(t *testing.T) { // Block 203707 with stripped leading zeros. 
{ "3264bc2ac36a60840790ba1d475d01367e7c723da941069e9dc", - wire.ShaHash([wire.HashSize]byte{ // Make go vet happy. + chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 0xdc, 0xe9, 0x69, 0x10, 0x94, 0xda, 0x23, 0xc7, 0xe7, 0x67, 0x13, 0xd0, 0x75, 0xd4, 0xa1, 0x0b, 0x79, 0x40, 0x08, 0xa6, 0x36, 0xac, 0xc2, 0x4b, @@ -150,14 +151,14 @@ func TestNewShaHashFromStr(t *testing.T) { // Hash string that is too long. { "01234567890123456789012345678901234567890123456789012345678912345", - wire.ShaHash{}, - wire.ErrHashStrSize, + chainhash.Hash{}, + chainhash.ErrHashStrSize, }, // Hash string that is contains non-hex chars. { "abcdefg", - wire.ShaHash{}, + chainhash.Hash{}, hex.InvalidByteError('g'), }, } @@ -166,7 +167,7 @@ func TestNewShaHashFromStr(t *testing.T) { unexpectedResultStr := "NewShaHashFromStr #%d got: %v want: %v" t.Logf("Running %d tests", len(tests)) for i, test := range tests { - result, err := wire.NewShaHashFromStr(test.in) + result, err := chainhash.NewHashFromStr(test.in) if err != test.err { t.Errorf(unexpectedErrStr, i, err, test.err) continue diff --git a/wire/internal_test.go b/wire/internal_test.go index e3fec695..9a1bc06f 100644 --- a/wire/internal_test.go +++ b/wire/internal_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -16,9 +17,9 @@ import ( ) const ( - // MaxTxPerBlock makes the internal maxTxPerBlock constant available to + // MaxTxPerBlock makes the internal maxTxPerTxTree constant available to // the test package. - MaxTxPerBlock = maxTxPerBlock + MaxTxPerBlock = MaxTxPerTxTree // MaxFlagsPerMerkleBlock makes the internal maxFlagsPerMerkleBlock // constant available to the test package. @@ -128,3 +129,9 @@ func TstReadBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error { func TstWriteBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error { return writeBlockHeader(w, pver, bh) } + +// TstWriteBlockHeader makes the internal writeBlockHeader function available to +// the test package. +func TstBytesBlockHeader(bh *BlockHeader) ([]byte, error) { + return bh.Bytes() +} diff --git a/wire/invvect.go b/wire/invvect.go index cf7def24..ad2e7efc 100644 --- a/wire/invvect.go +++ b/wire/invvect.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -7,15 +8,17 @@ package wire import ( "fmt" "io" + + "github.com/decred/dcrd/chaincfg/chainhash" ) const ( // MaxInvPerMsg is the maximum number of inventory vectors that can be in a - // single bitcoin inv message. + // single decred inv message. MaxInvPerMsg = 50000 // Maximum payload size for an inventory vector. - maxInvVectPayload = 4 + HashSize + maxInvVectPayload = 4 + chainhash.HashSize ) // InvType represents the allowed types of inventory vectors. See InvVect. @@ -46,16 +49,16 @@ func (invtype InvType) String() string { return fmt.Sprintf("Unknown InvType (%d)", uint32(invtype)) } -// InvVect defines a bitcoin inventory vector which is used to describe data, +// InvVect defines a decred inventory vector which is used to describe data, // as specified by the Type field, that a peer wants, has, or does not have to // another peer. 
type InvVect struct { - Type InvType // Type of data - Hash ShaHash // Hash of the data + Type InvType // Type of data + Hash chainhash.Hash // Hash of the data } // NewInvVect returns a new InvVect using the provided type and hash. -func NewInvVect(typ InvType, hash *ShaHash) *InvVect { +func NewInvVect(typ InvType, hash *chainhash.Hash) *InvVect { return &InvVect{ Type: typ, Hash: *hash, diff --git a/wire/invvect_test.go b/wire/invvect_test.go index da3fddd9..a9ba15f6 100644 --- a/wire/invvect_test.go +++ b/wire/invvect_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -9,8 +10,10 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" ) // TestInvVectStringer tests the stringized output for inventory vector types. @@ -40,7 +43,7 @@ func TestInvTypeStringer(t *testing.T) { // TestInvVect tests the InvVect API. func TestInvVect(t *testing.T) { ivType := wire.InvTypeBlock - hash := wire.ShaHash{} + hash := chainhash.Hash{} // Ensure we get the same payload and signature back out. iv := wire.NewInvVect(ivType, &hash) @@ -60,7 +63,7 @@ func TestInvVect(t *testing.T) { func TestInvVectWire(t *testing.T) { // Block 203707 hash. hashStr := "3264bc2ac36a60840790ba1d475d01367e7c723da941069e9dc" - baseHash, err := wire.NewShaHashFromStr(hashStr) + baseHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } @@ -68,7 +71,7 @@ func TestInvVectWire(t *testing.T) { // errInvVect is an inventory vector with an error. errInvVect := wire.InvVect{ Type: wire.InvTypeError, - Hash: wire.ShaHash{}, + Hash: chainhash.Hash{}, } // errInvVectEncoded is the wire encoded bytes of errInvVect. @@ -139,102 +142,6 @@ func TestInvVectWire(t *testing.T) { blockInvVectEncoded, wire.ProtocolVersion, }, - - // Protocol version BIP0035Version error inventory vector. - { - errInvVect, - errInvVect, - errInvVectEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0035Version tx inventory vector. - { - txInvVect, - txInvVect, - txInvVectEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0035Version block inventory vector. - { - blockInvVect, - blockInvVect, - blockInvVectEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0031Version error inventory vector. - { - errInvVect, - errInvVect, - errInvVectEncoded, - wire.BIP0031Version, - }, - - // Protocol version BIP0031Version tx inventory vector. - { - txInvVect, - txInvVect, - txInvVectEncoded, - wire.BIP0031Version, - }, - - // Protocol version BIP0031Version block inventory vector. - { - blockInvVect, - blockInvVect, - blockInvVectEncoded, - wire.BIP0031Version, - }, - - // Protocol version NetAddressTimeVersion error inventory vector. - { - errInvVect, - errInvVect, - errInvVectEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version NetAddressTimeVersion tx inventory vector. - { - txInvVect, - txInvVect, - txInvVectEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version NetAddressTimeVersion block inventory vector. - { - blockInvVect, - blockInvVect, - blockInvVectEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version MultipleAddressVersion error inventory vector. 
- { - errInvVect, - errInvVect, - errInvVectEncoded, - wire.MultipleAddressVersion, - }, - - // Protocol version MultipleAddressVersion tx inventory vector. - { - txInvVect, - txInvVect, - txInvVectEncoded, - wire.MultipleAddressVersion, - }, - - // Protocol version MultipleAddressVersion block inventory vector. - { - blockInvVect, - blockInvVect, - blockInvVectEncoded, - wire.MultipleAddressVersion, - }, } t.Logf("Running %d tests", len(tests)) diff --git a/wire/message.go b/wire/message.go index aaf803dd..71a82bac 100644 --- a/wire/message.go +++ b/wire/message.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -9,14 +10,16 @@ import ( "fmt" "io" "unicode/utf8" + + "github.com/decred/dcrd/chaincfg/chainhash" ) -// MessageHeaderSize is the number of bytes in a bitcoin message header. -// Bitcoin network (magic) 4 bytes + command 12 bytes + payload length 4 bytes + +// MessageHeaderSize is the number of bytes in a decred message header. +// Decred network (magic) 4 bytes + command 12 bytes + payload length 4 bytes + // checksum 4 bytes. const MessageHeaderSize = 24 -// CommandSize is the fixed size of all commands in the common bitcoin message +// CommandSize is the fixed size of all commands in the common decred message // header. Shorter commands must be zero padded. const CommandSize = 12 @@ -24,32 +27,34 @@ const CommandSize = 12 // individual limits imposed by messages themselves. const MaxMessagePayload = (1024 * 1024 * 32) // 32MB -// Commands used in bitcoin message headers which describe the type of message. +// Commands used in message headers which describe the type of message. const ( - CmdVersion = "version" - CmdVerAck = "verack" - CmdGetAddr = "getaddr" - CmdAddr = "addr" - CmdGetBlocks = "getblocks" - CmdInv = "inv" - CmdGetData = "getdata" - CmdNotFound = "notfound" - CmdBlock = "block" - CmdTx = "tx" - CmdGetHeaders = "getheaders" - CmdHeaders = "headers" - CmdPing = "ping" - CmdPong = "pong" - CmdAlert = "alert" - CmdMemPool = "mempool" - CmdFilterAdd = "filteradd" - CmdFilterClear = "filterclear" - CmdFilterLoad = "filterload" - CmdMerkleBlock = "merkleblock" - CmdReject = "reject" + CmdVersion = "version" + CmdVerAck = "verack" + CmdGetAddr = "getaddr" + CmdAddr = "addr" + CmdGetBlocks = "getblocks" + CmdInv = "inv" + CmdGetData = "getdata" + CmdNotFound = "notfound" + CmdBlock = "block" + CmdTx = "tx" + CmdGetHeaders = "getheaders" + CmdHeaders = "headers" + CmdPing = "ping" + CmdPong = "pong" + CmdAlert = "alert" + CmdMemPool = "mempool" + CmdMiningState = "miningstate" + CmdGetMiningState = "getminings" + CmdFilterAdd = "filteradd" + CmdFilterClear = "filterclear" + CmdFilterLoad = "filterload" + CmdMerkleBlock = "merkleblock" + CmdReject = "reject" ) -// Message is an interface that describes a bitcoin message. A type that +// Message is an interface that describes a decred message. A type that // implements Message has complete control over the representation of its data // and may therefore contain additional or fewer fields than those which // are used directly in the protocol encoded message. 
@@ -113,6 +118,12 @@ func makeEmptyMessage(command string) (Message, error) { case CmdMemPool: msg = &MsgMemPool{} + case CmdMiningState: + msg = &MsgMiningState{} + + case CmdGetMiningState: + msg = &MsgGetMiningState{} + case CmdFilterAdd: msg = &MsgFilterAdd{} @@ -134,15 +145,15 @@ func makeEmptyMessage(command string) (Message, error) { return msg, nil } -// messageHeader defines the header structure for all bitcoin protocol messages. +// messageHeader defines the header structure for all decred protocol messages. type messageHeader struct { - magic BitcoinNet // 4 bytes - command string // 12 bytes - length uint32 // 4 bytes - checksum [4]byte // 4 bytes + magic CurrencyNet // 4 bytes + command string // 12 bytes + length uint32 // 4 bytes + checksum [4]byte // 4 bytes } -// readMessageHeader reads a bitcoin message header from r. +// readMessageHeader reads a decred message header from r. func readMessageHeader(r io.Reader) (int, *messageHeader, error) { // Since readElements doesn't return the amount of bytes read, attempt // to read the entire header into a buffer first in case there is a @@ -186,10 +197,10 @@ func discardInput(r io.Reader, n uint32) { } } -// WriteMessageN writes a bitcoin Message to w including the necessary header +// WriteMessageN writes a decred Message to w including the necessary header // information and returns the number of bytes written. This function is the // same as WriteMessage except it also returns the number of bytes written. -func WriteMessageN(w io.Writer, msg Message, pver uint32, btcnet BitcoinNet) (int, error) { +func WriteMessageN(w io.Writer, msg Message, pver uint32, dcrnet CurrencyNet) (int, error) { totalBytes := 0 // Enforce max command size. @@ -230,10 +241,10 @@ func WriteMessageN(w io.Writer, msg Message, pver uint32, btcnet BitcoinNet) (in // Create header for the message. hdr := messageHeader{} - hdr.magic = btcnet + hdr.magic = dcrnet hdr.command = cmd hdr.length = uint32(lenp) - copy(hdr.checksum[:], DoubleSha256(payload)[0:4]) + copy(hdr.checksum[:], chainhash.HashFuncB(payload)[0:4]) // Encode the header for the message. This is done to a buffer // rather than directly to the writer since writeElements doesn't @@ -260,22 +271,22 @@ func WriteMessageN(w io.Writer, msg Message, pver uint32, btcnet BitcoinNet) (in return totalBytes, nil } -// WriteMessage writes a bitcoin Message to w including the necessary header +// WriteMessage writes a decred Message to w including the necessary header // information. This function is the same as WriteMessageN except it doesn't // doesn't return the number of bytes written. This function is mainly provided // for backwards compatibility with the original API, but it's also useful for // callers that don't care about byte counts. -func WriteMessage(w io.Writer, msg Message, pver uint32, btcnet BitcoinNet) error { - _, err := WriteMessageN(w, msg, pver, btcnet) +func WriteMessage(w io.Writer, msg Message, pver uint32, dcrnet CurrencyNet) error { + _, err := WriteMessageN(w, msg, pver, dcrnet) return err } -// ReadMessageN reads, validates, and parses the next bitcoin Message from r for -// the provided protocol version and bitcoin network. It returns the number of +// ReadMessageN reads, validates, and parses the next decred Message from r for +// the provided protocol version and decred network. It returns the number of // bytes read in addition to the parsed Message and raw bytes which comprise the // message. 
This function is the same as ReadMessage except it also returns the // number of bytes read. -func ReadMessageN(r io.Reader, pver uint32, btcnet BitcoinNet) (int, Message, []byte, error) { +func ReadMessageN(r io.Reader, pver uint32, dcrnet CurrencyNet) (int, Message, []byte, error) { totalBytes := 0 n, hdr, err := readMessageHeader(r) if err != nil { @@ -293,8 +304,8 @@ func ReadMessageN(r io.Reader, pver uint32, btcnet BitcoinNet) (int, Message, [] } - // Check for messages from the wrong bitcoin network. - if hdr.magic != btcnet { + // Check for messages from the wrong decred network. + if hdr.magic != dcrnet { discardInput(r, hdr.length) str := fmt.Sprintf("message from other network [%v]", hdr.magic) return totalBytes, nil, nil, messageError("ReadMessage", str) @@ -338,7 +349,7 @@ func ReadMessageN(r io.Reader, pver uint32, btcnet BitcoinNet) (int, Message, [] totalBytes += n // Test checksum. - checksum := DoubleSha256(payload)[0:4] + checksum := chainhash.HashFuncB(payload)[0:4] if !bytes.Equal(checksum[:], hdr.checksum[:]) { str := fmt.Sprintf("payload checksum failed - header "+ "indicates %v, but actual checksum is %v.", @@ -357,13 +368,13 @@ func ReadMessageN(r io.Reader, pver uint32, btcnet BitcoinNet) (int, Message, [] return totalBytes, msg, payload, nil } -// ReadMessage reads, validates, and parses the next bitcoin Message from r for -// the provided protocol version and bitcoin network. It returns the parsed +// ReadMessage reads, validates, and parses the next decred Message from r for +// the provided protocol version and decred network. It returns the parsed // Message and raw bytes which comprise the message. This function only differs // from ReadMessageN in that it doesn't return the number of bytes read. This // function is mainly provided for backwards compatibility with the original // API, but it's also useful for callers that don't care about byte counts. -func ReadMessage(r io.Reader, pver uint32, btcnet BitcoinNet) (Message, []byte, error) { - _, msg, buf, err := ReadMessageN(r, pver, btcnet) +func ReadMessage(r io.Reader, pver uint32, dcrnet CurrencyNet) (Message, []byte, error) { + _, msg, buf, err := ReadMessageN(r, pver, dcrnet) return msg, buf, err } diff --git a/wire/message_test.go b/wire/message_test.go index e913dd10..8746d6eb 100644 --- a/wire/message_test.go +++ b/wire/message_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -13,20 +14,21 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" ) // makeHeader is a convenience function to make a message header in the form of // a byte slice. It is used to force errors when reading messages. -func makeHeader(btcnet wire.BitcoinNet, command string, +func makeHeader(dcrnet wire.CurrencyNet, command string, payloadLen uint32, checksum uint32) []byte { - // The length of a bitcoin message header is 24 bytes. - // 4 byte magic number of the bitcoin network + 12 byte command + 4 byte + // The length of a decred message header is 24 bytes. + // 4 byte magic number of the decred network + 12 byte command + 4 byte // payload length + 4 byte checksum. 
buf := make([]byte, 24) - binary.LittleEndian.PutUint32(buf, uint32(btcnet)) + binary.LittleEndian.PutUint32(buf, uint32(dcrnet)) copy(buf[4:], []byte(command)) binary.LittleEndian.PutUint32(buf[16:], payloadLen) binary.LittleEndian.PutUint32(buf[20:], checksum) @@ -57,8 +59,8 @@ func TestMessage(t *testing.T) { msgVerack := wire.NewMsgVerAck() msgGetAddr := wire.NewMsgGetAddr() msgAddr := wire.NewMsgAddr() - msgGetBlocks := wire.NewMsgGetBlocks(&wire.ShaHash{}) - msgBlock := &blockOne + msgGetBlocks := wire.NewMsgGetBlocks(&chainhash.Hash{}) + msgBlock := &testBlock msgInv := wire.NewMsgInv() msgGetData := wire.NewMsgGetData() msgNotFound := wire.NewMsgNotFound() @@ -72,45 +74,62 @@ func TestMessage(t *testing.T) { msgFilterAdd := wire.NewMsgFilterAdd([]byte{0x01}) msgFilterClear := wire.NewMsgFilterClear() msgFilterLoad := wire.NewMsgFilterLoad([]byte{0x01}, 10, 0, wire.BloomUpdateNone) - bh := wire.NewBlockHeader(&wire.ShaHash{}, &wire.ShaHash{}, 0, 0) + bh := wire.NewBlockHeader( + int32(0), // Version + &chainhash.Hash{}, // PrevHash + &chainhash.Hash{}, // MerkleRoot + &chainhash.Hash{}, // StakeRoot + uint16(0x0000), // VoteBits + [6]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // FinalState + uint16(0x0000), // Voters + uint8(0x00), // FreshStake + uint8(0x00), // Revocations + uint32(0), // Poolsize + uint32(0x00000000), // Bits + int64(0x0000000000000000), // Sbits + uint32(0), // Height + uint32(0), // Size + uint32(0x00000000), // Nonce + [36]byte{}, // ExtraData + ) msgMerkleBlock := wire.NewMsgMerkleBlock(bh) msgReject := wire.NewMsgReject("block", wire.RejectDuplicate, "duplicate block") tests := []struct { - in wire.Message // Value to encode - out wire.Message // Expected decoded value - pver uint32 // Protocol version for wire encoding - btcnet wire.BitcoinNet // Network to use for wire encoding - bytes int // Expected num bytes read/written + in wire.Message // Value to encode + out wire.Message // Expected decoded value + pver uint32 // Protocol version for wire encoding + dcrnet wire.CurrencyNet // Network to use for wire encoding + bytes int // Expected num bytes read/written }{ - {msgVersion, msgVersion, pver, wire.MainNet, 125}, - {msgVerack, msgVerack, pver, wire.MainNet, 24}, - {msgGetAddr, msgGetAddr, pver, wire.MainNet, 24}, - {msgAddr, msgAddr, pver, wire.MainNet, 25}, - {msgGetBlocks, msgGetBlocks, pver, wire.MainNet, 61}, - {msgBlock, msgBlock, pver, wire.MainNet, 239}, - {msgInv, msgInv, pver, wire.MainNet, 25}, - {msgGetData, msgGetData, pver, wire.MainNet, 25}, - {msgNotFound, msgNotFound, pver, wire.MainNet, 25}, - {msgTx, msgTx, pver, wire.MainNet, 34}, - {msgPing, msgPing, pver, wire.MainNet, 32}, - {msgPong, msgPong, pver, wire.MainNet, 32}, - {msgGetHeaders, msgGetHeaders, pver, wire.MainNet, 61}, - {msgHeaders, msgHeaders, pver, wire.MainNet, 25}, - {msgAlert, msgAlert, pver, wire.MainNet, 42}, - {msgMemPool, msgMemPool, pver, wire.MainNet, 24}, - {msgFilterAdd, msgFilterAdd, pver, wire.MainNet, 26}, - {msgFilterClear, msgFilterClear, pver, wire.MainNet, 24}, - {msgFilterLoad, msgFilterLoad, pver, wire.MainNet, 35}, - {msgMerkleBlock, msgMerkleBlock, pver, wire.MainNet, 110}, - {msgReject, msgReject, pver, wire.MainNet, 79}, + {msgVersion, msgVersion, pver, wire.MainNet, 125}, // [0] + {msgVerack, msgVerack, pver, wire.MainNet, 24}, // [1] + {msgGetAddr, msgGetAddr, pver, wire.MainNet, 24}, // [2] + {msgAddr, msgAddr, pver, wire.MainNet, 25}, // [3] + {msgGetBlocks, msgGetBlocks, pver, wire.MainNet, 61}, // [4] + {msgBlock, msgBlock, pver, wire.MainNet, 
522}, // [5] + {msgInv, msgInv, pver, wire.MainNet, 25}, // [6] + {msgGetData, msgGetData, pver, wire.MainNet, 25}, // [7] + {msgNotFound, msgNotFound, pver, wire.MainNet, 25}, // [8] + {msgTx, msgTx, pver, wire.MainNet, 39}, // [9] + {msgPing, msgPing, pver, wire.MainNet, 32}, // [10] + {msgPong, msgPong, pver, wire.MainNet, 32}, // [11] + {msgGetHeaders, msgGetHeaders, pver, wire.MainNet, 61}, // [12] + {msgHeaders, msgHeaders, pver, wire.MainNet, 25}, // [13] + {msgAlert, msgAlert, pver, wire.MainNet, 42}, // [14] + {msgMemPool, msgMemPool, pver, wire.MainNet, 24}, // [15] + {msgFilterAdd, msgFilterAdd, pver, wire.MainNet, 26}, // [16] + {msgFilterClear, msgFilterClear, pver, wire.MainNet, 24}, // [17] + {msgFilterLoad, msgFilterLoad, pver, wire.MainNet, 35}, // [18] + {msgMerkleBlock, msgMerkleBlock, pver, wire.MainNet, 215}, // [19] + {msgReject, msgReject, pver, wire.MainNet, 79}, // [20] } t.Logf("Running %d tests", len(tests)) for i, test := range tests { // Encode to wire format. var buf bytes.Buffer - nw, err := wire.WriteMessageN(&buf, test.in, test.pver, test.btcnet) + nw, err := wire.WriteMessageN(&buf, test.in, test.pver, test.dcrnet) if err != nil { t.Errorf("WriteMessage #%d error %v", i, err) continue @@ -124,7 +143,7 @@ func TestMessage(t *testing.T) { // Decode from wire format. rbuf := bytes.NewReader(buf.Bytes()) - nr, msg, _, err := wire.ReadMessageN(rbuf, test.pver, test.btcnet) + nr, msg, _, err := wire.ReadMessageN(rbuf, test.pver, test.dcrnet) if err != nil { t.Errorf("ReadMessage #%d error %v, msg %v", i, err, spew.Sdump(msg)) @@ -149,7 +168,7 @@ func TestMessage(t *testing.T) { for i, test := range tests { // Encode to wire format. var buf bytes.Buffer - err := wire.WriteMessage(&buf, test.in, test.pver, test.btcnet) + err := wire.WriteMessage(&buf, test.in, test.pver, test.dcrnet) if err != nil { t.Errorf("WriteMessage #%d error %v", i, err) continue @@ -157,7 +176,7 @@ func TestMessage(t *testing.T) { // Decode from wire format. rbuf := bytes.NewReader(buf.Bytes()) - msg, _, err := wire.ReadMessage(rbuf, test.pver, test.btcnet) + msg, _, err := wire.ReadMessage(rbuf, test.pver, test.dcrnet) if err != nil { t.Errorf("ReadMessage #%d error %v, msg %v", i, err, spew.Sdump(msg)) @@ -175,7 +194,7 @@ func TestMessage(t *testing.T) { // concrete messages to confirm error paths work correctly. func TestReadMessageWireErrors(t *testing.T) { pver := wire.ProtocolVersion - btcnet := wire.MainNet + dcrnet := wire.MainNet // Ensure message errors are as expected with no function specified. wantErr := "something bad happened" @@ -193,31 +212,31 @@ func TestReadMessageWireErrors(t *testing.T) { testErr.Error(), wantErr) } - // Wire encoded bytes for main and testnet3 networks magic identifiers. - testNet3Bytes := makeHeader(wire.TestNet3, "", 0, 0) + // Wire encoded bytes for main and testnet networks magic identifiers. + testNet3Bytes := makeHeader(wire.TestNet, "", 0, 0) // Wire encoded bytes for a message that exceeds max overall message // length. mpl := uint32(wire.MaxMessagePayload) - exceedMaxPayloadBytes := makeHeader(btcnet, "getaddr", mpl+1, 0) + exceedMaxPayloadBytes := makeHeader(dcrnet, "getaddr", mpl+1, 0) // Wire encoded bytes for a command which is invalid utf-8. - badCommandBytes := makeHeader(btcnet, "bogus", 0, 0) + badCommandBytes := makeHeader(dcrnet, "bogus", 0, 0) badCommandBytes[4] = 0x81 // Wire encoded bytes for a command which is valid, but not supported. 
- unsupportedCommandBytes := makeHeader(btcnet, "bogus", 0, 0) + unsupportedCommandBytes := makeHeader(dcrnet, "bogus", 0, 0) // Wire encoded bytes for a message which exceeds the max payload for // a specific message type. - exceedTypePayloadBytes := makeHeader(btcnet, "getaddr", 1, 0) + exceedTypePayloadBytes := makeHeader(dcrnet, "getaddr", 1, 0) // Wire encoded bytes for a message which does not deliver the full // payload according to the header length. - shortPayloadBytes := makeHeader(btcnet, "version", 115, 0) + shortPayloadBytes := makeHeader(dcrnet, "version", 115, 0) // Wire encoded bytes for a message with a bad checksum. - badChecksumBytes := makeHeader(btcnet, "version", 2, 0xbeef) + badChecksumBytes := makeHeader(dcrnet, "version", 2, 0xbeef) badChecksumBytes = append(badChecksumBytes, []byte{0x0, 0x0}...) // Wire encoded bytes for a message which has a valid header, but is @@ -225,118 +244,118 @@ func TestReadMessageWireErrors(t *testing.T) { // contained in the message. Claim there is two, but don't provide // them. At the same time, forge the header fields so the message is // otherwise accurate. - badMessageBytes := makeHeader(btcnet, "addr", 1, 0xeaadc31c) + badMessageBytes := makeHeader(dcrnet, "addr", 1, 0xeaadc31c) badMessageBytes = append(badMessageBytes, 0x2) // Wire encoded bytes for a message which the header claims has 15k // bytes of data to discard. - discardBytes := makeHeader(btcnet, "bogus", 15*1024, 0) + discardBytes := makeHeader(dcrnet, "bogus", 15*1024, 0) tests := []struct { - buf []byte // Wire encoding - pver uint32 // Protocol version for wire encoding - btcnet wire.BitcoinNet // Bitcoin network for wire encoding - max int // Max size of fixed buffer to induce errors - readErr error // Expected read error - bytes int // Expected num bytes read + buf []byte // Wire encoding + pver uint32 // Protocol version for wire encoding + dcrnet wire.CurrencyNet // Decred network for wire encoding + max int // Max size of fixed buffer to induce errors + readErr error // Expected read error + bytes int // Expected num bytes read }{ // Latest protocol version with intentional read errors. - // Short header. + // Short header. [0] { []byte{}, pver, - btcnet, + dcrnet, 0, io.EOF, 0, }, - // Wrong network. Want MainNet, but giving TestNet3. + // Wrong network. Want MainNet, but giving TestNet. [1] { testNet3Bytes, pver, - btcnet, + dcrnet, len(testNet3Bytes), &wire.MessageError{}, 24, }, - // Exceed max overall message payload length. + // Exceed max overall message payload length. [2] { exceedMaxPayloadBytes, pver, - btcnet, + dcrnet, len(exceedMaxPayloadBytes), &wire.MessageError{}, 24, }, - // Invalid UTF-8 command. + // Invalid UTF-8 command. [3] { badCommandBytes, pver, - btcnet, + dcrnet, len(badCommandBytes), &wire.MessageError{}, 24, }, - // Valid, but unsupported command. + // Valid, but unsupported command. [4] { unsupportedCommandBytes, pver, - btcnet, + dcrnet, len(unsupportedCommandBytes), &wire.MessageError{}, 24, }, - // Exceed max allowed payload for a message of a specific type. + // Exceed max allowed payload for a message of a specific type. [5] { exceedTypePayloadBytes, pver, - btcnet, + dcrnet, len(exceedTypePayloadBytes), &wire.MessageError{}, 24, }, - // Message with a payload shorter than the header indicates. + // Message with a payload shorter than the header indicates. [6] { shortPayloadBytes, pver, - btcnet, + dcrnet, len(shortPayloadBytes), io.EOF, 24, }, - // Message with a bad checksum. + // Message with a bad checksum. 
[7] { badChecksumBytes, pver, - btcnet, + dcrnet, len(badChecksumBytes), &wire.MessageError{}, 26, }, - // Message with a valid header, but wrong format. + // Message with a valid header, but wrong format. [8] { badMessageBytes, pver, - btcnet, + dcrnet, len(badMessageBytes), - io.EOF, + &wire.MessageError{}, 25, }, - // 15k bytes of data to discard. + // 15k bytes of data to discard. [9] { discardBytes, pver, - btcnet, + dcrnet, len(discardBytes), &wire.MessageError{}, 24, @@ -347,7 +366,7 @@ func TestReadMessageWireErrors(t *testing.T) { for i, test := range tests { // Decode from wire format. r := newFixedReader(test.max, test.buf) - nr, _, _, err := wire.ReadMessageN(r, test.pver, test.btcnet) + nr, _, _, err := wire.ReadMessageN(r, test.pver, test.dcrnet) if reflect.TypeOf(err) != reflect.TypeOf(test.readErr) { t.Errorf("ReadMessage #%d wrong error got: %v <%T>, "+ "want: %T", i, err, err, test.readErr) @@ -377,7 +396,7 @@ func TestReadMessageWireErrors(t *testing.T) { // concrete messages to confirm error paths work correctly. func TestWriteMessageWireErrors(t *testing.T) { pver := wire.ProtocolVersion - btcnet := wire.MainNet + dcrnet := wire.MainNet wireErr := &wire.MessageError{} // Fake message with a command that is too long. @@ -400,32 +419,32 @@ func TestWriteMessageWireErrors(t *testing.T) { bogusMsg := &fakeMessage{command: "bogus", payload: bogusPayload} tests := []struct { - msg wire.Message // Message to encode - pver uint32 // Protocol version for wire encoding - btcnet wire.BitcoinNet // Bitcoin network for wire encoding - max int // Max size of fixed buffer to induce errors - err error // Expected error - bytes int // Expected num bytes written + msg wire.Message // Message to encode + pver uint32 // Protocol version for wire encoding + dcrnet wire.CurrencyNet // Decred network for wire encoding + max int // Max size of fixed buffer to induce errors + err error // Expected error + bytes int // Expected num bytes written }{ // Command too long. - {badCommandMsg, pver, btcnet, 0, wireErr, 0}, + {badCommandMsg, pver, dcrnet, 0, wireErr, 0}, // Force error in payload encode. - {encodeErrMsg, pver, btcnet, 0, wireErr, 0}, + {encodeErrMsg, pver, dcrnet, 0, wireErr, 0}, // Force error due to exceeding max overall message payload size. - {exceedOverallPayloadErrMsg, pver, btcnet, 0, wireErr, 0}, + {exceedOverallPayloadErrMsg, pver, dcrnet, 0, wireErr, 0}, // Force error due to exceeding max payload for message type. - {exceedPayloadErrMsg, pver, btcnet, 0, wireErr, 0}, + {exceedPayloadErrMsg, pver, dcrnet, 0, wireErr, 0}, // Force error in header write. - {bogusMsg, pver, btcnet, 0, io.ErrShortWrite, 0}, + {bogusMsg, pver, dcrnet, 0, io.ErrShortWrite, 0}, // Force error in payload write. - {bogusMsg, pver, btcnet, 24, io.ErrShortWrite, 24}, + {bogusMsg, pver, dcrnet, 24, io.ErrShortWrite, 24}, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { // Encode wire format. 
w := newFixedWriter(test.max) - nw, err := wire.WriteMessageN(w, test.msg, test.pver, test.btcnet) + nw, err := wire.WriteMessageN(w, test.msg, test.pver, test.dcrnet) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("WriteMessage #%d wrong error got: %v <%T>, "+ "want: %T", i, err, err, test.err) diff --git a/wire/msgaddr.go b/wire/msgaddr.go index 26889010..ddaa5ecc 100644 --- a/wire/msgaddr.go +++ b/wire/msgaddr.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -88,7 +89,7 @@ func (msg *MsgAddr) BtcEncode(w io.Writer, pver uint32) error { // Protocol versions before MultipleAddressVersion only allowed 1 address // per message. count := len(msg.AddrList) - if pver < MultipleAddressVersion && count > 1 { + if pver < ProtocolVersion && count > 1 { str := fmt.Sprintf("too many addresses for message of "+ "protocol version %v [count %v, max 1]", pver, count) return messageError("MsgAddr.BtcEncode", str) @@ -124,7 +125,7 @@ func (msg *MsgAddr) Command() string { // MaxPayloadLength returns the maximum length the payload can be for the // receiver. This is part of the Message interface implementation. func (msg *MsgAddr) MaxPayloadLength(pver uint32) uint32 { - if pver < MultipleAddressVersion { + if pver < ProtocolVersion { // Num addresses (varInt) + a single net addresses. return MaxVarIntPayload + maxNetAddressPayload(pver) } diff --git a/wire/msgaddr_test.go b/wire/msgaddr_test.go index e92b2f1c..0103eaa9 100644 --- a/wire/msgaddr_test.go +++ b/wire/msgaddr_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -12,8 +13,8 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + "github.com/decred/dcrd/wire" ) // TestAddr tests the MsgAddr API. @@ -76,30 +77,6 @@ func TestAddr(t *testing.T) { "not received") } - // Ensure max payload is expected value for protocol versions before - // timestamp was added to NetAddress. - // Num addresses (varInt) + max allowed addresses. - pver = wire.NetAddressTimeVersion - 1 - wantPayload = uint32(26009) - maxPayload = msg.MaxPayloadLength(pver) - if maxPayload != wantPayload { - t.Errorf("MaxPayloadLength: wrong max payload length for "+ - "protocol version %d - got %v, want %v", pver, - maxPayload, wantPayload) - } - - // Ensure max payload is expected value for protocol versions before - // multiple addresses were allowed. - // Num addresses (varInt) + a single net addresses. - pver = wire.MultipleAddressVersion - 1 - wantPayload = uint32(35) - maxPayload = msg.MaxPayloadLength(pver) - if maxPayload != wantPayload { - t.Errorf("MaxPayloadLength: wrong max payload length for "+ - "protocol version %d - got %v, want %v", pver, - maxPayload, wantPayload) - } - return } @@ -165,14 +142,6 @@ func TestAddrWire(t *testing.T) { multiAddrEncoded, wire.ProtocolVersion, }, - - // Protocol version MultipleAddressVersion-1 with no addresses. - { - noAddr, - noAddr, - noAddrEncoded, - wire.MultipleAddressVersion - 1, - }, } t.Logf("Running %d tests", len(tests)) @@ -210,7 +179,6 @@ func TestAddrWire(t *testing.T) { // of MsgAddr to confirm error paths work correctly. 
func TestAddrWireErrors(t *testing.T) { pver := wire.ProtocolVersion - pverMA := wire.MultipleAddressVersion wireErr := &wire.MessageError{} // A couple of NetAddresses to use for testing. @@ -271,9 +239,6 @@ func TestAddrWireErrors(t *testing.T) { {baseAddr, baseAddrEncoded, pver, 1, io.ErrShortWrite, io.EOF}, // Force error with greater than max inventory vectors. {maxAddr, maxAddrEncoded, pver, 3, wireErr, wireErr}, - // Force error with greater than max inventory vectors for - // protocol versions before multiple addresses were allowed. - {maxAddr, maxAddrEncoded, pverMA - 1, 3, wireErr, wireErr}, } t.Logf("Running %d tests", len(tests)) diff --git a/wire/msgalert.go b/wire/msgalert.go index 2f9d2ed8..f9fafa6d 100644 --- a/wire/msgalert.go +++ b/wire/msgalert.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -95,7 +96,7 @@ const maxCountSetCancel = (maxAlertSize - fixedAlertSize - MaxVarIntPayload + 1) // for caculating maximum number of subversions, set all other var sizes to 0 // maxAlertSize = fixedAlertSize + (MaxVarIntPayload-1) + x*sizeOf(string) // x = (maxAlertSize - fixedAlertSize - MaxVarIntPayload + 1) / sizeOf(string) -// subversion would typically be something like "/Satoshi:0.7.2/" (15 bytes) +// subversion would typically be something like "/Dcrd:0.7.2/" (15 bytes) // so assuming < 255 bytes, sizeOf(string) = sizeOf(uint8) + 255 = 256 const maxCountSetSubVer = (maxAlertSize - fixedAlertSize - MaxVarIntPayload + 1) / 256 @@ -319,7 +320,7 @@ func NewAlertFromPayload(serializedPayload []byte, pver uint32) (*Alert, error) return &alert, nil } -// MsgAlert implements the Message interface and defines a bitcoin alert +// MsgAlert implements the Message interface and defines a decred alert // message. // // This is a signed message that provides notifications that the client should @@ -338,7 +339,7 @@ type MsgAlert struct { Payload *Alert } -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgAlert) BtcDecode(r io.Reader, pver uint32) error { var err error @@ -363,7 +364,7 @@ func (msg *MsgAlert) BtcDecode(r io.Reader, pver uint32) error { return nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. // This is part of the Message interface implementation. func (msg *MsgAlert) BtcEncode(w io.Writer, pver uint32) error { var err error @@ -411,7 +412,7 @@ func (msg *MsgAlert) MaxPayloadLength(pver uint32) uint32 { return MaxMessagePayload } -// NewMsgAlert returns a new bitcoin alert message that conforms to the Message +// NewMsgAlert returns a new decred alert message that conforms to the Message // interface. See MsgAlert for details. func NewMsgAlert(serializedPayload []byte, signature []byte) *MsgAlert { return &MsgAlert{ diff --git a/wire/msgalert_test.go b/wire/msgalert_test.go index e850d926..5398cc00 100644 --- a/wire/msgalert_test.go +++ b/wire/msgalert_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
@@ -10,8 +11,8 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + "github.com/decred/dcrd/wire" ) // TestMsgAlert tests the MsgAlert API. @@ -107,38 +108,6 @@ func TestMsgAlertWire(t *testing.T) { baseMsgAlertEncoded, wire.ProtocolVersion, }, - - // Protocol version BIP0035Version. - { - baseMsgAlert, - baseMsgAlert, - baseMsgAlertEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0031Version. - { - baseMsgAlert, - baseMsgAlert, - baseMsgAlertEncoded, - wire.BIP0031Version, - }, - - // Protocol version NetAddressTimeVersion. - { - baseMsgAlert, - baseMsgAlert, - baseMsgAlertEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version MultipleAddressVersion. - { - baseMsgAlert, - baseMsgAlert, - baseMsgAlertEncoded, - wire.MultipleAddressVersion, - }, } t.Logf("Running %d tests", len(tests)) diff --git a/wire/msgblock.go b/wire/msgblock.go index 0d20569e..b5ba73ce 100644 --- a/wire/msgblock.go +++ b/wire/msgblock.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,6 +9,8 @@ import ( "bytes" "fmt" "io" + + "github.com/decred/dcrd/chaincfg/chainhash" ) // defaultTransactionAlloc is the default size used for the backing array @@ -23,9 +26,9 @@ const MaxBlocksPerMsg = 500 // MaxBlockPayload is the maximum bytes a block message can be in bytes. const MaxBlockPayload = 1000000 // Not actually 1MB which would be 1024 * 1024 -// maxTxPerBlock is the maximum number of transactions that could -// possibly fit into a block. -const maxTxPerBlock = (MaxBlockPayload / minTxPayload) + 1 +// MaxTxPerTxTree is the maximum number of transactions that could +// possibly fit into a block per each merkle root. +const MaxTxPerTxTree = ((MaxBlockPayload / minTxPayload) / 2) + 1 // TxLoc holds locator data for the offset and length of where a transaction is // located within a MsgBlock data buffer. @@ -34,12 +37,13 @@ type TxLoc struct { TxLen int } -// MsgBlock implements the Message interface and represents a bitcoin +// MsgBlock implements the Message interface and represents a decred // block message. It is used to deliver block and transaction information in // response to a getdata message (MsgGetData) for a given block hash. type MsgBlock struct { - Header BlockHeader - Transactions []*MsgTx + Header BlockHeader + Transactions []*MsgTx + STransactions []*MsgTx } // AddTransaction adds a transaction to the message. @@ -49,12 +53,23 @@ func (msg *MsgBlock) AddTransaction(tx *MsgTx) error { } +// AddSTransaction adds a stake transaction to the message. +func (msg *MsgBlock) AddSTransaction(tx *MsgTx) error { + msg.STransactions = append(msg.STransactions, tx) + return nil +} + // ClearTransactions removes all transactions from the message. func (msg *MsgBlock) ClearTransactions() { msg.Transactions = make([]*MsgTx, 0, defaultTransactionAlloc) } -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// ClearSTransactions removes all stake transactions from the message. +func (msg *MsgBlock) ClearSTransactions() { + msg.STransactions = make([]*MsgTx, 0, defaultTransactionAlloc) +} + +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. // See Deserialize for decoding blocks stored to disk, such as in a database, as // opposed to decoding blocks from the wire. 
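With this change a block carries two transaction trees, Transactions (regular) and STransactions (stake), each independently bounded by MaxTxPerTxTree and serialized as the header followed by a varint count and the transactions of each tree in turn. A short usage sketch against the changed API; the zero-valued header and transactions are placeholders only, not a valid Decred block:

package main

import (
	"bytes"
	"fmt"

	"github.com/decred/dcrd/wire"
)

func main() {
	// Assemble a block with one (empty) transaction in each tree.
	blk := wire.NewMsgBlock(&wire.BlockHeader{})
	blk.AddTransaction(&wire.MsgTx{})  // regular transaction tree
	blk.AddSTransaction(&wire.MsgTx{}) // stake transaction tree

	var buf bytes.Buffer
	if err := blk.Serialize(&buf); err != nil {
		fmt.Println("serialize:", err)
		return
	}
	// Serialized layout: header | varint(#Transactions) | txs |
	//                    varint(#STransactions) | stake txs
	fmt.Println("serialized bytes:", buf.Len(), "SerializeSize:", blk.SerializeSize())
}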
@@ -69,12 +84,13 @@ func (msg *MsgBlock) BtcDecode(r io.Reader, pver uint32) error { return err } - // Prevent more transactions than could possibly fit into a block. + // Prevent more transactions than could possibly fit into the regular + // tx tree. // It would be possible to cause memory exhaustion and panics without // a sane upper bound on this count. - if txCount > maxTxPerBlock { + if txCount > MaxTxPerTxTree { str := fmt.Sprintf("too many transactions to fit into a block "+ - "[count %d, max %d]", txCount, maxTxPerBlock) + "[count %d, max %d]", txCount, MaxTxPerTxTree) return messageError("MsgBlock.BtcDecode", str) } @@ -88,13 +104,37 @@ func (msg *MsgBlock) BtcDecode(r io.Reader, pver uint32) error { msg.Transactions = append(msg.Transactions, &tx) } + // Prevent more transactions than could possibly fit into the stake + // tx tree. + // It would be possible to cause memory exhaustion and panics without + // a sane upper bound on this count. + stakeTxCount, err := readVarInt(r, pver) + if err != nil { + return err + } + if stakeTxCount > MaxTxPerTxTree { + str := fmt.Sprintf("too many stransactions to fit into a block "+ + "[count %d, max %d]", stakeTxCount, MaxTxPerTxTree) + return messageError("MsgBlock.BtcDecode", str) + } + + msg.STransactions = make([]*MsgTx, 0, stakeTxCount) + for i := uint64(0); i < stakeTxCount; i++ { + tx := MsgTx{} + err := tx.BtcDecode(r, pver) + if err != nil { + return err + } + msg.STransactions = append(msg.STransactions, &tx) + } + return nil } // Deserialize decodes a block from r into the receiver using a format that is // suitable for long-term storage such as a database while respecting the // Version field in the block. This function differs from BtcDecode in that -// BtcDecode decodes from the bitcoin wire protocol as it was sent across the +// BtcDecode decodes from the decred wire protocol as it was sent across the // network. The wire encoding can technically differ depending on the protocol // version and doesn't even really need to match the format of a stored block at // all. As of the time this comment was written, the encoded block is the same @@ -107,11 +147,17 @@ func (msg *MsgBlock) Deserialize(r io.Reader) error { return msg.BtcDecode(r, 0) } +// FromBytes deserializes a transaction byte slice. +func (msg *MsgBlock) FromBytes(b []byte) error { + r := bytes.NewReader(b) + return msg.Deserialize(r) +} + // DeserializeTxLoc decodes r in the same manner Deserialize does, but it takes // a byte buffer instead of a generic reader and returns a slice containing the // start and length of each transaction within the raw data that is being // deserialized. -func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) { +func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, []TxLoc, error) { fullLen := r.Len() // At the current time, there is no difference between the wire encoding @@ -119,21 +165,21 @@ func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) { // a result, make use of existing wire protocol functions. err := readBlockHeader(r, 0, &msg.Header) if err != nil { - return nil, err + return nil, nil, err } txCount, err := readVarInt(r, 0) if err != nil { - return nil, err + return nil, nil, err } - - // Prevent more transactions than could possibly fit into a block. + // Prevent more transactions than could possibly fit into a normal tx + // tree. // It would be possible to cause memory exhaustion and panics without // a sane upper bound on this count. 
- if txCount > maxTxPerBlock { + if txCount > MaxTxPerTxTree { str := fmt.Sprintf("too many transactions to fit into a block "+ - "[count %d, max %d]", txCount, maxTxPerBlock) - return nil, messageError("MsgBlock.DeserializeTxLoc", str) + "[count %d, max %d]", txCount, MaxTxPerTxTree) + return nil, nil, messageError("MsgBlock.DeserializeTxLoc", str) } // Deserialize each transaction while keeping track of its location @@ -145,16 +191,46 @@ func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) { tx := MsgTx{} err := tx.Deserialize(r) if err != nil { - return nil, err + return nil, nil, err } msg.Transactions = append(msg.Transactions, &tx) txLocs[i].TxLen = (fullLen - r.Len()) - txLocs[i].TxStart } - return txLocs, nil + stakeTxCount, err := readVarInt(r, 0) + if err != nil { + return nil, nil, err + } + + // Prevent more transactions than could possibly fit into a stake tx + // tree. + // It would be possible to cause memory exhaustion and panics without + // a sane upper bound on this count. + if stakeTxCount > MaxTxPerTxTree { + str := fmt.Sprintf("too many transactions to fit into a stake tx tree "+ + "[count %d, max %d]", stakeTxCount, MaxTxPerTxTree) + return nil, nil, messageError("MsgBlock.DeserializeTxLoc", str) + } + + // Deserialize each transaction while keeping track of its location + // within the byte stream. + msg.STransactions = make([]*MsgTx, 0, stakeTxCount) + sTxLocs := make([]TxLoc, stakeTxCount) + for i := uint64(0); i < stakeTxCount; i++ { + sTxLocs[i].TxStart = fullLen - r.Len() + tx := MsgTx{} + err := tx.Deserialize(r) + if err != nil { + return nil, nil, err + } + msg.STransactions = append(msg.STransactions, &tx) + sTxLocs[i].TxLen = (fullLen - r.Len()) - sTxLocs[i].TxStart + } + + return txLocs, sTxLocs, nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. // This is part of the Message interface implementation. // See Serialize for encoding blocks to be stored to disk, such as in a // database, as opposed to encoding blocks for the wire. @@ -176,13 +252,25 @@ func (msg *MsgBlock) BtcEncode(w io.Writer, pver uint32) error { } } + err = writeVarInt(w, pver, uint64(len(msg.STransactions))) + if err != nil { + return err + } + + for _, tx := range msg.STransactions { + err = tx.BtcEncode(w, pver) + if err != nil { + return err + } + } + return nil } // Serialize encodes the block to w using a format that suitable for long-term // storage such as a database while respecting the Version field in the block. // This function differs from BtcEncode in that BtcEncode encodes the block to -// the bitcoin wire protocol in order to be sent across the network. The wire +// the decred wire protocol in order to be sent across the network. The wire // encoding can technically differ depending on the protocol version and doesn't // even really need to match the format of a stored block at all. As of the // time this comment was written, the encoded block is the same in both @@ -195,17 +283,38 @@ func (msg *MsgBlock) Serialize(w io.Writer) error { return msg.BtcEncode(w, 0) } +// Bytes returns the serialized form of the block in bytes. +func (msg *MsgBlock) Bytes() ([]byte, error) { + // Serialize the MsgTx. + var w bytes.Buffer + err := msg.Serialize(&w) + if err != nil { + return nil, err + } + return w.Bytes(), nil +} + // SerializeSize returns the number of bytes it would take to serialize the // the block. 
func (msg *MsgBlock) SerializeSize() int { + // Check to make sure that all transactions have the correct + // type and version to be included in a block. + // Block header bytes + Serialized varint size for the number of - // transactions. - n := blockHeaderLen + VarIntSerializeSize(uint64(len(msg.Transactions))) + // transactions + Serialized varint size for the number of + // stake transactions + + n := blockHeaderLen + VarIntSerializeSize(uint64(len(msg.Transactions))) + + VarIntSerializeSize(uint64(len(msg.STransactions))) for _, tx := range msg.Transactions { n += tx.SerializeSize() } + for _, tx := range msg.STransactions { + n += tx.SerializeSize() + } + return n } @@ -225,24 +334,38 @@ func (msg *MsgBlock) MaxPayloadLength(pver uint32) uint32 { } // BlockSha computes the block identifier hash for this block. -func (msg *MsgBlock) BlockSha() ShaHash { +func (msg *MsgBlock) BlockSha() chainhash.Hash { return msg.Header.BlockSha() } // TxShas returns a slice of hashes of all of transactions in this block. -func (msg *MsgBlock) TxShas() ([]ShaHash, error) { - shaList := make([]ShaHash, 0, len(msg.Transactions)) +func (msg *MsgBlock) TxShas() []chainhash.Hash { + shaList := make([]chainhash.Hash, 0, len(msg.Transactions)) for _, tx := range msg.Transactions { - shaList = append(shaList, tx.TxSha()) + txSha := tx.TxSha() + shaList = append(shaList, txSha) } - return shaList, nil + return shaList } -// NewMsgBlock returns a new bitcoin block message that conforms to the +// STxShas returns a slice of hashes of all of stake transactions in this block. +func (msg *MsgBlock) STxShas() []chainhash.Hash { + shaList := make([]chainhash.Hash, 0, len(msg.STransactions)) + for _, tx := range msg.STransactions { + // Ignore error here since TxSha can't fail in the current + // implementation except due to run-time panics. + sha := tx.TxSha() + shaList = append(shaList, sha) + } + return shaList +} + +// NewMsgBlock returns a new decred block message that conforms to the // Message interface. See MsgBlock for details. func NewMsgBlock(blockHeader *BlockHeader) *MsgBlock { return &MsgBlock{ - Header: *blockHeader, - Transactions: make([]*MsgTx, 0, defaultTransactionAlloc), + Header: *blockHeader, + Transactions: make([]*MsgTx, 0, defaultTransactionAlloc), + STransactions: make([]*MsgTx, 0, defaultTransactionAlloc), } } diff --git a/wire/msgblock_test.go b/wire/msgblock_test.go index 7fe0cd75..0b8f7bdf 100644 --- a/wire/msgblock_test.go +++ b/wire/msgblock_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -11,20 +12,36 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) // TestBlock tests the MsgBlock API. func TestBlock(t *testing.T) { pver := wire.ProtocolVersion - // Block 1 header. - prevHash := &blockOne.Header.PrevBlock - merkleHash := &blockOne.Header.MerkleRoot - bits := blockOne.Header.Bits - nonce := blockOne.Header.Nonce - bh := wire.NewBlockHeader(prevHash, merkleHash, bits, nonce) + // Test block header. 
+ bh := wire.NewBlockHeader( + int32(pver), // Version + &testBlock.Header.PrevBlock, // PrevHash + &testBlock.Header.MerkleRoot, // MerkleRoot + &testBlock.Header.StakeRoot, // StakeRoot + uint16(0x0000), // VoteBits + [6]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // FinalState + uint16(0x0000), // Voters + uint8(0x00), // FreshStake + uint8(0x00), // Revocations + uint32(0), // Poolsize + testBlock.Header.Bits, // Bits + int64(0x0000000000000000), // Sbits + uint32(1), // Height + uint32(1), // Size + testBlock.Header.Nonce, // Nonce + [36]byte{}, // ExtraData + ) // Ensure the command is expected value. wantCmd := "block" @@ -51,12 +68,12 @@ func TestBlock(t *testing.T) { } // Ensure transactions are added properly. - tx := blockOne.Transactions[0].Copy() + tx := testBlock.Transactions[0].Copy() msg.AddTransaction(tx) - if !reflect.DeepEqual(msg.Transactions, blockOne.Transactions) { + if !reflect.DeepEqual(msg.Transactions, testBlock.Transactions) { t.Errorf("AddTransaction: wrong transactions - got %v, want %v", spew.Sdump(msg.Transactions), - spew.Sdump(blockOne.Transactions)) + spew.Sdump(testBlock.Transactions)) } // Ensure transactions are properly cleared. @@ -66,6 +83,22 @@ func TestBlock(t *testing.T) { len(msg.Transactions), 0) } + // Ensure stake transactions are added properly. + stx := testBlock.STransactions[0].Copy() + msg.AddSTransaction(stx) + if !reflect.DeepEqual(msg.STransactions, testBlock.STransactions) { + t.Errorf("AddSTransaction: wrong transactions - got %v, want %v", + spew.Sdump(msg.STransactions), + spew.Sdump(testBlock.STransactions)) + } + + // Ensure transactions are properly cleared. + msg.ClearSTransactions() + if len(msg.STransactions) != 0 { + t.Errorf("ClearTransactions: wrong transactions - got %v, want %v", + len(msg.STransactions), 0) + } + return } @@ -73,35 +106,51 @@ func TestBlock(t *testing.T) { // hashes from a block accurately. func TestBlockTxShas(t *testing.T) { // Block 1, transaction 1 hash. - hashStr := "0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098" - wantHash, err := wire.NewShaHashFromStr(hashStr) + hashStr := "55a25248c04dd8b6599ca2a708413c00d79ae90ce075c54e8a967a647d7e4bea" + wantHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) return } - wantShas := []wire.ShaHash{*wantHash} - shas, err := blockOne.TxShas() - if err != nil { - t.Errorf("TxShas: %v", err) - } + wantShas := []chainhash.Hash{*wantHash} + shas := testBlock.TxShas() if !reflect.DeepEqual(shas, wantShas) { t.Errorf("TxShas: wrong transaction hashes - got %v, want %v", spew.Sdump(shas), spew.Sdump(wantShas)) } } +// TestBlockSTxShas tests the ability to generate a slice of all stake transaction +// hashes from a block accurately. +func TestBlockSTxShas(t *testing.T) { + // Block 1, transaction 1 hash. + hashStr := "ae208a69f3ee088d0328126e3d9bef7652b108d1904f27b166c5999233a801d4" + wantHash, err := chainhash.NewHashFromStr(hashStr) + if err != nil { + t.Errorf("NewShaHashFromStr: %v", err) + return + } + + wantShas := []chainhash.Hash{*wantHash} + shas := testBlock.STxShas() + if !reflect.DeepEqual(shas, wantShas) { + t.Errorf("STxShas: wrong transaction hashes - got %v, want %v", + spew.Sdump(shas), spew.Sdump(wantShas)) + } +} + // TestBlockSha tests the ability to generate the hash of a block accurately. func TestBlockSha(t *testing.T) { // Block 1 hash. 
- hashStr := "839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048" - wantHash, err := wire.NewShaHashFromStr(hashStr) + hashStr := "152437dada95368c42b19febc1702939fa9c1ccdb6fd7284e5b7a19d8fe6df7a" + wantHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Ensure the hash produced is expected. - blockHash := blockOne.BlockSha() + blockHash := testBlock.BlockSha() if !blockHash.IsEqual(wantHash) { t.Errorf("BlockSha: wrong hash - got %v, want %v", spew.Sprint(blockHash), spew.Sprint(wantHash)) @@ -112,56 +161,22 @@ func TestBlockSha(t *testing.T) { // of transaction inputs and outputs and protocol versions. func TestBlockWire(t *testing.T) { tests := []struct { - in *wire.MsgBlock // Message to encode - out *wire.MsgBlock // Expected decoded message - buf []byte // Wire encoding - txLocs []wire.TxLoc // Expected transaction locations - pver uint32 // Protocol version for wire encoding + in *wire.MsgBlock // Message to encode + out *wire.MsgBlock // Expected decoded message + buf []byte // Wire encoding + txLocs []wire.TxLoc // Expected transaction locations + sTxLocs []wire.TxLoc // Expected stake transaction locations + pver uint32 // Protocol version for wire encoding }{ // Latest protocol version. { - &blockOne, - &blockOne, - blockOneBytes, - blockOneTxLocs, + &testBlock, + &testBlock, + testBlockBytes, + testBlockTxLocs, + testBlockSTxLocs, wire.ProtocolVersion, }, - - // Protocol version BIP0035Version. - { - &blockOne, - &blockOne, - blockOneBytes, - blockOneTxLocs, - wire.BIP0035Version, - }, - - // Protocol version BIP0031Version. - { - &blockOne, - &blockOne, - blockOneBytes, - blockOneTxLocs, - wire.BIP0031Version, - }, - - // Protocol version NetAddressTimeVersion. - { - &blockOne, - &blockOne, - blockOneBytes, - blockOneTxLocs, - wire.NetAddressTimeVersion, - }, - - // Protocol version MultipleAddressVersion. - { - &blockOne, - &blockOne, - blockOneBytes, - blockOneTxLocs, - wire.MultipleAddressVersion, - }, } t.Logf("Running %d tests", len(tests)) @@ -210,23 +225,42 @@ func TestBlockWireErrors(t *testing.T) { max int // Max size of fixed buffer to induce errors writeErr error // Expected write error readErr error // Expected read error - }{ - // Force error in version. - {&blockOne, blockOneBytes, pver, 0, io.ErrShortWrite, io.EOF}, + }{ // Force error in version. + {&testBlock, testBlockBytes, pver, 0, io.ErrShortWrite, io.EOF}, // 0 // Force error in prev block hash. - {&blockOne, blockOneBytes, pver, 4, io.ErrShortWrite, io.EOF}, + {&testBlock, testBlockBytes, pver, 4, io.ErrShortWrite, io.EOF}, // 1 // Force error in merkle root. - {&blockOne, blockOneBytes, pver, 36, io.ErrShortWrite, io.EOF}, - // Force error in timestamp. - {&blockOne, blockOneBytes, pver, 68, io.ErrShortWrite, io.EOF}, + {&testBlock, testBlockBytes, pver, 36, io.ErrShortWrite, io.EOF}, // 2 + // Force error in stake root. + {&testBlock, testBlockBytes, pver, 68, io.ErrShortWrite, io.EOF}, // 3 + // Force error in vote bits. + {&testBlock, testBlockBytes, pver, 100, io.ErrShortWrite, io.EOF}, // 4 + // Force error in finalState. + {&testBlock, testBlockBytes, pver, 102, io.ErrShortWrite, io.EOF}, // 5 + // Force error in voters. + {&testBlock, testBlockBytes, pver, 108, io.ErrShortWrite, io.EOF}, // 6 + // Force error in freshstake. + {&testBlock, testBlockBytes, pver, 110, io.ErrShortWrite, io.EOF}, // 7 + // Force error in revocations. + {&testBlock, testBlockBytes, pver, 111, io.ErrShortWrite, io.EOF}, // 8 + // Force error in poolsize. 
+ {&testBlock, testBlockBytes, pver, 112, io.ErrShortWrite, io.EOF}, // 9 // Force error in difficulty bits. - {&blockOne, blockOneBytes, pver, 72, io.ErrShortWrite, io.EOF}, - // Force error in header nonce. - {&blockOne, blockOneBytes, pver, 76, io.ErrShortWrite, io.EOF}, - // Force error in transaction count. - {&blockOne, blockOneBytes, pver, 80, io.ErrShortWrite, io.EOF}, - // Force error in transactions. - {&blockOne, blockOneBytes, pver, 81, io.ErrShortWrite, io.EOF}, + {&testBlock, testBlockBytes, pver, 116, io.ErrShortWrite, io.EOF}, // 10 + // Force error in stake difficulty bits. + {&testBlock, testBlockBytes, pver, 120, io.ErrShortWrite, io.EOF}, // 11 + // Force error in height. + {&testBlock, testBlockBytes, pver, 128, io.ErrShortWrite, io.EOF}, // 12 + // Force error in size. + {&testBlock, testBlockBytes, pver, 132, io.ErrShortWrite, io.EOF}, // 13 + // Force error in timestamp. + {&testBlock, testBlockBytes, pver, 136, io.ErrShortWrite, io.EOF}, // 14 + // Force error in nonce. + {&testBlock, testBlockBytes, pver, 140, io.ErrShortWrite, io.EOF}, // 15 + // Force error in tx count. + {&testBlock, testBlockBytes, pver, 180, io.ErrShortWrite, io.EOF}, // 16 + // Force error in tx. + {&testBlock, testBlockBytes, pver, 181, io.ErrShortWrite, io.EOF}, // 17 } t.Logf("Running %d tests", len(tests)) @@ -255,16 +289,18 @@ func TestBlockWireErrors(t *testing.T) { // TestBlockSerialize tests MsgBlock serialize and deserialize. func TestBlockSerialize(t *testing.T) { tests := []struct { - in *wire.MsgBlock // Message to encode - out *wire.MsgBlock // Expected decoded message - buf []byte // Serialized data - txLocs []wire.TxLoc // Expected transaction locations + in *wire.MsgBlock // Message to encode + out *wire.MsgBlock // Expected decoded message + buf []byte // Serialized data + txLocs []wire.TxLoc // Expected transaction locations + sTxLocs []wire.TxLoc // Expected stake transaction locations }{ { - &blockOne, - &blockOne, - blockOneBytes, - blockOneTxLocs, + &testBlock, + &testBlock, + testBlockBytes, + testBlockTxLocs, + testBlockSTxLocs, }, } @@ -301,7 +337,7 @@ func TestBlockSerialize(t *testing.T) { // information. var txLocBlock wire.MsgBlock br := bytes.NewBuffer(test.buf) - txLocs, err := txLocBlock.DeserializeTxLoc(br) + txLocs, sTxLocs, err := txLocBlock.DeserializeTxLoc(br) if err != nil { t.Errorf("DeserializeTxLoc #%d error %v", i, err) continue @@ -316,6 +352,11 @@ func TestBlockSerialize(t *testing.T) { spew.Sdump(txLocs), spew.Sdump(test.txLocs)) continue } + if !reflect.DeepEqual(sTxLocs, test.sTxLocs) { + t.Errorf("DeserializeTxLoc, sTxLocs #%d\n got: %s want: %s", i, + spew.Sdump(sTxLocs), spew.Sdump(test.sTxLocs)) + continue + } } } @@ -329,22 +370,41 @@ func TestBlockSerializeErrors(t *testing.T) { writeErr error // Expected write error readErr error // Expected read error }{ - // Force error in version. - {&blockOne, blockOneBytes, 0, io.ErrShortWrite, io.EOF}, + {&testBlock, testBlockBytes, 0, io.ErrShortWrite, io.EOF}, // 0 // Force error in prev block hash. - {&blockOne, blockOneBytes, 4, io.ErrShortWrite, io.EOF}, + {&testBlock, testBlockBytes, 4, io.ErrShortWrite, io.EOF}, // 1 // Force error in merkle root. - {&blockOne, blockOneBytes, 36, io.ErrShortWrite, io.EOF}, - // Force error in timestamp. - {&blockOne, blockOneBytes, 68, io.ErrShortWrite, io.EOF}, + {&testBlock, testBlockBytes, 36, io.ErrShortWrite, io.EOF}, // 2 + // Force error in stake root. + {&testBlock, testBlockBytes, 68, io.ErrShortWrite, io.EOF}, // 3 + // Force error in vote bits. 
+ {&testBlock, testBlockBytes, 100, io.ErrShortWrite, io.EOF}, // 4 + // Force error in finalState. + {&testBlock, testBlockBytes, 102, io.ErrShortWrite, io.EOF}, // 5 + // Force error in voters. + {&testBlock, testBlockBytes, 108, io.ErrShortWrite, io.EOF}, // 8 + // Force error in freshstake. + {&testBlock, testBlockBytes, 110, io.ErrShortWrite, io.EOF}, // 9 + // Force error in revocations. + {&testBlock, testBlockBytes, 111, io.ErrShortWrite, io.EOF}, // 10 + // Force error in poolsize. + {&testBlock, testBlockBytes, 112, io.ErrShortWrite, io.EOF}, // 11 // Force error in difficulty bits. - {&blockOne, blockOneBytes, 72, io.ErrShortWrite, io.EOF}, - // Force error in header nonce. - {&blockOne, blockOneBytes, 76, io.ErrShortWrite, io.EOF}, - // Force error in transaction count. - {&blockOne, blockOneBytes, 80, io.ErrShortWrite, io.EOF}, - // Force error in transactions. - {&blockOne, blockOneBytes, 81, io.ErrShortWrite, io.EOF}, + {&testBlock, testBlockBytes, 116, io.ErrShortWrite, io.EOF}, // 12 + // Force error in stake difficulty bits. + {&testBlock, testBlockBytes, 120, io.ErrShortWrite, io.EOF}, // 13 + // Force error in height. + {&testBlock, testBlockBytes, 128, io.ErrShortWrite, io.EOF}, // 14 + // Force error in size. + {&testBlock, testBlockBytes, 132, io.ErrShortWrite, io.EOF}, // 15 + // Force error in timestamp. + {&testBlock, testBlockBytes, 136, io.ErrShortWrite, io.EOF}, // 16 + // Force error in nonce. + {&testBlock, testBlockBytes, 140, io.ErrShortWrite, io.EOF}, // 17 + // Force error in tx count. + {&testBlock, testBlockBytes, 180, io.ErrShortWrite, io.EOF}, // 18 + // Force error in tx. + {&testBlock, testBlockBytes, 181, io.ErrShortWrite, io.EOF}, // 19 } t.Logf("Running %d tests", len(tests)) @@ -370,7 +430,7 @@ func TestBlockSerializeErrors(t *testing.T) { var txLocBlock wire.MsgBlock br := bytes.NewBuffer(test.buf[0:test.max]) - _, err = txLocBlock.DeserializeTxLoc(br) + _, _, err = txLocBlock.DeserializeTxLoc(br) if err != test.readErr { t.Errorf("DeserializeTxLoc #%d wrong error got: %v, want: %v", i, err, test.readErr) @@ -387,7 +447,7 @@ func TestBlockOverflowErrors(t *testing.T) { // Use protocol version 70001 specifically here instead of the latest // protocol version because the test data is using bytes encoded with // that version. 
- pver := uint32(70001) + pver := uint32(1) tests := []struct { buf []byte // Wire encoding @@ -406,9 +466,27 @@ func TestBlockOverflowErrors(t *testing.T) { 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot - 0x61, 0xbc, 0x66, 0x49, // Timestamp + 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, + 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, + 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, + 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // StakeRoot + 0x00, 0x00, // VoteBits + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // FinalState + 0x00, 0x00, // Voters + 0x00, // FreshStake + 0x00, // Revocations + 0x00, 0x00, 0x00, 0x00, // Poolsize 0xff, 0xff, 0x00, 0x1d, // Bits + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // SBits + 0x01, 0x00, 0x00, 0x00, // Height + 0x01, 0x00, 0x00, 0x00, // Size + 0x61, 0xbc, 0x66, 0x49, // Timestamp 0x01, 0xe3, 0x62, 0x99, // Nonce + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ExtraData + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // TxnCount }, pver, &wire.MessageError{}, @@ -438,7 +516,7 @@ func TestBlockOverflowErrors(t *testing.T) { // Deserialize with transaction location info from wire format. br := bytes.NewBuffer(test.buf) - _, err = msg.DeserializeTxLoc(br) + _, _, err = msg.DeserializeTxLoc(br) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("DeserializeTxLoc #%d wrong error got: %v, "+ "want: %v", i, err, reflect.TypeOf(test.err)) @@ -451,17 +529,17 @@ func TestBlockOverflowErrors(t *testing.T) { // various blocks is accurate. func TestBlockSerializeSize(t *testing.T) { // Block with no transactions. - noTxBlock := wire.NewMsgBlock(&blockOne.Header) + noTxBlock := wire.NewMsgBlock(&testBlock.Header) tests := []struct { in *wire.MsgBlock // Block to encode size int // Expected serialized size }{ - // Block with no transactions. - {noTxBlock, 81}, + // Block with no transactions (header + 2x numtx) + {noTxBlock, 182}, // First block in the mainnet block chain. - {&blockOne, len(blockOneBytes)}, + {&testBlock, len(testBlockBytes)}, } t.Logf("Running %d tests", len(tests)) @@ -475,25 +553,41 @@ func TestBlockSerializeSize(t *testing.T) { } } -var blockOne = wire.MsgBlock{ +// testBlock is a basic normative block that is used throughout tests. +var testBlock = wire.MsgBlock{ Header: wire.BlockHeader{ Version: 1, - PrevBlock: wire.ShaHash([wire.HashSize]byte{ // Make go vet happy. + PrevBlock: chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, }), - MerkleRoot: wire.ShaHash([wire.HashSize]byte{ // Make go vet happy. + MerkleRoot: chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, }), - - Timestamp: time.Unix(0x4966bc61, 0), // 2009-01-08 20:54:25 -0600 CST - Bits: 0x1d00ffff, // 486604799 - Nonce: 0x9962e301, // 2573394689 + StakeRoot: chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 
+ 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, + 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, + 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, + 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, + }), + VoteBits: uint16(0x0000), + FinalState: [6]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + Voters: uint16(0x0000), + FreshStake: uint8(0x00), + Revocations: uint8(0x00), + PoolSize: uint32(0x00000000), // Poolsize + Bits: 0x1d00ffff, // 486604799 + SBits: int64(0x0000000000000000), + Height: uint32(1), + Size: uint32(1), + Timestamp: time.Unix(0x4966bc61, 0), // 2009-01-08 20:54:25 -0600 CST + Nonce: 0x9962e301, // 2573394689 + ExtraData: [36]byte{}, }, Transactions: []*wire.MsgTx{ { @@ -501,18 +595,23 @@ var blockOne = wire.MsgBlock{ TxIn: []*wire.TxIn{ { PreviousOutPoint: wire.OutPoint{ - Hash: wire.ShaHash{}, + Hash: chainhash.Hash{}, Index: 0xffffffff, + Tree: dcrutil.TxTreeRegular, }, + Sequence: 0xffffffff, + ValueIn: 0x1616161616161616, + BlockHeight: 0x17171717, + BlockIndex: 0x18181818, SignatureScript: []byte{ - 0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, + 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, }, - Sequence: 0xffffffff, }, }, TxOut: []*wire.TxOut{ { - Value: 0x12a05f200, + Value: 0x3333333333333333, + Version: 0x9898, PkScript: []byte{ 0x41, // OP_DATA_65 0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c, @@ -528,38 +627,102 @@ var blockOne = wire.MsgBlock{ }, }, }, - LockTime: 0, + LockTime: 0x11111111, + Expiry: 0x22222222, + }, + }, + STransactions: []*wire.MsgTx{ + { + Version: 1, + TxIn: []*wire.TxIn{ + { + PreviousOutPoint: wire.OutPoint{ + Hash: chainhash.Hash{}, + Index: 0xffffffff, + Tree: dcrutil.TxTreeStake, + }, + Sequence: 0xffffffff, + ValueIn: 0x1313131313131313, + BlockHeight: 0x14141414, + BlockIndex: 0x15151515, + SignatureScript: []byte{ + 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, + }, + }, + }, + TxOut: []*wire.TxOut{ + { + Value: 0x3333333333333333, + Version: 0x1212, + PkScript: []byte{ + 0x41, // OP_DATA_65 + 0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c, + 0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16, + 0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c, + 0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c, + 0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4, + 0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6, + 0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e, + 0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58, + 0xee, // 65-byte signature + 0xac, // OP_CHECKSIG + }, + }, + }, + LockTime: 0x11111111, + Expiry: 0x22222222, }, }, } -// Block one serialized bytes. -var blockOneBytes = []byte{ - 0x01, 0x00, 0x00, 0x00, // Version 1 +// testBlockBytes is the serialized bytes for the above test block (testBlock). 
+var testBlockBytes = []byte{ + // Begin block header + 0x01, 0x00, 0x00, 0x00, // Version 1 [0] 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, - 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock + 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock [4] 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, - 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot - 0x61, 0xbc, 0x66, 0x49, // Timestamp - 0xff, 0xff, 0x00, 0x1d, // Bits - 0x01, 0xe3, 0x62, 0x99, // Nonce - 0x01, // TxnCount - 0x01, 0x00, 0x00, 0x00, // Version - 0x01, // Varint for number of transaction inputs + 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot [36] + 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, + 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, + 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, + 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // StakeRoot [68] + 0x00, 0x00, // VoteBits [100] + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // FinalState [102] + 0x00, 0x00, // Voters [108] + 0x00, // FreshStake [110] + 0x00, // Revocations [111] + 0x00, 0x00, 0x00, 0x00, // Poolsize [112] + 0xff, 0xff, 0x00, 0x1d, // Bits [116] + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // SBits [120] + 0x01, 0x00, 0x00, 0x00, // Height [128] + 0x01, 0x00, 0x00, 0x00, // Size [132] + 0x61, 0xbc, 0x66, 0x49, // Timestamp [136] + 0x01, 0xe3, 0x62, 0x99, // Nonce [140] + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ExtraData [144] 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash - 0xff, 0xff, 0xff, 0xff, // Prevous output index - 0x07, // Varint for length of signature script - 0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, // Signature script (coinbase) - 0xff, 0xff, 0xff, 0xff, // Sequence - 0x01, // Varint for number of transaction outputs - 0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount + 0x00, 0x00, 0x00, 0x00, + // Announce number of txs + 0x01, // TxnCount [180] + // Begin bogus normal txs + 0x01, 0x00, 0x00, 0x00, // Version [181] + 0x01, // Varint for number of transaction inputs [185] + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash [186] + 0xff, 0xff, 0xff, 0xff, // Prevous output index [218] + 0x00, // Previous output tree [222] + 0xff, 0xff, 0xff, 0xff, // Sequence [223] + 0x01, // Varint for number of transaction outputs [227] + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // Transaction amount [228] + 0x98, 0x98, // Script version 0x43, // Varint for length of pk script 0x41, // OP_DATA_65 0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c, @@ -570,12 +733,59 @@ var blockOneBytes = []byte{ 0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6, 0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e, 0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58, - 0xee, // 65-byte uncompressed public key + 0xee, // 65-byte signature 0xac, // OP_CHECKSIG - 0x00, 0x00, 0x00, 0x00, // Lock time + 0x11, 0x11, 0x11, 0x11, // Lock time + 0x22, 0x22, 0x22, 0x22, // Expiry + 0x01, // Varint for number of signatures + 0x16, 0x16, 0x16, 0x16, 0x16, 0x16, 0x16, 0x16, // 
ValueIn + 0x17, 0x17, 0x17, 0x17, // BlockHeight + 0x18, 0x18, 0x18, 0x18, // BlockIndex + 0x07, // SigScript length + 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, // Signature script (coinbase) + // Announce number of stake txs + 0x01, // TxnCount for stake tx + // Begin bogus stake txs + 0x01, 0x00, 0x00, 0x00, // Version + 0x01, // Varint for number of transaction inputs + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash + 0xff, 0xff, 0xff, 0xff, // Prevous output index + 0x01, // Previous output tree + 0xff, 0xff, 0xff, 0xff, // Sequence + 0x01, // Varint for number of transaction outputs + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // Transaction amount + 0x12, 0x12, // Script version + 0x43, // Varint for length of pk script + 0x41, // OP_DATA_65 + 0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c, + 0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16, + 0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c, + 0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c, + 0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4, + 0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6, + 0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e, + 0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58, + 0xee, // 65-byte signature + 0xac, // OP_CHECKSIG + 0x11, 0x11, 0x11, 0x11, // Lock time + 0x22, 0x22, 0x22, 0x22, // Expiry + 0x01, // Varint for number of signatures + 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, // ValueIn + 0x14, 0x14, 0x14, 0x14, // BlockHeight + 0x15, 0x15, 0x15, 0x15, // BlockIndex + 0x07, // SigScript length + 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, // Signature script (coinbase) } -// Transaction location information for block one transactions. -var blockOneTxLocs = []wire.TxLoc{ - {TxStart: 81, TxLen: 134}, +// Transaction location information for the test block transactions. +var testBlockTxLocs = []wire.TxLoc{ + {TxStart: 181, TxLen: 158}, +} + +// Transaction location information for the test block stake transactions. +var testBlockSTxLocs = []wire.TxLoc{ + {TxStart: 340, TxLen: 158}, } diff --git a/wire/msgfilteradd.go b/wire/msgfilteradd.go index 0db84555..3cbbbf2d 100644 --- a/wire/msgfilteradd.go +++ b/wire/msgfilteradd.go @@ -1,4 +1,5 @@ // Copyright (c) 2014-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -16,7 +17,7 @@ const ( MaxFilterAddDataSize = 520 ) -// MsgFilterAdd implements the Message interface and represents a bitcoin +// MsgFilterAdd implements the Message interface and represents a decred // filteradd message. It is used to add a data element to an existing Bloom // filter. // @@ -25,15 +26,9 @@ type MsgFilterAdd struct { Data []byte } -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. 
func (msg *MsgFilterAdd) BtcDecode(r io.Reader, pver uint32) error { - if pver < BIP0037Version { - str := fmt.Sprintf("filteradd message invalid for protocol "+ - "version %d", pver) - return messageError("MsgFilterAdd.BtcDecode", str) - } - var err error msg.Data, err = readVarBytes(r, pver, MaxFilterAddDataSize, "filteradd data") @@ -44,15 +39,9 @@ func (msg *MsgFilterAdd) BtcDecode(r io.Reader, pver uint32) error { return nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. // This is part of the Message interface implementation. func (msg *MsgFilterAdd) BtcEncode(w io.Writer, pver uint32) error { - if pver < BIP0037Version { - str := fmt.Sprintf("filteradd message invalid for protocol "+ - "version %d", pver) - return messageError("MsgFilterAdd.BtcEncode", str) - } - size := len(msg.Data) if size > MaxFilterAddDataSize { str := fmt.Sprintf("filteradd size too large for message "+ @@ -81,7 +70,7 @@ func (msg *MsgFilterAdd) MaxPayloadLength(pver uint32) uint32 { MaxFilterAddDataSize } -// NewMsgFilterAdd returns a new bitcoin filteradd message that conforms to the +// NewMsgFilterAdd returns a new decred filteradd message that conforms to the // Message interface. See MsgFilterAdd for details. func NewMsgFilterAdd(data []byte) *MsgFilterAdd { return &MsgFilterAdd{ diff --git a/wire/msgfilteradd_test.go b/wire/msgfilteradd_test.go index c4557306..7ce594df 100644 --- a/wire/msgfilteradd_test.go +++ b/wire/msgfilteradd_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2014-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -10,7 +11,7 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/wire" ) // TestFilterAddLatest tests the MsgFilterAdd API against the latest protocol @@ -54,38 +55,6 @@ func TestFilterAddLatest(t *testing.T) { return } -// TestFilterAddCrossProtocol tests the MsgFilterAdd API when encoding with the -// latest protocol version and decoding with BIP0031Version. -func TestFilterAddCrossProtocol(t *testing.T) { - data := []byte{0x01, 0x02} - msg := wire.NewMsgFilterAdd(data) - if !bytes.Equal(msg.Data, data) { - t.Errorf("should get same data back out") - } - - // Encode with latest protocol version. - var buf bytes.Buffer - err := msg.BtcEncode(&buf, wire.ProtocolVersion) - if err != nil { - t.Errorf("encode of MsgFilterAdd failed %v err <%v>", msg, err) - } - - // Decode with old protocol version. - var readmsg wire.MsgFilterAdd - err = readmsg.BtcDecode(&buf, wire.BIP0031Version) - if err == nil { - t.Errorf("decode of MsgFilterAdd succeeded when it shouldn't "+ - "have %v", msg) - } - - // Since one of the protocol versions doesn't support the filteradd - // message, make sure the data didn't get encoded and decoded back out. - if bytes.Equal(msg.Data, readmsg.Data) { - t.Error("should not get same data for cross protocol") - } - -} - // TestFilterAddMaxDataSize tests the MsgFilterAdd API maximum data size. func TestFilterAddMaxDataSize(t *testing.T) { data := bytes.Repeat([]byte{0xff}, 521) @@ -112,8 +81,6 @@ func TestFilterAddMaxDataSize(t *testing.T) { // of MsgFilterAdd to confirm error paths work correctly. 
func TestFilterAddWireErrors(t *testing.T) { pver := wire.ProtocolVersion - pverNoFilterAdd := wire.BIP0037Version - 1 - wireErr := &wire.MessageError{} baseData := []byte{0x01, 0x02, 0x03, 0x04} baseFilterAdd := wire.NewMsgFilterAdd(baseData) @@ -138,11 +105,6 @@ func TestFilterAddWireErrors(t *testing.T) { baseFilterAdd, baseFilterAddEncoded, pver, 1, io.ErrShortWrite, io.EOF, }, - // Force error due to unsupported protocol version. - { - baseFilterAdd, baseFilterAddEncoded, pverNoFilterAdd, 5, - wireErr, wireErr, - }, } t.Logf("Running %d tests", len(tests)) diff --git a/wire/msgfilterclear.go b/wire/msgfilterclear.go index b82d6b85..26fc1a5c 100644 --- a/wire/msgfilterclear.go +++ b/wire/msgfilterclear.go @@ -1,42 +1,30 @@ // Copyright (c) 2014-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package wire import ( - "fmt" "io" ) -// MsgFilterClear implements the Message interface and represents a bitcoin +// MsgFilterClear implements the Message interface and represents a decred // filterclear message which is used to reset a Bloom filter. // // This message was not added until protocol version BIP0037Version and has // no payload. type MsgFilterClear struct{} -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgFilterClear) BtcDecode(r io.Reader, pver uint32) error { - if pver < BIP0037Version { - str := fmt.Sprintf("filterclear message invalid for protocol "+ - "version %d", pver) - return messageError("MsgFilterClear.BtcDecode", str) - } - return nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. // This is part of the Message interface implementation. func (msg *MsgFilterClear) BtcEncode(w io.Writer, pver uint32) error { - if pver < BIP0037Version { - str := fmt.Sprintf("filterclear message invalid for protocol "+ - "version %d", pver) - return messageError("MsgFilterClear.BtcEncode", str) - } - return nil } @@ -52,7 +40,7 @@ func (msg *MsgFilterClear) MaxPayloadLength(pver uint32) uint32 { return 0 } -// NewMsgFilterClear returns a new bitcoin filterclear message that conforms to the Message +// NewMsgFilterClear returns a new decred filterclear message that conforms to the Message // interface. See MsgFilterClear for details. func NewMsgFilterClear() *MsgFilterClear { return &MsgFilterClear{} diff --git a/wire/msgfilterclear_test.go b/wire/msgfilterclear_test.go index edd210c6..98ebdefe 100644 --- a/wire/msgfilterclear_test.go +++ b/wire/msgfilterclear_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2014-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -9,8 +10,8 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + "github.com/decred/dcrd/wire" ) // TestFilterCLearLatest tests the MsgFilterClear API against the latest @@ -39,27 +40,6 @@ func TestFilterClearLatest(t *testing.T) { return } -// TestFilterClearCrossProtocol tests the MsgFilterClear API when encoding with -// the latest protocol version and decoding with BIP0031Version. 
-func TestFilterClearCrossProtocol(t *testing.T) { - msg := wire.NewMsgFilterClear() - - // Encode with latest protocol version. - var buf bytes.Buffer - err := msg.BtcEncode(&buf, wire.ProtocolVersion) - if err != nil { - t.Errorf("encode of MsgFilterClear failed %v err <%v>", msg, err) - } - - // Decode with old protocol version. - var readmsg wire.MsgFilterClear - err = readmsg.BtcDecode(&buf, wire.BIP0031Version) - if err == nil { - t.Errorf("decode of MsgFilterClear succeeded when it "+ - "shouldn't have %v", msg) - } -} - // TestFilterClearWire tests the MsgFilterClear wire encode and decode for // various protocol versions. func TestFilterClearWire(t *testing.T) { @@ -79,22 +59,6 @@ func TestFilterClearWire(t *testing.T) { msgFilterClearEncoded, wire.ProtocolVersion, }, - - // Protocol version BIP0037Version + 1. - { - msgFilterClear, - msgFilterClear, - msgFilterClearEncoded, - wire.BIP0037Version + 1, - }, - - // Protocol version BIP0037Version. - { - msgFilterClear, - msgFilterClear, - msgFilterClearEncoded, - wire.BIP0037Version, - }, } t.Logf("Running %d tests", len(tests)) @@ -127,71 +91,3 @@ func TestFilterClearWire(t *testing.T) { } } } - -// TestFilterClearWireErrors performs negative tests against wire encode and -// decode of MsgFilterClear to confirm error paths work correctly. -func TestFilterClearWireErrors(t *testing.T) { - pverNoFilterClear := wire.BIP0037Version - 1 - wireErr := &wire.MessageError{} - - baseFilterClear := wire.NewMsgFilterClear() - baseFilterClearEncoded := []byte{} - - tests := []struct { - in *wire.MsgFilterClear // Value to encode - buf []byte // Wire encoding - pver uint32 // Protocol version for wire encoding - max int // Max size of fixed buffer to induce errors - writeErr error // Expected write error - readErr error // Expected read error - }{ - // Force error due to unsupported protocol version. - { - baseFilterClear, baseFilterClearEncoded, - pverNoFilterClear, 4, wireErr, wireErr, - }, - } - - t.Logf("Running %d tests", len(tests)) - for i, test := range tests { - // Encode to wire format. - w := newFixedWriter(test.max) - err := test.in.BtcEncode(w, test.pver) - if reflect.TypeOf(err) != reflect.TypeOf(test.writeErr) { - t.Errorf("BtcEncode #%d wrong error got: %v, want: %v", - i, err, test.writeErr) - continue - } - - // For errors which are not of type wire.MessageError, check - // them for equality. - if _, ok := err.(*wire.MessageError); !ok { - if err != test.writeErr { - t.Errorf("BtcEncode #%d wrong error got: %v, "+ - "want: %v", i, err, test.writeErr) - continue - } - } - - // Decode from wire format. - var msg wire.MsgFilterClear - r := newFixedReader(test.max, test.buf) - err = msg.BtcDecode(r, test.pver) - if reflect.TypeOf(err) != reflect.TypeOf(test.readErr) { - t.Errorf("BtcDecode #%d wrong error got: %v, want: %v", - i, err, test.readErr) - continue - } - - // For errors which are not of type wire.MessageError, check - // them for equality. - if _, ok := err.(*wire.MessageError); !ok { - if err != test.readErr { - t.Errorf("BtcDecode #%d wrong error got: %v, "+ - "want: %v", i, err, test.readErr) - continue - } - } - - } -} diff --git a/wire/msgfilterload.go b/wire/msgfilterload.go index d1c1cc33..855391b7 100644 --- a/wire/msgfilterload.go +++ b/wire/msgfilterload.go @@ -1,4 +1,5 @@ // Copyright (c) 2014-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
@@ -38,7 +39,7 @@ const ( MaxFilterLoadFilterSize = 36000 ) -// MsgFilterLoad implements the Message interface and represents a bitcoin +// MsgFilterLoad implements the Message interface and represents a decred // filterload message which is used to reset a Bloom filter. // // This message was not added until protocol version BIP0037Version. @@ -49,15 +50,9 @@ type MsgFilterLoad struct { Flags BloomUpdateType } -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgFilterLoad) BtcDecode(r io.Reader, pver uint32) error { - if pver < BIP0037Version { - str := fmt.Sprintf("filterload message invalid for protocol "+ - "version %d", pver) - return messageError("MsgFilterLoad.BtcDecode", str) - } - var err error msg.Filter, err = readVarBytes(r, pver, MaxFilterLoadFilterSize, "filterload filter size") @@ -79,15 +74,9 @@ func (msg *MsgFilterLoad) BtcDecode(r io.Reader, pver uint32) error { return nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. // This is part of the Message interface implementation. func (msg *MsgFilterLoad) BtcEncode(w io.Writer, pver uint32) error { - if pver < BIP0037Version { - str := fmt.Sprintf("filterload message invalid for protocol "+ - "version %d", pver) - return messageError("MsgFilterLoad.BtcEncode", str) - } - size := len(msg.Filter) if size > MaxFilterLoadFilterSize { str := fmt.Sprintf("filterload filter size too large for message "+ @@ -129,7 +118,7 @@ func (msg *MsgFilterLoad) MaxPayloadLength(pver uint32) uint32 { MaxFilterLoadFilterSize + 9 } -// NewMsgFilterLoad returns a new bitcoin filterload message that conforms to +// NewMsgFilterLoad returns a new decred filterload message that conforms to // the Message interface. See MsgFilterLoad for details. func NewMsgFilterLoad(filter []byte, hashFuncs uint32, tweak uint32, flags BloomUpdateType) *MsgFilterLoad { return &MsgFilterLoad{ diff --git a/wire/msgfilterload_test.go b/wire/msgfilterload_test.go index 97503a44..f48e076e 100644 --- a/wire/msgfilterload_test.go +++ b/wire/msgfilterload_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2014 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -10,7 +11,7 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/wire" ) // TestFilterCLearLatest tests the MsgFilterLoad API against the latest protocol @@ -54,35 +55,12 @@ func TestFilterLoadLatest(t *testing.T) { return } -// TestFilterLoadCrossProtocol tests the MsgFilterLoad API when encoding with -// the latest protocol version and decoding with BIP0031Version. -func TestFilterLoadCrossProtocol(t *testing.T) { - data := []byte{0x01, 0x02} - msg := wire.NewMsgFilterLoad(data, 10, 0, 0) - - // Encode with latest protocol version. - var buf bytes.Buffer - err := msg.BtcEncode(&buf, wire.ProtocolVersion) - if err != nil { - t.Errorf("encode of NewMsgFilterLoad failed %v err <%v>", msg, - err) - } - - // Decode with old protocol version. 
- var readmsg wire.MsgFilterLoad - err = readmsg.BtcDecode(&buf, wire.BIP0031Version) - if err == nil { - t.Errorf("decode of MsgFilterLoad succeeded when it shouldn't have %v", - msg) - } -} - // TestFilterLoadMaxFilterSize tests the MsgFilterLoad API maximum filter size. func TestFilterLoadMaxFilterSize(t *testing.T) { data := bytes.Repeat([]byte{0xff}, 36001) msg := wire.NewMsgFilterLoad(data, 10, 0, 0) - // Encode with latest protocol version. + // Encode with latest protocol version.; var buf bytes.Buffer err := msg.BtcEncode(&buf, wire.ProtocolVersion) if err == nil { @@ -132,8 +110,6 @@ func TestFilterLoadMaxHashFuncsSize(t *testing.T) { // of MsgFilterLoad to confirm error paths work correctly. func TestFilterLoadWireErrors(t *testing.T) { pver := wire.ProtocolVersion - pverNoFilterLoad := wire.BIP0037Version - 1 - wireErr := &wire.MessageError{} baseFilter := []byte{0x01, 0x02, 0x03, 0x04} baseFilterLoad := wire.NewMsgFilterLoad(baseFilter, 10, 0, @@ -178,11 +154,6 @@ func TestFilterLoadWireErrors(t *testing.T) { baseFilterLoad, baseFilterLoadEncoded, pver, 13, io.ErrShortWrite, io.EOF, }, - // Force error due to unsupported protocol version. - { - baseFilterLoad, baseFilterLoadEncoded, pverNoFilterLoad, - 10, wireErr, wireErr, - }, } t.Logf("Running %d tests", len(tests)) diff --git a/wire/msggetaddr.go b/wire/msggetaddr.go index 0a4bf57a..b77f9cc4 100644 --- a/wire/msggetaddr.go +++ b/wire/msggetaddr.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,7 +9,7 @@ import ( "io" ) -// MsgGetAddr implements the Message interface and represents a bitcoin +// MsgGetAddr implements the Message interface and represents a decred // getaddr message. It is used to request a list of known active peers on the // network from a peer to help identify potential nodes. The list is returned // via one or more addr messages (MsgAddr). @@ -16,13 +17,13 @@ import ( // This message has no payload. type MsgGetAddr struct{} -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgGetAddr) BtcDecode(r io.Reader, pver uint32) error { return nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. // This is part of the Message interface implementation. func (msg *MsgGetAddr) BtcEncode(w io.Writer, pver uint32) error { return nil @@ -40,7 +41,7 @@ func (msg *MsgGetAddr) MaxPayloadLength(pver uint32) uint32 { return 0 } -// NewMsgGetAddr returns a new bitcoin getaddr message that conforms to the +// NewMsgGetAddr returns a new decred getaddr message that conforms to the // Message interface. See MsgGetAddr for details. func NewMsgGetAddr() *MsgGetAddr { return &MsgGetAddr{} diff --git a/wire/msggetaddr_test.go b/wire/msggetaddr_test.go index 15c68bd7..2900caa3 100644 --- a/wire/msggetaddr_test.go +++ b/wire/msggetaddr_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
@@ -9,8 +10,8 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + "github.com/decred/dcrd/wire" ) // TestGetAddr tests the MsgGetAddr API. @@ -57,38 +58,6 @@ func TestGetAddrWire(t *testing.T) { msgGetAddrEncoded, wire.ProtocolVersion, }, - - // Protocol version BIP0035Version. - { - msgGetAddr, - msgGetAddr, - msgGetAddrEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0031Version. - { - msgGetAddr, - msgGetAddr, - msgGetAddrEncoded, - wire.BIP0031Version, - }, - - // Protocol version NetAddressTimeVersion. - { - msgGetAddr, - msgGetAddr, - msgGetAddrEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version MultipleAddressVersion. - { - msgGetAddr, - msgGetAddr, - msgGetAddrEncoded, - wire.MultipleAddressVersion, - }, } t.Logf("Running %d tests", len(tests)) diff --git a/wire/msggetblocks.go b/wire/msggetblocks.go index cc070d03..c2e022d6 100644 --- a/wire/msggetblocks.go +++ b/wire/msggetblocks.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -7,13 +8,15 @@ package wire import ( "fmt" "io" + + "github.com/decred/dcrd/chaincfg/chainhash" ) // MaxBlockLocatorsPerMsg is the maximum number of block locator hashes allowed // per message. const MaxBlockLocatorsPerMsg = 500 -// MsgGetBlocks implements the Message interface and represents a bitcoin +// MsgGetBlocks implements the Message interface and represents a decred // getblocks message. It is used to request a list of blocks starting after the // last known hash in the slice of block locator hashes. The list is returned // via an inv message (MsgInv) and is limited by a specific hash to stop at or @@ -30,12 +33,12 @@ const MaxBlockLocatorsPerMsg = 500 // closer to the genesis block you get. type MsgGetBlocks struct { ProtocolVersion uint32 - BlockLocatorHashes []*ShaHash - HashStop ShaHash + BlockLocatorHashes []*chainhash.Hash + HashStop chainhash.Hash } // AddBlockLocatorHash adds a new block locator hash to the message. -func (msg *MsgGetBlocks) AddBlockLocatorHash(hash *ShaHash) error { +func (msg *MsgGetBlocks) AddBlockLocatorHash(hash *chainhash.Hash) error { if len(msg.BlockLocatorHashes)+1 > MaxBlockLocatorsPerMsg { str := fmt.Sprintf("too many block locator hashes for message [max %v]", MaxBlockLocatorsPerMsg) @@ -46,7 +49,7 @@ func (msg *MsgGetBlocks) AddBlockLocatorHash(hash *ShaHash) error { return nil } -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgGetBlocks) BtcDecode(r io.Reader, pver uint32) error { err := readElement(r, &msg.ProtocolVersion) @@ -65,9 +68,9 @@ func (msg *MsgGetBlocks) BtcDecode(r io.Reader, pver uint32) error { return messageError("MsgGetBlocks.BtcDecode", str) } - msg.BlockLocatorHashes = make([]*ShaHash, 0, count) + msg.BlockLocatorHashes = make([]*chainhash.Hash, 0, count) for i := uint64(0); i < count; i++ { - sha := ShaHash{} + sha := chainhash.Hash{} err := readElement(r, &sha) if err != nil { return err @@ -83,7 +86,7 @@ func (msg *MsgGetBlocks) BtcDecode(r io.Reader, pver uint32) error { return nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. 
// This is part of the Message interface implementation. func (msg *MsgGetBlocks) BtcEncode(w io.Writer, pver uint32) error { count := len(msg.BlockLocatorHashes) @@ -129,16 +132,16 @@ func (msg *MsgGetBlocks) Command() string { func (msg *MsgGetBlocks) MaxPayloadLength(pver uint32) uint32 { // Protocol version 4 bytes + num hashes (varInt) + max block locator // hashes + hash stop. - return 4 + MaxVarIntPayload + (MaxBlockLocatorsPerMsg * HashSize) + HashSize + return 4 + MaxVarIntPayload + (MaxBlockLocatorsPerMsg * chainhash.HashSize) + chainhash.HashSize } -// NewMsgGetBlocks returns a new bitcoin getblocks message that conforms to the +// NewMsgGetBlocks returns a new decred getblocks message that conforms to the // Message interface using the passed parameters and defaults for the remaining // fields. -func NewMsgGetBlocks(hashStop *ShaHash) *MsgGetBlocks { +func NewMsgGetBlocks(hashStop *chainhash.Hash) *MsgGetBlocks { return &MsgGetBlocks{ ProtocolVersion: ProtocolVersion, - BlockLocatorHashes: make([]*ShaHash, 0, MaxBlockLocatorsPerMsg), + BlockLocatorHashes: make([]*chainhash.Hash, 0, MaxBlockLocatorsPerMsg), HashStop: *hashStop, } } diff --git a/wire/msggetblocks_test.go b/wire/msggetblocks_test.go index 69806477..8ec7bac4 100644 --- a/wire/msggetblocks_test.go +++ b/wire/msggetblocks_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -10,8 +11,10 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" ) // TestGetBlocks tests the MsgGetBlocks API. @@ -20,14 +23,14 @@ func TestGetBlocks(t *testing.T) { // Block 99500 hash. hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0" - locatorHash, err := wire.NewShaHashFromStr(hashStr) + locatorHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Block 100000 hash. hashStr = "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" - hashStop, err := wire.NewShaHashFromStr(hashStr) + hashStop, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } @@ -90,27 +93,27 @@ func TestGetBlocksWire(t *testing.T) { // Block 99499 hash. hashStr := "2710f40c87ec93d010a6fd95f42c59a2cbacc60b18cf6b7957535" - hashLocator, err := wire.NewShaHashFromStr(hashStr) + hashLocator, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Block 99500 hash. hashStr = "2e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0" - hashLocator2, err := wire.NewShaHashFromStr(hashStr) + hashLocator2, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Block 100000 hash. hashStr = "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" - hashStop, err := wire.NewShaHashFromStr(hashStr) + hashStop, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // MsgGetBlocks message with no block locators or stop hash. 
- noLocators := wire.NewMsgGetBlocks(&wire.ShaHash{}) + noLocators := wire.NewMsgGetBlocks(&chainhash.Hash{}) noLocators.ProtocolVersion = pver noLocatorsEncoded := []byte{ 0x62, 0xea, 0x00, 0x00, // Protocol version 60002 @@ -164,70 +167,6 @@ func TestGetBlocksWire(t *testing.T) { multiLocatorsEncoded, wire.ProtocolVersion, }, - - // Protocol version BIP0035Version with no block locators. - { - noLocators, - noLocators, - noLocatorsEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0035Version with multiple block locators. - { - multiLocators, - multiLocators, - multiLocatorsEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0031Version with no block locators. - { - noLocators, - noLocators, - noLocatorsEncoded, - wire.BIP0031Version, - }, - - // Protocol version BIP0031Versionwith multiple block locators. - { - multiLocators, - multiLocators, - multiLocatorsEncoded, - wire.BIP0031Version, - }, - - // Protocol version NetAddressTimeVersion with no block locators. - { - noLocators, - noLocators, - noLocatorsEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version NetAddressTimeVersion multiple block locators. - { - multiLocators, - multiLocators, - multiLocatorsEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version MultipleAddressVersion with no block locators. - { - noLocators, - noLocators, - noLocatorsEncoded, - wire.MultipleAddressVersion, - }, - - // Protocol version MultipleAddressVersion multiple block locators. - { - multiLocators, - multiLocators, - multiLocatorsEncoded, - wire.MultipleAddressVersion, - }, } t.Logf("Running %d tests", len(tests)) @@ -272,21 +211,21 @@ func TestGetBlocksWireErrors(t *testing.T) { // Block 99499 hash. hashStr := "2710f40c87ec93d010a6fd95f42c59a2cbacc60b18cf6b7957535" - hashLocator, err := wire.NewShaHashFromStr(hashStr) + hashLocator, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Block 99500 hash. hashStr = "2e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0" - hashLocator2, err := wire.NewShaHashFromStr(hashStr) + hashLocator2, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Block 100000 hash. hashStr = "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" - hashStop, err := wire.NewShaHashFromStr(hashStr) + hashStop, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } diff --git a/wire/msggetdata.go b/wire/msggetdata.go index 1da61d88..7262602e 100644 --- a/wire/msggetdata.go +++ b/wire/msggetdata.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -9,7 +10,7 @@ import ( "io" ) -// MsgGetData implements the Message interface and represents a bitcoin +// MsgGetData implements the Message interface and represents a decred // getdata message. It is used to request data such as blocks and transactions // from another peer. It should be used in response to the inv (MsgInv) message // to request the actual data referenced by each inventory vector the receiving @@ -35,7 +36,7 @@ func (msg *MsgGetData) AddInvVect(iv *InvVect) error { return nil } -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. 
func (msg *MsgGetData) BtcDecode(r io.Reader, pver uint32) error { count, err := readVarInt(r, pver) @@ -62,7 +63,7 @@ func (msg *MsgGetData) BtcDecode(r io.Reader, pver uint32) error { return nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. // This is part of the Message interface implementation. func (msg *MsgGetData) BtcEncode(w io.Writer, pver uint32) error { // Limit to max inventory vectors per message. @@ -100,7 +101,7 @@ func (msg *MsgGetData) MaxPayloadLength(pver uint32) uint32 { return MaxVarIntPayload + (MaxInvPerMsg * maxInvVectPayload) } -// NewMsgGetData returns a new bitcoin getdata message that conforms to the +// NewMsgGetData returns a new decred getdata message that conforms to the // Message interface. See MsgGetData for details. func NewMsgGetData() *MsgGetData { return &MsgGetData{ @@ -108,7 +109,7 @@ func NewMsgGetData() *MsgGetData { } } -// NewMsgGetDataSizeHint returns a new bitcoin getdata message that conforms to +// NewMsgGetDataSizeHint returns a new decred getdata message that conforms to // the Message interface. See MsgGetData for details. This function differs // from NewMsgGetData in that it allows a default allocation size for the // backing array which houses the inventory vector list. This allows callers diff --git a/wire/msggetdata_test.go b/wire/msggetdata_test.go index d7b29097..7764f437 100644 --- a/wire/msggetdata_test.go +++ b/wire/msggetdata_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -10,8 +11,10 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" ) // TestGetData tests the MsgGetData API. @@ -37,7 +40,7 @@ func TestGetData(t *testing.T) { } // Ensure inventory vectors are added properly. - hash := wire.ShaHash{} + hash := chainhash.Hash{} iv := wire.NewInvVect(wire.InvTypeBlock, &hash) err := msg.AddInvVect(iv) if err != nil { @@ -75,14 +78,14 @@ func TestGetData(t *testing.T) { func TestGetDataWire(t *testing.T) { // Block 203707 hash. hashStr := "3264bc2ac36a60840790ba1d475d01367e7c723da941069e9dc" - blockHash, err := wire.NewShaHashFromStr(hashStr) + blockHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Transation 1 of Block 203707 hash. hashStr = "d28a3dc7392bf00a9855ee93dd9a81eff82a2c4fe57fbd42cfe71b487accfaf0" - txHash, err := wire.NewShaHashFromStr(hashStr) + txHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } @@ -135,70 +138,6 @@ func TestGetDataWire(t *testing.T) { MultiInvEncoded, wire.ProtocolVersion, }, - - // Protocol version BIP0035Version no inv vectors. - { - NoInv, - NoInv, - NoInvEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0035Version with multiple inv vectors. - { - MultiInv, - MultiInv, - MultiInvEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0031Version no inv vectors. - { - NoInv, - NoInv, - NoInvEncoded, - wire.BIP0031Version, - }, - - // Protocol version BIP0031Version with multiple inv vectors. - { - MultiInv, - MultiInv, - MultiInvEncoded, - wire.BIP0031Version, - }, - - // Protocol version NetAddressTimeVersion no inv vectors. 
- { - NoInv, - NoInv, - NoInvEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version NetAddressTimeVersion with multiple inv vectors. - { - MultiInv, - MultiInv, - MultiInvEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version MultipleAddressVersion no inv vectors. - { - NoInv, - NoInv, - NoInvEncoded, - wire.MultipleAddressVersion, - }, - - // Protocol version MultipleAddressVersion with multiple inv vectors. - { - MultiInv, - MultiInv, - MultiInvEncoded, - wire.MultipleAddressVersion, - }, } t.Logf("Running %d tests", len(tests)) @@ -240,7 +179,7 @@ func TestGetDataWireErrors(t *testing.T) { // Block 203707 hash. hashStr := "3264bc2ac36a60840790ba1d475d01367e7c723da941069e9dc" - blockHash, err := wire.NewShaHashFromStr(hashStr) + blockHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } diff --git a/wire/msggetheaders.go b/wire/msggetheaders.go index 8275de4e..15f3c9f0 100644 --- a/wire/msggetheaders.go +++ b/wire/msggetheaders.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -7,9 +8,11 @@ package wire import ( "fmt" "io" + + "github.com/decred/dcrd/chaincfg/chainhash" ) -// MsgGetHeaders implements the Message interface and represents a bitcoin +// MsgGetHeaders implements the Message interface and represents a decred // getheaders message. It is used to request a list of block headers for // blocks starting after the last known hash in the slice of block locator // hashes. The list is returned via a headers message (MsgHeaders) and is @@ -27,12 +30,12 @@ import ( // closer to the genesis block you get. type MsgGetHeaders struct { ProtocolVersion uint32 - BlockLocatorHashes []*ShaHash - HashStop ShaHash + BlockLocatorHashes []*chainhash.Hash + HashStop chainhash.Hash } // AddBlockLocatorHash adds a new block locator hash to the message. -func (msg *MsgGetHeaders) AddBlockLocatorHash(hash *ShaHash) error { +func (msg *MsgGetHeaders) AddBlockLocatorHash(hash *chainhash.Hash) error { if len(msg.BlockLocatorHashes)+1 > MaxBlockLocatorsPerMsg { str := fmt.Sprintf("too many block locator hashes for message [max %v]", MaxBlockLocatorsPerMsg) @@ -43,7 +46,7 @@ func (msg *MsgGetHeaders) AddBlockLocatorHash(hash *ShaHash) error { return nil } -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgGetHeaders) BtcDecode(r io.Reader, pver uint32) error { err := readElement(r, &msg.ProtocolVersion) @@ -62,9 +65,9 @@ func (msg *MsgGetHeaders) BtcDecode(r io.Reader, pver uint32) error { return messageError("MsgGetHeaders.BtcDecode", str) } - msg.BlockLocatorHashes = make([]*ShaHash, 0, count) + msg.BlockLocatorHashes = make([]*chainhash.Hash, 0, count) for i := uint64(0); i < count; i++ { - sha := ShaHash{} + sha := chainhash.Hash{} err := readElement(r, &sha) if err != nil { return err @@ -80,7 +83,7 @@ func (msg *MsgGetHeaders) BtcDecode(r io.Reader, pver uint32) error { return nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. // This is part of the Message interface implementation. 
func (msg *MsgGetHeaders) BtcEncode(w io.Writer, pver uint32) error { // Limit to max block locator hashes per message. @@ -127,13 +130,13 @@ func (msg *MsgGetHeaders) Command() string { func (msg *MsgGetHeaders) MaxPayloadLength(pver uint32) uint32 { // Version 4 bytes + num block locator hashes (varInt) + max allowed block // locators + hash stop. - return 4 + MaxVarIntPayload + (MaxBlockLocatorsPerMsg * HashSize) + HashSize + return 4 + MaxVarIntPayload + (MaxBlockLocatorsPerMsg * chainhash.HashSize) + chainhash.HashSize } -// NewMsgGetHeaders returns a new bitcoin getheaders message that conforms to +// NewMsgGetHeaders returns a new decred getheaders message that conforms to // the Message interface. See MsgGetHeaders for details. func NewMsgGetHeaders() *MsgGetHeaders { return &MsgGetHeaders{ - BlockLocatorHashes: make([]*ShaHash, 0, MaxBlockLocatorsPerMsg), + BlockLocatorHashes: make([]*chainhash.Hash, 0, MaxBlockLocatorsPerMsg), } } diff --git a/wire/msggetheaders_test.go b/wire/msggetheaders_test.go index a87fa4ed..85571e91 100644 --- a/wire/msggetheaders_test.go +++ b/wire/msggetheaders_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -10,8 +11,10 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" ) // TestGetHeaders tests the MsgGetHeader API. @@ -20,7 +23,7 @@ func TestGetHeaders(t *testing.T) { // Block 99500 hash. hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0" - locatorHash, err := wire.NewShaHashFromStr(hashStr) + locatorHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } @@ -79,21 +82,21 @@ func TestGetHeadersWire(t *testing.T) { // Block 99499 hash. hashStr := "2710f40c87ec93d010a6fd95f42c59a2cbacc60b18cf6b7957535" - hashLocator, err := wire.NewShaHashFromStr(hashStr) + hashLocator, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Block 99500 hash. hashStr = "2e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0" - hashLocator2, err := wire.NewShaHashFromStr(hashStr) + hashLocator2, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Block 100000 hash. hashStr = "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" - hashStop, err := wire.NewShaHashFromStr(hashStr) + hashStop, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } @@ -154,70 +157,6 @@ func TestGetHeadersWire(t *testing.T) { multiLocatorsEncoded, wire.ProtocolVersion, }, - - // Protocol version BIP0035Version with no block locators. - { - noLocators, - noLocators, - noLocatorsEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0035Version with multiple block locators. - { - multiLocators, - multiLocators, - multiLocatorsEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0031Version with no block locators. - { - noLocators, - noLocators, - noLocatorsEncoded, - wire.BIP0031Version, - }, - - // Protocol version BIP0031Versionwith multiple block locators. - { - multiLocators, - multiLocators, - multiLocatorsEncoded, - wire.BIP0031Version, - }, - - // Protocol version NetAddressTimeVersion with no block locators. 
- { - noLocators, - noLocators, - noLocatorsEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version NetAddressTimeVersion multiple block locators. - { - multiLocators, - multiLocators, - multiLocatorsEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version MultipleAddressVersion with no block locators. - { - noLocators, - noLocators, - noLocatorsEncoded, - wire.MultipleAddressVersion, - }, - - // Protocol version MultipleAddressVersion multiple block locators. - { - multiLocators, - multiLocators, - multiLocatorsEncoded, - wire.MultipleAddressVersion, - }, } t.Logf("Running %d tests", len(tests)) @@ -262,21 +201,21 @@ func TestGetHeadersWireErrors(t *testing.T) { // Block 99499 hash. hashStr := "2710f40c87ec93d010a6fd95f42c59a2cbacc60b18cf6b7957535" - hashLocator, err := wire.NewShaHashFromStr(hashStr) + hashLocator, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Block 99500 hash. hashStr = "2e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0" - hashLocator2, err := wire.NewShaHashFromStr(hashStr) + hashLocator2, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Block 100000 hash. hashStr = "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" - hashStop, err := wire.NewShaHashFromStr(hashStr) + hashStop, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } diff --git a/wire/msggetminingstate.go b/wire/msggetminingstate.go new file mode 100644 index 00000000..23920ab9 --- /dev/null +++ b/wire/msggetminingstate.go @@ -0,0 +1,45 @@ +// Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package wire + +import ( + "io" +) + +// MsgGetMiningState implements the Message interface and represents a +// getminingstate message. It is used to request the current mining state +// from a peer. +type MsgGetMiningState struct{} + +// BtcDecode decodes r using the decred protocol encoding into the receiver. +// This is part of the Message interface implementation. +func (msg *MsgGetMiningState) BtcDecode(r io.Reader, pver uint32) error { + return nil +} + +// BtcEncode encodes the receiver to w using the decred protocol encoding. +// This is part of the Message interface implementation. +func (msg *MsgGetMiningState) BtcEncode(w io.Writer, pver uint32) error { + return nil +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgGetMiningState) Command() string { + return CmdGetMiningState +} + +// MaxPayloadLength returns the maximum length the payload can be for the +// receiver. This is part of the Message interface implementation. +func (msg *MsgGetMiningState) MaxPayloadLength(pver uint32) uint32 { + return 0 +} + +// NewMsgGetMiningState returns a new decred getminingstate message that conforms to the Message +// interface. See MsgGetMiningState for details. +func NewMsgGetMiningState() *MsgGetMiningState { + return &MsgGetMiningState{} +} diff --git a/wire/msgheaders.go b/wire/msgheaders.go index efdb46d7..ace822d6 100644 --- a/wire/msgheaders.go +++ b/wire/msgheaders.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file.
@@ -10,10 +11,10 @@ import ( ) // MaxBlockHeadersPerMsg is the maximum number of block headers that can be in -// a single bitcoin headers message. +// a single decred headers message. const MaxBlockHeadersPerMsg = 2000 -// MsgHeaders implements the Message interface and represents a bitcoin headers +// MsgHeaders implements the Message interface and represents a decred headers // message. It is used to deliver block header information in response // to a getheaders message (MsgGetHeaders). The maximum number of block headers // per message is currently 2000. See MsgGetHeaders for details on requesting @@ -34,7 +35,7 @@ func (msg *MsgHeaders) AddBlockHeader(bh *BlockHeader) error { return nil } -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgHeaders) BtcDecode(r io.Reader, pver uint32) error { count, err := readVarInt(r, pver) @@ -74,7 +75,7 @@ func (msg *MsgHeaders) BtcDecode(r io.Reader, pver uint32) error { return nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. // This is part of the Message interface implementation. func (msg *MsgHeaders) BtcEncode(w io.Writer, pver uint32) error { // Limit to max block headers per message. @@ -125,7 +126,7 @@ func (msg *MsgHeaders) MaxPayloadLength(pver uint32) uint32 { MaxBlockHeadersPerMsg) } -// NewMsgHeaders returns a new bitcoin headers message that conforms to the +// NewMsgHeaders returns a new decred headers message that conforms to the // Message interface. See MsgHeaders for details. func NewMsgHeaders() *MsgHeaders { return &MsgHeaders{ diff --git a/wire/msgheaders_test.go b/wire/msgheaders_test.go index ee80ac42..69afa5e3 100644 --- a/wire/msgheaders_test.go +++ b/wire/msgheaders_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -9,14 +10,15 @@ import ( "io" "reflect" "testing" + "time" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + "github.com/decred/dcrd/wire" ) // TestHeaders tests the MsgHeaders API. func TestHeaders(t *testing.T) { - pver := uint32(60002) + pver := uint32(1) // Ensure the command is expected value. wantCmd := "headers" @@ -29,7 +31,7 @@ func TestHeaders(t *testing.T) { // Ensure max payload is expected value for latest protocol version. // Num headers (varInt) + max allowed headers (header length + 1 byte // for the number of transactions which is always 0). - wantPayload := uint32(162009) + wantPayload := uint32(362009) maxPayload := msg.MaxPayloadLength(pver) if maxPayload != wantPayload { t.Errorf("MaxPayloadLength: wrong max payload length for "+ @@ -38,7 +40,7 @@ func TestHeaders(t *testing.T) { } // Ensure headers are added properly. - bh := &blockOne.Header + bh := &testBlock.Header msg.AddBlockHeader(bh) if !reflect.DeepEqual(msg.Headers[0], bh) { t.Errorf("AddHeader: wrong header - got %v, want %v", @@ -63,13 +65,25 @@ func TestHeaders(t *testing.T) { // TestHeadersWire tests the MsgHeaders wire encode and decode for various // numbers of headers and protocol versions. 
func TestHeadersWire(t *testing.T) { - hash := mainNetGenesisHash - merkleHash := blockOne.Header.MerkleRoot - bits := uint32(0x1d00ffff) - nonce := uint32(0x9962e301) - bh := wire.NewBlockHeader(&hash, &merkleHash, bits, nonce) - bh.Version = blockOne.Header.Version - bh.Timestamp = blockOne.Header.Timestamp + bh := wire.NewBlockHeader( + testBlock.Header.Version, // Version + &mainNetGenesisHash, // PrevHash + &testBlock.Header.MerkleRoot, // MerkleRootHash + &testBlock.Header.StakeRoot, // StakeRoot + uint16(0x0000), // VoteBits + [6]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // FinalState + uint16(0x0000), // Voters + uint8(0x00), // FreshStake + uint8(0x00), // Revocations + uint32(0), // Poolsize + uint32(0x1d00ffff), // Bits + int64(0x0000000000000000), // Sbits + uint32(0), // Height + uint32(0), // Size + uint32(0x01010101), // Nonce + [36]byte{}, // ExtraData + ) + bh.Timestamp = time.Unix(0x4966bc61, 0) // Empty headers message. noHeaders := wire.NewMsgHeaders() @@ -91,9 +105,27 @@ func TestHeadersWire(t *testing.T) { 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot - 0x61, 0xbc, 0x66, 0x49, // Timestamp + 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, + 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, + 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, + 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // StakeRoot + 0x00, 0x00, // VoteBits + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // FinalState + 0x00, 0x00, // Voters + 0x00, // FreshStake + 0x00, // Revocations + 0x00, 0x00, 0x00, 0x00, // Poolsize 0xff, 0xff, 0x00, 0x1d, // Bits - 0x01, 0xe3, 0x62, 0x99, // Nonce + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // SBits + 0x00, 0x00, 0x00, 0x00, // Height + 0x00, 0x00, 0x00, 0x00, // Size + 0x61, 0xbc, 0x66, 0x49, // Timestamp + 0x01, 0x01, 0x01, 0x01, // Nonce + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ExtraData + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, // TxnCount (0 for headers message) } @@ -118,69 +150,6 @@ func TestHeadersWire(t *testing.T) { oneHeaderEncoded, wire.ProtocolVersion, }, - - // Protocol version BIP0035Version with no headers. - { - noHeaders, - noHeaders, - noHeadersEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0035Version with one header. - { - oneHeader, - oneHeader, - oneHeaderEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0031Version with no headers. - { - noHeaders, - noHeaders, - noHeadersEncoded, - wire.BIP0031Version, - }, - - // Protocol version BIP0031Version with one header. - { - oneHeader, - oneHeader, - oneHeaderEncoded, - wire.BIP0031Version, - }, - // Protocol version NetAddressTimeVersion with no headers. - { - noHeaders, - noHeaders, - noHeadersEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version NetAddressTimeVersion with one header. - { - oneHeader, - oneHeader, - oneHeaderEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version MultipleAddressVersion with no headers. - { - noHeaders, - noHeaders, - noHeadersEncoded, - wire.MultipleAddressVersion, - }, - - // Protocol version MultipleAddressVersion with one header. 
-		{
-			oneHeader,
-			oneHeader,
-			oneHeaderEncoded,
-			wire.MultipleAddressVersion,
-		},
 	}

 	t.Logf("Running %d tests", len(tests))
@@ -221,12 +190,30 @@ func TestHeadersWireErrors(t *testing.T) {
 	wireErr := &wire.MessageError{}

 	hash := mainNetGenesisHash
-	merkleHash := blockOne.Header.MerkleRoot
+	merkleHash := testBlock.Header.MerkleRoot
 	bits := uint32(0x1d00ffff)
 	nonce := uint32(0x9962e301)
-	bh := wire.NewBlockHeader(&hash, &merkleHash, bits, nonce)
-	bh.Version = blockOne.Header.Version
-	bh.Timestamp = blockOne.Header.Timestamp
+	bh := wire.NewBlockHeader(
+		int32(pver),    // Version
+		&hash,          // PrevHash
+		&merkleHash,    // MerkleRootHash
+		&merkleHash,    // StakeRoot
+		uint16(0x0000), // VoteBits
+		[6]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // FinalState
+		uint16(0x0000),            // Voters
+		uint8(0x00),               // FreshStake
+		uint8(0x00),               // Revocations
+		uint32(0),                 // Poolsize
+		bits,                      // Bits
+		int64(0x0000000000000000), // Sbits
+		uint32(1),                 // Height
+		uint32(0),                 // Size
+		nonce,                     // Nonce
+		[36]byte{},                // ExtraData
+	)
+
+	bh.Version = testBlock.Header.Version
+	bh.Timestamp = testBlock.Header.Timestamp

 	// Headers message with one header.
 	oneHeader := wire.NewMsgHeaders()
@@ -242,8 +229,19 @@ func TestHeadersWireErrors(t *testing.T) {
 		0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67,
 		0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1,
 		0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot
+		0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44,
+		0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67,
+		0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1,
+		0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // StakeRoot
+		0x00, 0x00, // VoteBits
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // FinalState
+		0x00, 0x00, // Voters
+		0x00, // FreshStake
+		0x00, // Revocations
+		0x00, 0x00, 0x00, 0x00, // Poolsize
 		0x61, 0xbc, 0x66, 0x49, // Timestamp
 		0xff, 0xff, 0x00, 0x1d, // Bits
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // SBits
 		0x01, 0xe3, 0x62, 0x99, // Nonce
 		0x00, // TxnCount (0 for headers message)
 	}
@@ -261,9 +259,26 @@ func TestHeadersWireErrors(t *testing.T) {

 	// Intentionally invalid block header that has a transaction count used
 	// to force errors.
-	bhTrans := wire.NewBlockHeader(&hash, &merkleHash, bits, nonce)
-	bhTrans.Version = blockOne.Header.Version
-	bhTrans.Timestamp = blockOne.Header.Timestamp
+	bhTrans := wire.NewBlockHeader(
+		int32(0),       // Version
+		&hash,          // PrevHash
+		&merkleHash,    // MerkleRootHash
+		&merkleHash,    // StakeRoot
+		uint16(0x0000), // VoteBits
+		[6]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // FinalState
+		uint16(0x0000),            // Voters
+		uint8(0x00),               // FreshStake
+		uint8(0x00),               // Revocations
+		uint32(0),                 // Poolsize
+		bits,                      // Bits
+		int64(0x0000000000000000), // Sbits
+		uint32(1),                 // Height
+		uint32(0),                 // Size
+		nonce,                     // Nonce
+		[36]byte{},                // ExtraData
+	)
+	bhTrans.Version = testBlock.Header.Version
+	bhTrans.Timestamp = testBlock.Header.Timestamp
 	transHeader := wire.NewMsgHeaders()
 	transHeader.AddBlockHeader(bhTrans)
@@ -274,12 +289,23 @@ func TestHeadersWireErrors(t *testing.T) {
 		0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
 		0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
 		0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock
-		0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44,
-		0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67,
-		0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1,
-		0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot
+		0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2,
+		0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61,
+		0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32,
+		0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, // MerkleRoot
+		0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2,
+		0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61,
+		0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32,
+		0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, // StakeRoot
+		0x00, 0x00, // VoteBits
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // FinalState
+		0x00, 0x00, // Voters
+		0x00, // FreshStake
+		0x00, // Revocations
+		0x00, 0x00, 0x00, 0x00, // Poolsize
 		0x61, 0xbc, 0x66, 0x49, // Timestamp
 		0xff, 0xff, 0x00, 0x1d, // Bits
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // SBits
 		0x01, 0xe3, 0x62, 0x99, // Nonce
 		0x01, // TxnCount (should be 0 for headers message, but 1 to force error)
 	}
@@ -300,9 +326,9 @@ func TestHeadersWireErrors(t *testing.T) {
 		// Force error with greater than max headers.
 		{maxHeaders, maxHeadersEncoded, pver, 3, wireErr, wireErr},
 		// Force error with number of transactions.
-		{transHeader, transHeaderEncoded, pver, 81, io.ErrShortWrite, io.EOF},
+		{transHeader, transHeaderEncoded, pver, 181, io.ErrShortWrite, io.EOF},
 		// Force error with included transactions.
-		{transHeader, transHeaderEncoded, pver, len(transHeaderEncoded), nil, wireErr},
+		{transHeader, transHeaderEncoded, pver, len(transHeaderEncoded), io.ErrShortWrite, io.ErrUnexpectedEOF},
 	}

 	t.Logf("Running %d tests", len(tests))
@@ -331,6 +357,7 @@ func TestHeadersWireErrors(t *testing.T) {
 		r := newFixedReader(test.max, test.buf)
 		err = msg.BtcDecode(r, test.pver)
 		if reflect.TypeOf(err) != reflect.TypeOf(test.readErr) {
+			spew.Dump(test)
 			t.Errorf("BtcDecode #%d wrong error got: %v, want: %v",
 				i, err, test.readErr)
 			continue
diff --git a/wire/msginv.go b/wire/msginv.go
index 66af52a7..76ae7637 100644
--- a/wire/msginv.go
+++ b/wire/msginv.go
@@ -1,4 +1,5 @@
 // Copyright (c) 2013-2015 The btcsuite developers
+// Copyright (c) 2015 The Decred developers
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.
@@ -18,7 +19,7 @@ import (
 // typical case.
 const defaultInvListAlloc = 1000

-// MsgInv implements the Message interface and represents a bitcoin inv message.
+// MsgInv implements the Message interface and represents a decred inv message. // It is used to advertise a peer's known data such as blocks and transactions // through inventory vectors. It may be sent unsolicited to inform other peers // of the data or in response to a getblocks message (MsgGetBlocks). Each @@ -43,7 +44,7 @@ func (msg *MsgInv) AddInvVect(iv *InvVect) error { return nil } -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgInv) BtcDecode(r io.Reader, pver uint32) error { count, err := readVarInt(r, pver) @@ -70,7 +71,7 @@ func (msg *MsgInv) BtcDecode(r io.Reader, pver uint32) error { return nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. // This is part of the Message interface implementation. func (msg *MsgInv) BtcEncode(w io.Writer, pver uint32) error { // Limit to max inventory vectors per message. @@ -108,7 +109,7 @@ func (msg *MsgInv) MaxPayloadLength(pver uint32) uint32 { return MaxVarIntPayload + (MaxInvPerMsg * maxInvVectPayload) } -// NewMsgInv returns a new bitcoin inv message that conforms to the Message +// NewMsgInv returns a new decred inv message that conforms to the Message // interface. See MsgInv for details. func NewMsgInv() *MsgInv { return &MsgInv{ @@ -116,7 +117,7 @@ func NewMsgInv() *MsgInv { } } -// NewMsgInvSizeHint returns a new bitcoin inv message that conforms to the +// NewMsgInvSizeHint returns a new decred inv message that conforms to the // Message interface. See MsgInv for details. This function differs from // NewMsgInv in that it allows a default allocation size for the backing array // which houses the inventory vector list. This allows callers who know in diff --git a/wire/msginv_test.go b/wire/msginv_test.go index de50933a..43720a9a 100644 --- a/wire/msginv_test.go +++ b/wire/msginv_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -10,8 +11,10 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" ) // TestInv tests the MsgInv API. @@ -37,7 +40,7 @@ func TestInv(t *testing.T) { } // Ensure inventory vectors are added properly. - hash := wire.ShaHash{} + hash := chainhash.Hash{} iv := wire.NewInvVect(wire.InvTypeBlock, &hash) err := msg.AddInvVect(iv) if err != nil { @@ -75,14 +78,14 @@ func TestInv(t *testing.T) { func TestInvWire(t *testing.T) { // Block 203707 hash. hashStr := "3264bc2ac36a60840790ba1d475d01367e7c723da941069e9dc" - blockHash, err := wire.NewShaHashFromStr(hashStr) + blockHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Transation 1 of Block 203707 hash. hashStr = "d28a3dc7392bf00a9855ee93dd9a81eff82a2c4fe57fbd42cfe71b487accfaf0" - txHash, err := wire.NewShaHashFromStr(hashStr) + txHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } @@ -135,70 +138,6 @@ func TestInvWire(t *testing.T) { MultiInvEncoded, wire.ProtocolVersion, }, - - // Protocol version BIP0035Version no inv vectors. 
- { - NoInv, - NoInv, - NoInvEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0035Version with multiple inv vectors. - { - MultiInv, - MultiInv, - MultiInvEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0031Version no inv vectors. - { - NoInv, - NoInv, - NoInvEncoded, - wire.BIP0031Version, - }, - - // Protocol version BIP0031Version with multiple inv vectors. - { - MultiInv, - MultiInv, - MultiInvEncoded, - wire.BIP0031Version, - }, - - // Protocol version NetAddressTimeVersion no inv vectors. - { - NoInv, - NoInv, - NoInvEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version NetAddressTimeVersion with multiple inv vectors. - { - MultiInv, - MultiInv, - MultiInvEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version MultipleAddressVersion no inv vectors. - { - NoInv, - NoInv, - NoInvEncoded, - wire.MultipleAddressVersion, - }, - - // Protocol version MultipleAddressVersion with multiple inv vectors. - { - MultiInv, - MultiInv, - MultiInvEncoded, - wire.MultipleAddressVersion, - }, } t.Logf("Running %d tests", len(tests)) @@ -240,7 +179,7 @@ func TestInvWireErrors(t *testing.T) { // Block 203707 hash. hashStr := "3264bc2ac36a60840790ba1d475d01367e7c723da941069e9dc" - blockHash, err := wire.NewShaHashFromStr(hashStr) + blockHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } diff --git a/wire/msgmempool.go b/wire/msgmempool.go index f6b08c2e..77eeb5d0 100644 --- a/wire/msgmempool.go +++ b/wire/msgmempool.go @@ -1,15 +1,15 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package wire import ( - "fmt" "io" ) -// MsgMemPool implements the Message interface and represents a bitcoin mempool +// MsgMemPool implements the Message interface and represents a decred mempool // message. It is used to request a list of transactions still in the active // memory pool of a relay. // @@ -17,27 +17,15 @@ import ( // starting with BIP0035Version. type MsgMemPool struct{} -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgMemPool) BtcDecode(r io.Reader, pver uint32) error { - if pver < BIP0035Version { - str := fmt.Sprintf("mempool message invalid for protocol "+ - "version %d", pver) - return messageError("MsgMemPool.BtcDecode", str) - } - return nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. // This is part of the Message interface implementation. func (msg *MsgMemPool) BtcEncode(w io.Writer, pver uint32) error { - if pver < BIP0035Version { - str := fmt.Sprintf("mempool message invalid for protocol "+ - "version %d", pver) - return messageError("MsgMemPool.BtcEncode", str) - } - return nil } @@ -53,7 +41,7 @@ func (msg *MsgMemPool) MaxPayloadLength(pver uint32) uint32 { return 0 } -// NewMsgMemPool returns a new bitcoin pong message that conforms to the Message +// NewMsgMemPool returns a new decred pong message that conforms to the Message // interface. See MsgPong for details. 
func NewMsgMemPool() *MsgMemPool { return &MsgMemPool{} diff --git a/wire/msgmempool_test.go b/wire/msgmempool_test.go index 5a08ec79..5b408ce6 100644 --- a/wire/msgmempool_test.go +++ b/wire/msgmempool_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,7 +9,7 @@ import ( "bytes" "testing" - "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/wire" ) func TestMemPool(t *testing.T) { @@ -38,15 +39,6 @@ func TestMemPool(t *testing.T) { t.Errorf("encode of MsgMemPool failed %v err <%v>", msg, err) } - // Older protocol versions should fail encode since message didn't - // exist yet. - oldPver := wire.BIP0035Version - 1 - err = msg.BtcEncode(&buf, oldPver) - if err == nil { - s := "encode of MsgMemPool passed for old protocol version %v err <%v>" - t.Errorf(s, msg, err) - } - // Test decode with latest protocol version. readmsg := wire.NewMsgMemPool() err = readmsg.BtcDecode(&buf, pver) @@ -54,13 +46,5 @@ func TestMemPool(t *testing.T) { t.Errorf("decode of MsgMemPool failed [%v] err <%v>", buf, err) } - // Older protocol versions should fail decode since message didn't - // exist yet. - err = readmsg.BtcDecode(&buf, oldPver) - if err == nil { - s := "decode of MsgMemPool passed for old protocol version %v err <%v>" - t.Errorf(s, msg, err) - } - return } diff --git a/wire/msgmerkleblock.go b/wire/msgmerkleblock.go index 268a89ca..6eb6c4ff 100644 --- a/wire/msgmerkleblock.go +++ b/wire/msgmerkleblock.go @@ -1,4 +1,5 @@ // Copyright (c) 2014-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -7,30 +8,34 @@ package wire import ( "fmt" "io" + + "github.com/decred/dcrd/chaincfg/chainhash" ) // maxFlagsPerMerkleBlock is the maximum number of flag bytes that could // possibly fit into a merkle block. Since each transaction is represented by // a single bit, this is the max number of transactions per block divided by // 8 bits per byte. Then an extra one to cover partials. -const maxFlagsPerMerkleBlock = maxTxPerBlock / 8 +const maxFlagsPerMerkleBlock = MaxTxPerTxTree / 8 -// MsgMerkleBlock implements the Message interface and represents a bitcoin +// MsgMerkleBlock implements the Message interface and represents a decred // merkleblock message which is used to reset a Bloom filter. // // This message was not added until protocol version BIP0037Version. type MsgMerkleBlock struct { - Header BlockHeader - Transactions uint32 - Hashes []*ShaHash - Flags []byte + Header BlockHeader + Transactions uint32 + Hashes []*chainhash.Hash + STransactions uint32 + SHashes []*chainhash.Hash + Flags []byte } // AddTxHash adds a new transaction hash to the message. -func (msg *MsgMerkleBlock) AddTxHash(hash *ShaHash) error { - if len(msg.Hashes)+1 > maxTxPerBlock { +func (msg *MsgMerkleBlock) AddTxHash(hash *chainhash.Hash) error { + if len(msg.Hashes)+1 > MaxTxPerTxTree { str := fmt.Sprintf("too many tx hashes for message [max %v]", - maxTxPerBlock) + MaxTxPerTxTree) return messageError("MsgMerkleBlock.AddTxHash", str) } @@ -38,15 +43,21 @@ func (msg *MsgMerkleBlock) AddTxHash(hash *ShaHash) error { return nil } -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. -// This is part of the Message interface implementation. 
-func (msg *MsgMerkleBlock) BtcDecode(r io.Reader, pver uint32) error {
-	if pver < BIP0037Version {
-		str := fmt.Sprintf("merkleblock message invalid for protocol "+
-			"version %d", pver)
-		return messageError("MsgMerkleBlock.BtcDecode", str)
+// AddSTxHash adds a new stake transaction hash to the message.
+func (msg *MsgMerkleBlock) AddSTxHash(hash *chainhash.Hash) error {
+	if len(msg.SHashes)+1 > MaxTxPerTxTree {
+		str := fmt.Sprintf("too many tx hashes for message [max %v]",
+			MaxTxPerTxTree)
+		return messageError("MsgMerkleBlock.AddSTxHash", str)
 	}

+	msg.SHashes = append(msg.SHashes, hash)
+	return nil
+}
+
+// BtcDecode decodes r using the decred protocol encoding into the receiver.
+// This is part of the Message interface implementation.
+func (msg *MsgMerkleBlock) BtcDecode(r io.Reader, pver uint32) error {
 	err := readBlockHeader(r, pver, &msg.Header)
 	if err != nil {
 		return err
@@ -62,15 +73,15 @@ func (msg *MsgMerkleBlock) BtcDecode(r io.Reader, pver uint32) error {
 	if err != nil {
 		return err
 	}
-	if count > maxTxPerBlock {
+	if count > MaxTxPerTxTree {
 		str := fmt.Sprintf("too many transaction hashes for message "+
-			"[count %v, max %v]", count, maxTxPerBlock)
+			"[count %v, max %v]", count, MaxTxPerTxTree)
 		return messageError("MsgMerkleBlock.BtcDecode", str)
 	}

-	msg.Hashes = make([]*ShaHash, 0, count)
+	msg.Hashes = make([]*chainhash.Hash, 0, count)
 	for i := uint64(0); i < count; i++ {
-		var sha ShaHash
+		var sha chainhash.Hash
 		err := readElement(r, &sha)
 		if err != nil {
 			return err
@@ -78,6 +89,32 @@ func (msg *MsgMerkleBlock) BtcDecode(r io.Reader, pver uint32) error {
 		msg.AddTxHash(&sha)
 	}

+	err = readElement(r, &msg.STransactions)
+	if err != nil {
+		return err
+	}
+
+	// Read num stake transaction hashes and limit to max.
+	scount, err := readVarInt(r, pver)
+	if err != nil {
+		return err
+	}
+	if scount > MaxTxPerTxTree {
+		str := fmt.Sprintf("too many stake transaction hashes for message "+
+			"[count %v, max %v]", scount, MaxTxPerTxTree)
+		return messageError("MsgMerkleBlock.BtcDecode", str)
+	}
+
+	msg.SHashes = make([]*chainhash.Hash, 0, scount)
+	for i := uint64(0); i < scount; i++ {
+		var sha chainhash.Hash
+		err := readElement(r, &sha)
+		if err != nil {
+			return err
+		}
+		msg.AddSTxHash(&sha)
+	}
+
 	msg.Flags, err = readVarBytes(r, pver, maxFlagsPerMerkleBlock,
 		"merkle block flags size")
 	if err != nil {
@@ -87,22 +124,24 @@ func (msg *MsgMerkleBlock) BtcDecode(r io.Reader, pver uint32) error {
 	return nil
 }

-// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
+// BtcEncode encodes the receiver to w using the decred protocol encoding.
 // This is part of the Message interface implementation.
 func (msg *MsgMerkleBlock) BtcEncode(w io.Writer, pver uint32) error {
-	if pver < BIP0037Version {
-		str := fmt.Sprintf("merkleblock message invalid for protocol "+
-			"version %d", pver)
-		return messageError("MsgMerkleBlock.BtcEncode", str)
-	}
-
 	// Read num transaction hashes and limit to max.
 	numHashes := len(msg.Hashes)
-	if numHashes > maxTxPerBlock {
+	if numHashes > MaxTxPerTxTree {
 		str := fmt.Sprintf("too many transaction hashes for message "+
-			"[count %v, max %v]", numHashes, maxTxPerBlock)
+			"[count %v, max %v]", numHashes, MaxTxPerTxTree)
 		return messageError("MsgMerkleBlock.BtcDecode", str)
 	}

+	// Read num stake transaction hashes and limit to max.
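+	// The stake tree is bounded by the same per-tree cap (MaxTxPerTxTree)
+	// that applies to the regular transaction tree above.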
+	numSHashes := len(msg.SHashes)
+	if numSHashes > MaxTxPerTxTree {
+		str := fmt.Sprintf("too many stake transaction hashes for message "+
+			"[count %v, max %v]", numSHashes, MaxTxPerTxTree)
+		return messageError("MsgMerkleBlock.BtcDecode", str)
+	}
+
 	numFlagBytes := len(msg.Flags)
 	if numFlagBytes > maxFlagsPerMerkleBlock {
 		str := fmt.Sprintf("too many flag bytes for message [count %v, "+
@@ -131,6 +170,22 @@ func (msg *MsgMerkleBlock) BtcEncode(w io.Writer, pver uint32) error {
 		}
 	}

+	err = writeElement(w, msg.STransactions)
+	if err != nil {
+		return err
+	}
+
+	err = writeVarInt(w, pver, uint64(numSHashes))
+	if err != nil {
+		return err
+	}
+	for _, hash := range msg.SHashes {
+		err = writeElement(w, hash)
+		if err != nil {
+			return err
+		}
+	}
+
 	err = writeVarBytes(w, pver, msg.Flags)
 	if err != nil {
 		return err
@@ -151,13 +206,14 @@ func (msg *MsgMerkleBlock) MaxPayloadLength(pver uint32) uint32 {
 	return MaxBlockPayload
 }

-// NewMsgMerkleBlock returns a new bitcoin merkleblock message that conforms to
+// NewMsgMerkleBlock returns a new decred merkleblock message that conforms to
 // the Message interface. See MsgMerkleBlock for details.
 func NewMsgMerkleBlock(bh *BlockHeader) *MsgMerkleBlock {
 	return &MsgMerkleBlock{
 		Header:       *bh,
 		Transactions: 0,
-		Hashes:       make([]*ShaHash, 0),
+		Hashes:       make([]*chainhash.Hash, 0),
+		SHashes:      make([]*chainhash.Hash, 0),
 		Flags:        make([]byte, 0),
 	}
 }
diff --git a/wire/msgmerkleblock_test.go b/wire/msgmerkleblock_test.go
index 482a1c4a..2e157c8d 100644
--- a/wire/msgmerkleblock_test.go
+++ b/wire/msgmerkleblock_test.go
@@ -1,4 +1,5 @@
 // Copyright (c) 2014-2015 The btcsuite developers
+// Copyright (c) 2015 The Decred developers
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.
@@ -12,20 +13,35 @@ import (
 	"testing"
 	"time"

-	"github.com/btcsuite/btcd/wire"
 	"github.com/davecgh/go-spew/spew"
+
+	"github.com/decred/dcrd/chaincfg/chainhash"
+	"github.com/decred/dcrd/wire"
 )

 // TestMerkleBlock tests the MsgMerkleBlock API.
 func TestMerkleBlock(t *testing.T) {
 	pver := wire.ProtocolVersion

-	// Block 1 header.
-	prevHash := &blockOne.Header.PrevBlock
-	merkleHash := &blockOne.Header.MerkleRoot
-	bits := blockOne.Header.Bits
-	nonce := blockOne.Header.Nonce
-	bh := wire.NewBlockHeader(prevHash, merkleHash, bits, nonce)
+	// Test block header.
+	bh := wire.NewBlockHeader(
+		int32(pver),
+		&testBlock.Header.PrevBlock,  // PrevHash
+		&testBlock.Header.MerkleRoot, // MerkleRootHash
+		&testBlock.Header.StakeRoot,  // StakeRoot
+		uint16(0x0000),               // VoteBits
+		[6]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // FinalState
+		uint16(0x0000),            // Voters
+		uint8(0x00),               // FreshStake
+		uint8(0x00),               // Revocations
+		uint32(0),                 // Poolsize
+		testBlock.Header.Bits,     // Bits
+		int64(0x0000000000000000), // Sbits
+		uint32(1),                 // Height
+		uint32(0),                 // Size
+		testBlock.Header.Nonce,    // Nonce
+		[36]byte{},                // ExtraData
+	)

 	// Ensure the command is expected value.
wantCmd := "merkleblock" @@ -47,9 +63,9 @@ func TestMerkleBlock(t *testing.T) { // Load maxTxPerBlock hashes data := make([]byte, 32) - for i := 0; i < wire.MaxTxPerBlock; i++ { + for i := 0; i < wire.MaxTxPerTxTree; i++ { rand.Read(data) - hash, err := wire.NewShaHash(data) + hash, err := chainhash.NewHash(data) if err != nil { t.Errorf("NewShaHash failed: %v\n", err) return @@ -59,11 +75,15 @@ func TestMerkleBlock(t *testing.T) { t.Errorf("AddTxHash failed: %v\n", err) return } + if err = msg.AddSTxHash(hash); err != nil { + t.Errorf("AddSTxHash failed: %v\n", err) + return + } } // Add one more Tx to test failure. rand.Read(data) - hash, err := wire.NewShaHash(data) + hash, err := chainhash.NewHash(data) if err != nil { t.Errorf("NewShaHash failed: %v\n", err) return @@ -74,6 +94,19 @@ func TestMerkleBlock(t *testing.T) { return } + // Add one more STx to test failure. + rand.Read(data) + hash, err = chainhash.NewHash(data) + if err != nil { + t.Errorf("NewShaHash failed: %v\n", err) + return + } + + if err = msg.AddSTxHash(hash); err == nil { + t.Errorf("AddTxHash succeeded when it should have failed") + return + } + // Test encode with latest protocol version. var buf bytes.Buffer err = msg.BtcEncode(&buf, pver) @@ -109,35 +142,6 @@ func TestMerkleBlock(t *testing.T) { } } -// TestMerkleBlockCrossProtocol tests the MsgMerkleBlock API when encoding with -// the latest protocol version and decoding with BIP0031Version. -func TestMerkleBlockCrossProtocol(t *testing.T) { - // Block 1 header. - prevHash := &blockOne.Header.PrevBlock - merkleHash := &blockOne.Header.MerkleRoot - bits := blockOne.Header.Bits - nonce := blockOne.Header.Nonce - bh := wire.NewBlockHeader(prevHash, merkleHash, bits, nonce) - - msg := wire.NewMsgMerkleBlock(bh) - - // Encode with latest protocol version. - var buf bytes.Buffer - err := msg.BtcEncode(&buf, wire.ProtocolVersion) - if err != nil { - t.Errorf("encode of NewMsgFilterLoad failed %v err <%v>", msg, - err) - } - - // Decode with old protocol version. - var readmsg wire.MsgFilterLoad - err = readmsg.BtcDecode(&buf, wire.BIP0031Version) - if err == nil { - t.Errorf("decode of MsgFilterLoad succeeded when it shouldn't have %v", - msg) - } -} - // TestMerkleBlockWire tests the MsgMerkleBlock wire encode and decode for // various numbers of transaction hashes and protocol versions. func TestMerkleBlockWire(t *testing.T) { @@ -149,14 +153,8 @@ func TestMerkleBlockWire(t *testing.T) { }{ // Latest protocol version. { - &merkleBlockOne, &merkleBlockOne, merkleBlockOneBytes, - wire.ProtocolVersion, - }, - - // Protocol version BIP0037Version. - { - &merkleBlockOne, &merkleBlockOne, merkleBlockOneBytes, - wire.BIP0037Version, + &testMerkleBlock, &testMerkleBlock, + testMerkleBlockBytes, wire.ProtocolVersion, }, } @@ -197,9 +195,7 @@ func TestMerkleBlockWireErrors(t *testing.T) { // Use protocol version 70001 specifically here instead of the latest // because the test data is using bytes encoded with that protocol // version. - pver := uint32(70001) - pverNoMerkleBlock := wire.BIP0037Version - 1 - wireErr := &wire.MessageError{} + pver := uint32(1) tests := []struct { in *wire.MsgMerkleBlock // Value to encode @@ -209,65 +205,110 @@ func TestMerkleBlockWireErrors(t *testing.T) { writeErr error // Expected write error readErr error // Expected read error }{ - // Force error in version. + // Force error in version. 
[0] { - &merkleBlockOne, merkleBlockOneBytes, pver, 0, + &testMerkleBlock, testMerkleBlockBytes, pver, 0, io.ErrShortWrite, io.EOF, }, - // Force error in prev block hash. + // Force error in prev block hash. [1] { - &merkleBlockOne, merkleBlockOneBytes, pver, 4, + &testMerkleBlock, testMerkleBlockBytes, pver, 4, io.ErrShortWrite, io.EOF, }, - // Force error in merkle root. + // Force error in merkle root. [2] { - &merkleBlockOne, merkleBlockOneBytes, pver, 36, + &testMerkleBlock, testMerkleBlockBytes, pver, 36, io.ErrShortWrite, io.EOF, }, - // Force error in timestamp. + // Force error in stake merkle root. [3] { - &merkleBlockOne, merkleBlockOneBytes, pver, 68, + &testMerkleBlock, testMerkleBlockBytes, pver, 68, io.ErrShortWrite, io.EOF, }, - // Force error in difficulty bits. + // Force error in VoteBits. [4] { - &merkleBlockOne, merkleBlockOneBytes, pver, 72, + &testMerkleBlock, testMerkleBlockBytes, pver, 100, io.ErrShortWrite, io.EOF, }, - // Force error in header nonce. + // Force error in FinalState. [5] { - &merkleBlockOne, merkleBlockOneBytes, pver, 76, + &testMerkleBlock, testMerkleBlockBytes, pver, 102, io.ErrShortWrite, io.EOF, }, - // Force error in transaction count. + // Force error in Voters. [6] { - &merkleBlockOne, merkleBlockOneBytes, pver, 80, + &testMerkleBlock, testMerkleBlockBytes, pver, 108, io.ErrShortWrite, io.EOF, }, - // Force error in num hashes. + // Force error in FreshStake. [7] { - &merkleBlockOne, merkleBlockOneBytes, pver, 84, + &testMerkleBlock, testMerkleBlockBytes, pver, 110, io.ErrShortWrite, io.EOF, }, - // Force error in hashes. + // Force error in Revocations. [8] { - &merkleBlockOne, merkleBlockOneBytes, pver, 85, + &testMerkleBlock, testMerkleBlockBytes, pver, 111, io.ErrShortWrite, io.EOF, }, - // Force error in num flag bytes. + // Force error in poolsize. [9] { - &merkleBlockOne, merkleBlockOneBytes, pver, 117, + &testMerkleBlock, testMerkleBlockBytes, pver, 112, io.ErrShortWrite, io.EOF, }, - // Force error in flag bytes. + // Force error in difficulty bits. [10] { - &merkleBlockOne, merkleBlockOneBytes, pver, 118, + &testMerkleBlock, testMerkleBlockBytes, pver, 116, io.ErrShortWrite, io.EOF, }, - // Force error due to unsupported protocol version. + // Force error in stake difficulty bits. [11] { - &merkleBlockOne, merkleBlockOneBytes, pverNoMerkleBlock, - 119, wireErr, wireErr, + &testMerkleBlock, testMerkleBlockBytes, pver, 120, + io.ErrShortWrite, io.EOF, + }, + // Force error in height. [12] + { + &testMerkleBlock, testMerkleBlockBytes, pver, 128, + io.ErrShortWrite, io.EOF, + }, + // Force error in size. [13] + { + &testMerkleBlock, testMerkleBlockBytes, pver, 132, + io.ErrShortWrite, io.EOF, + }, + // Force error in timestamp. [14] + { + &testMerkleBlock, testMerkleBlockBytes, pver, 136, + io.ErrShortWrite, io.EOF, + }, + // Force error in header nonce. [15] + { + &testMerkleBlock, testMerkleBlockBytes, pver, 140, + io.ErrShortWrite, io.EOF, + }, + // Force error in transaction count. [16] + { + &testMerkleBlock, testMerkleBlockBytes, pver, 180, + io.ErrShortWrite, io.EOF, + }, + // Force error in num hashes. [17] + { + &testMerkleBlock, testMerkleBlockBytes, pver, 184, + io.ErrShortWrite, io.EOF, + }, + // Force error in hashes. [18] + { + &testMerkleBlock, testMerkleBlockBytes, pver, 185, + io.ErrShortWrite, io.EOF, + }, + // Force error in num flag bytes. [19] + { + &testMerkleBlock, testMerkleBlockBytes, pver, 254, + io.ErrShortWrite, io.EOF, + }, + // Force error in flag bytes. 
[20] + { + &testMerkleBlock, testMerkleBlockBytes, pver, 255, + io.ErrShortWrite, io.EOF, }, } @@ -322,24 +363,24 @@ func TestMerkleBlockOverflowErrors(t *testing.T) { // Use protocol version 70001 specifically here instead of the latest // protocol version because the test data is using bytes encoded with // that version. - pver := uint32(70001) + pver := uint32(1) // Create bytes for a merkle block that claims to have more than the max // allowed tx hashes. var buf bytes.Buffer - wire.TstWriteVarInt(&buf, pver, wire.MaxTxPerBlock+1) - numHashesOffset := 84 + wire.TstWriteVarInt(&buf, pver, wire.MaxTxPerTxTree+1) + numHashesOffset := 140 exceedMaxHashes := make([]byte, numHashesOffset) - copy(exceedMaxHashes, merkleBlockOneBytes[:numHashesOffset]) + copy(exceedMaxHashes, testMerkleBlockBytes[:numHashesOffset]) exceedMaxHashes = append(exceedMaxHashes, buf.Bytes()...) // Create bytes for a merkle block that claims to have more than the max // allowed flag bytes. buf.Reset() wire.TstWriteVarInt(&buf, pver, wire.MaxFlagsPerMerkleBlock+1) - numFlagBytesOffset := 117 + numFlagBytesOffset := 210 exceedMaxFlagBytes := make([]byte, numFlagBytesOffset) - copy(exceedMaxFlagBytes, merkleBlockOneBytes[:numFlagBytesOffset]) + copy(exceedMaxFlagBytes, testMerkleBlockBytes[:numFlagBytesOffset]) exceedMaxFlagBytes = append(exceedMaxFlagBytes, buf.Bytes()...) tests := []struct { @@ -348,9 +389,9 @@ func TestMerkleBlockOverflowErrors(t *testing.T) { err error // Expected error }{ // Block that claims to have more than max allowed hashes. - {exceedMaxHashes, pver, &wire.MessageError{}}, + {exceedMaxHashes, pver, io.ErrUnexpectedEOF}, // Block that claims to have more than max allowed flag bytes. - {exceedMaxFlagBytes, pver, &wire.MessageError{}}, + {exceedMaxFlagBytes, pver, io.ErrUnexpectedEOF}, } t.Logf("Running %d tests", len(tests)) @@ -367,30 +408,51 @@ func TestMerkleBlockOverflowErrors(t *testing.T) { } } -// merkleBlockOne is a merkle block created from block one of the block chain -// where the first transaction matches. -var merkleBlockOne = wire.MsgMerkleBlock{ +// testMerkleBlock is a basic normative merkle block that is used throughout the +// tests. +var testMerkleBlock = wire.MsgMerkleBlock{ Header: wire.BlockHeader{ Version: 1, - PrevBlock: wire.ShaHash([wire.HashSize]byte{ // Make go vet happy. + PrevBlock: chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, }), - MerkleRoot: wire.ShaHash([wire.HashSize]byte{ // Make go vet happy. + MerkleRoot: chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, }), - Timestamp: time.Unix(0x4966bc61, 0), // 2009-01-08 20:54:25 -0600 CST - Bits: 0x1d00ffff, // 486604799 - Nonce: 0x9962e301, // 2573394689 + StakeRoot: chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 
+ 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, + 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, + 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, + 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, + }), + VoteBits: uint16(0x0000), + FinalState: [6]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + Voters: uint16(0x0000), + FreshStake: uint8(0x00), + Revocations: uint8(0x00), + Timestamp: time.Unix(0x4966bc61, 0), // 2009-01-08 20:54:25 -0600 CST + Bits: 0x1d00ffff, // 486604799 + SBits: int64(0x0000000000000000), + Nonce: 0x9962e301, // 2573394689 }, Transactions: 1, - Hashes: []*wire.ShaHash{ - (*wire.ShaHash)(&[wire.HashSize]byte{ // Make go vet happy. + Hashes: []*chainhash.Hash{ + (*chainhash.Hash)(&[chainhash.HashSize]byte{ // Make go vet happy. + 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, + 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, + 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, + 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, + }), + }, + STransactions: 1, + SHashes: []*chainhash.Hash{ + (*chainhash.Hash)(&[chainhash.HashSize]byte{ // Make go vet happy. 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, @@ -400,9 +462,8 @@ var merkleBlockOne = wire.MsgMerkleBlock{ Flags: []byte{0x80}, } -// merkleBlockOneBytes is the serialized bytes for a merkle block created from -// block one of the block chain where the first transation matches. -var merkleBlockOneBytes = []byte{ +// testMerkleBlockBytes is the serialized bytes for the above test merkle block. +var testMerkleBlockBytes = []byte{ 0x01, 0x00, 0x00, 0x00, // Version 1 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, @@ -412,15 +473,39 @@ var merkleBlockOneBytes = []byte{ 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot - 0x61, 0xbc, 0x66, 0x49, // Timestamp - 0xff, 0xff, 0x00, 0x1d, // Bits - 0x01, 0xe3, 0x62, 0x99, // Nonce - 0x01, 0x00, 0x00, 0x00, // TxnCount - 0x01, // Num hashes 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, - 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // Hash - 0x01, // Num flag bytes - 0x80, // Flags + 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // StakeRoot + 0x00, 0x00, // VoteBits + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // FinalState + 0x00, 0x00, // Voters + 0x00, // FreshStake + 0x00, // Revocations + 0x00, 0x00, 0x00, 0x00, // Poolsize + 0xff, 0xff, 0x00, 0x1d, // Bits + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // SBits + 0x00, 0x00, 0x00, 0x00, // Height + 0x00, 0x00, 0x00, 0x00, // Size + 0x61, 0xbc, 0x66, 0x49, // Timestamp + 0x01, 0xe3, 0x62, 0x99, // Nonce + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ExtraData + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, // TxnCount (regular) [180] + 0x01, // Num hashes (regular) [184] + 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, + 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, + 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, + 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // Hash [185] + 0x01, 0x00, 0x00, 0x00, // TxnCount (stake) [217] + 0x01, // Num hashes (stake) [221] + 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, + 0xbb, 
0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67,
+	0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1,
+	0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // Hash [222]
+	0x01, // Num flag bytes [254]
+	0x80, // Flags [255]
 }
diff --git a/wire/msgminingstate.go b/wire/msgminingstate.go
new file mode 100644
index 00000000..85524fe1
--- /dev/null
+++ b/wire/msgminingstate.go
@@ -0,0 +1,197 @@
+// Copyright (c) 2013-2015 The btcsuite developers
+// Copyright (c) 2015 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package wire
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/decred/dcrd/chaincfg/chainhash"
+)
+
+// MaxMSBlocksAtHeadPerMsg is the maximum number of block hashes allowed
+// per message.
+const MaxMSBlocksAtHeadPerMsg = 8
+
+// MaxMSVotesAtHeadPerMsg is the maximum number of votes at head per message.
+const MaxMSVotesAtHeadPerMsg = 40 // 8 * 5
+
+// MsgMiningState implements the Message interface and represents a mining state
+// message. It is used to deliver the list of blocks located at the chain tip
+// along with all votes for those blocks, typically in response to a
+// getminingstate message (MsgGetMiningState). The list returned is limited by
+// the maximum number of blocks per message and the maximum number of votes per
+// message.
+type MsgMiningState struct {
+	ProtocolVersion uint32
+	Height          uint32
+	BlockHashes     []*chainhash.Hash
+	VoteHashes      []*chainhash.Hash
+}
+
+// AddBlockHash adds a new block hash to the message.
+func (msg *MsgMiningState) AddBlockHash(hash *chainhash.Hash) error {
+	if len(msg.BlockHashes)+1 > MaxMSBlocksAtHeadPerMsg {
+		str := fmt.Sprintf("too many block hashes for message [max %v]",
+			MaxMSBlocksAtHeadPerMsg)
+		return messageError("MsgMiningState.AddBlockHash", str)
+	}
+
+	msg.BlockHashes = append(msg.BlockHashes, hash)
+	return nil
+}
+
+// AddVoteHash adds a new vote hash to the message.
+func (msg *MsgMiningState) AddVoteHash(hash *chainhash.Hash) error {
+	if len(msg.VoteHashes)+1 > MaxMSVotesAtHeadPerMsg {
+		str := fmt.Sprintf("too many vote hashes for message [max %v]",
+			MaxMSVotesAtHeadPerMsg)
+		return messageError("MsgMiningState.AddVoteHash", str)
+	}
+
+	msg.VoteHashes = append(msg.VoteHashes, hash)
+	return nil
+}
+
+// BtcDecode decodes r using the protocol encoding into the receiver.
+// This is part of the Message interface implementation.
+func (msg *MsgMiningState) BtcDecode(r io.Reader, pver uint32) error {
+	err := readElement(r, &msg.ProtocolVersion)
+	if err != nil {
+		return err
+	}
+
+	err = readElement(r, &msg.Height)
+	if err != nil {
+		return err
+	}
+
+	// Read num block hashes and limit to max.
+	count, err := readVarInt(r, pver)
+	if err != nil {
+		return err
+	}
+	if count > MaxMSBlocksAtHeadPerMsg {
+		str := fmt.Sprintf("too many block hashes for message "+
+			"[count %v, max %v]", count, MaxMSBlocksAtHeadPerMsg)
+		return messageError("MsgMiningState.BtcDecode", str)
+	}
+
+	msg.BlockHashes = make([]*chainhash.Hash, 0, count)
+	for i := uint64(0); i < count; i++ {
+		sha := chainhash.Hash{}
+		err := readElement(r, &sha)
+		if err != nil {
+			return err
+		}
+		msg.AddBlockHash(&sha)
+	}
+
+	// Read num vote hashes and limit to max.
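+	// Per the constants above, at most MaxMSVotesAtHeadPerMsg (8 blocks * 5
+	// votes) vote hashes are accepted.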
+ count, err = readVarInt(r, pver) + if err != nil { + return err + } + if count > MaxMSVotesAtHeadPerMsg { + str := fmt.Sprintf("too many vote hashes for message "+ + "[count %v, max %v]", count, MaxMSVotesAtHeadPerMsg) + return messageError("MsgMiningState.BtcDecode", str) + } + + msg.VoteHashes = make([]*chainhash.Hash, 0, count) + for i := uint64(0); i < count; i++ { + sha := chainhash.Hash{} + err := readElement(r, &sha) + if err != nil { + return err + } + msg.AddVoteHash(&sha) + } + + return nil +} + +// BtcEncode encodes the receiver to w using the protocol encoding. +// This is part of the Message interface implementation. +func (msg *MsgMiningState) BtcEncode(w io.Writer, pver uint32) error { + err := writeElement(w, msg.ProtocolVersion) + if err != nil { + return err + } + + err = writeElement(w, msg.Height) + if err != nil { + return err + } + + // Write block hashes. + count := len(msg.BlockHashes) + if count > MaxMSBlocksAtHeadPerMsg { + str := fmt.Sprintf("too many block hashes for message "+ + "[count %v, max %v]", count, MaxMSBlocksAtHeadPerMsg) + return messageError("MsgMiningState.BtcEncode", str) + } + + err = writeVarInt(w, pver, uint64(count)) + if err != nil { + return err + } + + for _, hash := range msg.BlockHashes { + err = writeElement(w, hash) + if err != nil { + return err + } + } + + // Write vote hashes. + count = len(msg.VoteHashes) + if count > MaxMSVotesAtHeadPerMsg { + str := fmt.Sprintf("too many vote hashes for message "+ + "[count %v, max %v]", count, MaxMSVotesAtHeadPerMsg) + return messageError("MsgMiningState.BtcEncode", str) + } + + err = writeVarInt(w, pver, uint64(count)) + if err != nil { + return err + } + + for _, hash := range msg.VoteHashes { + err = writeElement(w, hash) + if err != nil { + return err + } + } + + return nil +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgMiningState) Command() string { + return CmdMiningState +} + +// MaxPayloadLength returns the maximum length the payload can be for the +// receiver. This is part of the Message interface implementation. +func (msg *MsgMiningState) MaxPayloadLength(pver uint32) uint32 { + // Protocol version 4 bytes + Height 4 bytes + num block hashes (varInt) + + // block hashes + num vote hashes (varInt) + vote hashes + return 4 + 4 + MaxVarIntPayload + (MaxMSBlocksAtHeadPerMsg * + chainhash.HashSize) + MaxVarIntPayload + (MaxMSVotesAtHeadPerMsg * + chainhash.HashSize) +} + +// NewMsgMiningState returns a new decred miningstate message that conforms to +// the Message interface using the defaults for the fields. +func NewMsgMiningState() *MsgMiningState { + return &MsgMiningState{ + ProtocolVersion: ProtocolVersion, + Height: 0, + BlockHashes: make([]*chainhash.Hash, 0, MaxMSBlocksAtHeadPerMsg), + VoteHashes: make([]*chainhash.Hash, 0, MaxMSVotesAtHeadPerMsg), + } +} diff --git a/wire/msgminingstate_test.go b/wire/msgminingstate_test.go new file mode 100644 index 00000000..5aa15528 --- /dev/null +++ b/wire/msgminingstate_test.go @@ -0,0 +1,116 @@ +// msgminingstate_test.go +package wire_test + +import ( + "bytes" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" +) + +// TestMiningStateWire tests the MsgMiningState wire encode and decode for a sample +// message containing a fake block header and some fake vote hashes. +func TestMiningStateWire(t *testing.T) { + // Empty tx message. 
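+	// The sample built below carries a single block hash at the chain tip
+	// and three vote hashes for it at height 123456.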
+ sampleMSMsg := wire.NewMsgMiningState() + sampleMSMsg.ProtocolVersion = wire.ProtocolVersion + sampleMSMsg.Height = 123456 + + fakeBlock, _ := chainhash.NewHashFromStr("4433221144332211443322114" + + "433221144332211443322114433221144332211") + err := sampleMSMsg.AddBlockHash(fakeBlock) + if err != nil { + t.Errorf("unexpected error for AddBlockHash: %v", err.Error()) + } + + fakeVote1, _ := chainhash.NewHashFromStr("2222111122221111222211112" + + "222111122221111222211112222111122221111") + fakeVote2, _ := chainhash.NewHashFromStr("4444333344443333444433334" + + "444333344443333444433334444333344443333") + fakeVote3, _ := chainhash.NewHashFromStr("6666555566665555666655556" + + "666555566665555666655556666555566665555") + err = sampleMSMsg.AddVoteHash(fakeVote1) + if err != nil { + t.Errorf("unexpected error for AddVoteHash 1: %v", err.Error()) + } + err = sampleMSMsg.AddVoteHash(fakeVote2) + if err != nil { + t.Errorf("unexpected error for AddVoteHash 2: %v", err.Error()) + } + err = sampleMSMsg.AddVoteHash(fakeVote3) + if err != nil { + t.Errorf("unexpected error for AddVoteHash 3: %v", err.Error()) + } + + sampleMSMsgEncoded := []byte{ + 0x01, 0x00, 0x00, 0x00, // Version + 0x40, 0xe2, 0x01, 0x00, // Height 0001e240 in BE + 0x01, + 0x11, 0x22, 0x33, 0x44, 0x11, 0x22, 0x33, 0x44, // Dummy Block + 0x11, 0x22, 0x33, 0x44, 0x11, 0x22, 0x33, 0x44, + 0x11, 0x22, 0x33, 0x44, 0x11, 0x22, 0x33, 0x44, + 0x11, 0x22, 0x33, 0x44, 0x11, 0x22, 0x33, 0x44, + 0x03, // Varint for number of votes + 0x11, 0x11, 0x22, 0x22, 0x11, 0x11, 0x22, 0x22, // Dummy votes [1] + 0x11, 0x11, 0x22, 0x22, 0x11, 0x11, 0x22, 0x22, + 0x11, 0x11, 0x22, 0x22, 0x11, 0x11, 0x22, 0x22, + 0x11, 0x11, 0x22, 0x22, 0x11, 0x11, 0x22, 0x22, + 0x33, 0x33, 0x44, 0x44, 0x33, 0x33, 0x44, 0x44, // [2] + 0x33, 0x33, 0x44, 0x44, 0x33, 0x33, 0x44, 0x44, + 0x33, 0x33, 0x44, 0x44, 0x33, 0x33, 0x44, 0x44, + 0x33, 0x33, 0x44, 0x44, 0x33, 0x33, 0x44, 0x44, + 0x55, 0x55, 0x66, 0x66, 0x55, 0x55, 0x66, 0x66, // [3] + 0x55, 0x55, 0x66, 0x66, 0x55, 0x55, 0x66, 0x66, + 0x55, 0x55, 0x66, 0x66, 0x55, 0x55, 0x66, 0x66, + 0x55, 0x55, 0x66, 0x66, 0x55, 0x55, 0x66, 0x66, + } + + tests := []struct { + in *wire.MsgMiningState // Message to encode + out *wire.MsgMiningState // Expected decoded message + buf []byte // Wire encoding + pver uint32 // Protocol version for wire encoding + }{ + // Latest protocol version sample message. + { + sampleMSMsg, + sampleMSMsg, + sampleMSMsgEncoded, + wire.ProtocolVersion, + }, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + // Encode the message to wire format. + var buf bytes.Buffer + err := test.in.BtcEncode(&buf, test.pver) + if err != nil { + t.Errorf("BtcEncode #%d error %v", i, err) + continue + } + if !bytes.Equal(buf.Bytes(), test.buf) { + t.Errorf("BtcEncode #%d\n got: %s want: %s", i, + spew.Sdump(buf.Bytes()), spew.Sdump(test.buf)) + continue + } + + // Decode the message from wire format. 
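+		// The decoded message must deep-equal the original for the
+		// round trip to pass.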
+ var msg wire.MsgMiningState + rbuf := bytes.NewReader(test.buf) + err = msg.BtcDecode(rbuf, test.pver) + if err != nil { + t.Errorf("BtcDecode #%d error %v", i, err) + continue + } + if !reflect.DeepEqual(&msg, test.out) { + t.Errorf("BtcDecode #%d\n got: %s want: %s", i, + spew.Sdump(&msg), spew.Sdump(test.out)) + continue + } + } +} diff --git a/wire/msgnotfound.go b/wire/msgnotfound.go index 5193d620..64669913 100644 --- a/wire/msgnotfound.go +++ b/wire/msgnotfound.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -9,7 +10,7 @@ import ( "io" ) -// MsgNotFound defines a bitcoin notfound message which is sent in response to +// MsgNotFound defines a decred notfound message which is sent in response to // a getdata message if any of the requested data in not available on the peer. // Each message is limited to a maximum number of inventory vectors, which is // currently 50,000. @@ -32,7 +33,7 @@ func (msg *MsgNotFound) AddInvVect(iv *InvVect) error { return nil } -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgNotFound) BtcDecode(r io.Reader, pver uint32) error { count, err := readVarInt(r, pver) @@ -59,7 +60,7 @@ func (msg *MsgNotFound) BtcDecode(r io.Reader, pver uint32) error { return nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. // This is part of the Message interface implementation. func (msg *MsgNotFound) BtcEncode(w io.Writer, pver uint32) error { // Limit to max inventory vectors per message. @@ -98,7 +99,7 @@ func (msg *MsgNotFound) MaxPayloadLength(pver uint32) uint32 { return MaxVarIntPayload + (MaxInvPerMsg * maxInvVectPayload) } -// NewMsgNotFound returns a new bitcoin notfound message that conforms to the +// NewMsgNotFound returns a new decred notfound message that conforms to the // Message interface. See MsgNotFound for details. func NewMsgNotFound() *MsgNotFound { return &MsgNotFound{ diff --git a/wire/msgnotfound_test.go b/wire/msgnotfound_test.go index 3a71cd81..4c6e6e75 100644 --- a/wire/msgnotfound_test.go +++ b/wire/msgnotfound_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -10,8 +11,10 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" ) // TestNotFound tests the MsgNotFound API. @@ -37,7 +40,7 @@ func TestNotFound(t *testing.T) { } // Ensure inventory vectors are added properly. - hash := wire.ShaHash{} + hash := chainhash.Hash{} iv := wire.NewInvVect(wire.InvTypeBlock, &hash) err := msg.AddInvVect(iv) if err != nil { @@ -66,14 +69,14 @@ func TestNotFound(t *testing.T) { func TestNotFoundWire(t *testing.T) { // Block 203707 hash. hashStr := "3264bc2ac36a60840790ba1d475d01367e7c723da941069e9dc" - blockHash, err := wire.NewShaHashFromStr(hashStr) + blockHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Transation 1 of Block 203707 hash. 
hashStr = "d28a3dc7392bf00a9855ee93dd9a81eff82a2c4fe57fbd42cfe71b487accfaf0" - txHash, err := wire.NewShaHashFromStr(hashStr) + txHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } @@ -126,70 +129,6 @@ func TestNotFoundWire(t *testing.T) { MultiInvEncoded, wire.ProtocolVersion, }, - - // Protocol version BIP0035Version no inv vectors. - { - NoInv, - NoInv, - NoInvEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0035Version with multiple inv vectors. - { - MultiInv, - MultiInv, - MultiInvEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0031Version no inv vectors. - { - NoInv, - NoInv, - NoInvEncoded, - wire.BIP0031Version, - }, - - // Protocol version BIP0031Version with multiple inv vectors. - { - MultiInv, - MultiInv, - MultiInvEncoded, - wire.BIP0031Version, - }, - - // Protocol version NetAddressTimeVersion no inv vectors. - { - NoInv, - NoInv, - NoInvEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version NetAddressTimeVersion with multiple inv vectors. - { - MultiInv, - MultiInv, - MultiInvEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version MultipleAddressVersion no inv vectors. - { - NoInv, - NoInv, - NoInvEncoded, - wire.MultipleAddressVersion, - }, - - // Protocol version MultipleAddressVersion with multiple inv vectors. - { - MultiInv, - MultiInv, - MultiInvEncoded, - wire.MultipleAddressVersion, - }, } t.Logf("Running %d tests", len(tests)) @@ -231,7 +170,7 @@ func TestNotFoundWireErrors(t *testing.T) { // Block 203707 hash. hashStr := "3264bc2ac36a60840790ba1d475d01367e7c723da941069e9dc" - blockHash, err := wire.NewShaHashFromStr(hashStr) + blockHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } diff --git a/wire/msgping.go b/wire/msgping.go index c9e3f646..25d90abf 100644 --- a/wire/msgping.go +++ b/wire/msgping.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,7 +9,7 @@ import ( "io" ) -// MsgPing implements the Message interface and represents a bitcoin ping +// MsgPing implements the Message interface and represents a decred ping // message. // // For versions BIP0031Version and earlier, it is used primarily to confirm @@ -25,33 +26,23 @@ type MsgPing struct { Nonce uint64 } -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgPing) BtcDecode(r io.Reader, pver uint32) error { - // There was no nonce for BIP0031Version and earlier. - // NOTE: > is not a mistake here. The BIP0031 was defined as AFTER - // the version unlike most others. - if pver > BIP0031Version { - err := readElement(r, &msg.Nonce) - if err != nil { - return err - } + err := readElement(r, &msg.Nonce) + if err != nil { + return err } return nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. // This is part of the Message interface implementation. func (msg *MsgPing) BtcEncode(w io.Writer, pver uint32) error { - // There was no nonce for BIP0031Version and earlier. - // NOTE: > is not a mistake here. The BIP0031 was defined as AFTER - // the version unlike most others. 
- if pver > BIP0031Version { - err := writeElement(w, msg.Nonce) - if err != nil { - return err - } + err := writeElement(w, msg.Nonce) + if err != nil { + return err } return nil @@ -67,18 +58,14 @@ func (msg *MsgPing) Command() string { // receiver. This is part of the Message interface implementation. func (msg *MsgPing) MaxPayloadLength(pver uint32) uint32 { plen := uint32(0) - // There was no nonce for BIP0031Version and earlier. - // NOTE: > is not a mistake here. The BIP0031 was defined as AFTER - // the version unlike most others. - if pver > BIP0031Version { - // Nonce 8 bytes. - plen += 8 - } + + // Nonce 8 bytes. + plen += 8 return plen } -// NewMsgPing returns a new bitcoin ping message that conforms to the Message +// NewMsgPing returns a new decred ping message that conforms to the Message // interface. See MsgPing for details. func NewMsgPing(nonce uint64) *MsgPing { return &MsgPing{ diff --git a/wire/msgping_test.go b/wire/msgping_test.go index ae2ab214..07a1d325 100644 --- a/wire/msgping_test.go +++ b/wire/msgping_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -10,8 +11,8 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + "github.com/decred/dcrd/wire" ) // TestPing tests the MsgPing API against the latest protocol version. @@ -48,88 +49,6 @@ func TestPing(t *testing.T) { return } -// TestPingBIP0031 tests the MsgPing API against the protocol version -// BIP0031Version. -func TestPingBIP0031(t *testing.T) { - // Use the protocol version just prior to BIP0031Version changes. - pver := wire.BIP0031Version - - nonce, err := wire.RandomUint64() - if err != nil { - t.Errorf("RandomUint64: Error generating nonce: %v", err) - } - msg := wire.NewMsgPing(nonce) - if msg.Nonce != nonce { - t.Errorf("NewMsgPing: wrong nonce - got %v, want %v", - msg.Nonce, nonce) - } - - // Ensure max payload is expected value for old protocol version. - wantPayload := uint32(0) - maxPayload := msg.MaxPayloadLength(pver) - if maxPayload != wantPayload { - t.Errorf("MaxPayloadLength: wrong max payload length for "+ - "protocol version %d - got %v, want %v", pver, - maxPayload, wantPayload) - } - - // Test encode with old protocol version. - var buf bytes.Buffer - err = msg.BtcEncode(&buf, pver) - if err != nil { - t.Errorf("encode of MsgPing failed %v err <%v>", msg, err) - } - - // Test decode with old protocol version. - readmsg := wire.NewMsgPing(0) - err = readmsg.BtcDecode(&buf, pver) - if err != nil { - t.Errorf("decode of MsgPing failed [%v] err <%v>", buf, err) - } - - // Since this protocol version doesn't support the nonce, make sure - // it didn't get encoded and decoded back out. - if msg.Nonce == readmsg.Nonce { - t.Errorf("Should not get same nonce for protocol version %d", pver) - } - - return -} - -// TestPingCrossProtocol tests the MsgPing API when encoding with the latest -// protocol version and decoding with BIP0031Version. -func TestPingCrossProtocol(t *testing.T) { - nonce, err := wire.RandomUint64() - if err != nil { - t.Errorf("RandomUint64: Error generating nonce: %v", err) - } - msg := wire.NewMsgPing(nonce) - if msg.Nonce != nonce { - t.Errorf("NewMsgPing: wrong nonce - got %v, want %v", - msg.Nonce, nonce) - } - - // Encode with latest protocol version. 
- var buf bytes.Buffer - err = msg.BtcEncode(&buf, wire.ProtocolVersion) - if err != nil { - t.Errorf("encode of MsgPing failed %v err <%v>", msg, err) - } - - // Decode with old protocol version. - readmsg := wire.NewMsgPing(0) - err = readmsg.BtcDecode(&buf, wire.BIP0031Version) - if err != nil { - t.Errorf("decode of MsgPing failed [%v] err <%v>", buf, err) - } - - // Since one of the protocol versions doesn't support the nonce, make - // sure it didn't get encoded and decoded back out. - if msg.Nonce == readmsg.Nonce { - t.Error("Should not get same nonce for cross protocol") - } -} - // TestPingWire tests the MsgPing wire encode and decode for various protocol // versions. func TestPingWire(t *testing.T) { @@ -146,22 +65,6 @@ func TestPingWire(t *testing.T) { []byte{0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00}, wire.ProtocolVersion, }, - - // Protocol version BIP0031Version+1 - { - wire.MsgPing{Nonce: 456456}, // 0x6f708 - wire.MsgPing{Nonce: 456456}, // 0x6f708 - []byte{0x08, 0xf7, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00}, - wire.BIP0031Version + 1, - }, - - // Protocol version BIP0031Version - { - wire.MsgPing{Nonce: 789789}, // 0xc0d1d - wire.MsgPing{Nonce: 0}, // No nonce for pver - []byte{}, // No nonce for pver - wire.BIP0031Version, - }, } t.Logf("Running %d tests", len(tests)) diff --git a/wire/msgpong.go b/wire/msgpong.go index 4c89acc8..7ab25dc2 100644 --- a/wire/msgpong.go +++ b/wire/msgpong.go @@ -1,17 +1,17 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package wire import ( - "fmt" "io" ) -// MsgPong implements the Message interface and represents a bitcoin pong +// MsgPong implements the Message interface and represents a decred pong // message which is used primarily to confirm that a connection is still valid -// in response to a bitcoin ping message (MsgPing). +// in response to a decred ping message (MsgPing). // // This message was not added until protocol versions AFTER BIP0031Version. type MsgPong struct { @@ -20,17 +20,9 @@ type MsgPong struct { Nonce uint64 } -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgPong) BtcDecode(r io.Reader, pver uint32) error { - // NOTE: <= is not a mistake here. The BIP0031 was defined as AFTER - // the version unlike most others. - if pver <= BIP0031Version { - str := fmt.Sprintf("pong message invalid for protocol "+ - "version %d", pver) - return messageError("MsgPong.BtcDecode", str) - } - err := readElement(r, &msg.Nonce) if err != nil { return err @@ -39,17 +31,9 @@ func (msg *MsgPong) BtcDecode(r io.Reader, pver uint32) error { return nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. // This is part of the Message interface implementation. func (msg *MsgPong) BtcEncode(w io.Writer, pver uint32) error { - // NOTE: <= is not a mistake here. The BIP0031 was defined as AFTER - // the version unlike most others. 
- if pver <= BIP0031Version { - str := fmt.Sprintf("pong message invalid for protocol "+ - "version %d", pver) - return messageError("MsgPong.BtcEncode", str) - } - err := writeElement(w, msg.Nonce) if err != nil { return err @@ -68,18 +52,14 @@ func (msg *MsgPong) Command() string { // receiver. This is part of the Message interface implementation. func (msg *MsgPong) MaxPayloadLength(pver uint32) uint32 { plen := uint32(0) - // The pong message did not exist for BIP0031Version and earlier. - // NOTE: > is not a mistake here. The BIP0031 was defined as AFTER - // the version unlike most others. - if pver > BIP0031Version { - // Nonce 8 bytes. - plen += 8 - } + + // Nonce 8 bytes. + plen += 8 return plen } -// NewMsgPong returns a new bitcoin pong message that conforms to the Message +// NewMsgPong returns a new decred pong message that conforms to the Message // interface. See MsgPong for details. func NewMsgPong(nonce uint64) *MsgPong { return &MsgPong{ diff --git a/wire/msgpong_test.go b/wire/msgpong_test.go index aac46fc4..77246fbb 100644 --- a/wire/msgpong_test.go +++ b/wire/msgpong_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -10,8 +11,8 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + "github.com/decred/dcrd/wire" ) // TestPongLatest tests the MsgPong API against the latest protocol version. @@ -66,87 +67,6 @@ func TestPongLatest(t *testing.T) { return } -// TestPongBIP0031 tests the MsgPong API against the protocol version -// BIP0031Version. -func TestPongBIP0031(t *testing.T) { - // Use the protocol version just prior to BIP0031Version changes. - pver := wire.BIP0031Version - - nonce, err := wire.RandomUint64() - if err != nil { - t.Errorf("Error generating nonce: %v", err) - } - msg := wire.NewMsgPong(nonce) - if msg.Nonce != nonce { - t.Errorf("Should get same nonce back out.") - } - - // Ensure max payload is expected value for old protocol version. - size := msg.MaxPayloadLength(pver) - if size != 0 { - t.Errorf("Max length should be 0 for pong protocol version %d.", - pver) - } - - // Test encode with old protocol version. - var buf bytes.Buffer - err = msg.BtcEncode(&buf, pver) - if err == nil { - t.Errorf("encode of MsgPong succeeded when it shouldn't have %v", - msg) - } - - // Test decode with old protocol version. - readmsg := wire.NewMsgPong(0) - err = readmsg.BtcDecode(&buf, pver) - if err == nil { - t.Errorf("decode of MsgPong succeeded when it shouldn't have %v", - spew.Sdump(buf)) - } - - // Since this protocol version doesn't support pong, make sure the - // nonce didn't get encoded and decoded back out. - if msg.Nonce == readmsg.Nonce { - t.Errorf("Should not get same nonce for protocol version %d", pver) - } - - return -} - -// TestPongCrossProtocol tests the MsgPong API when encoding with the latest -// protocol version and decoding with BIP0031Version. -func TestPongCrossProtocol(t *testing.T) { - nonce, err := wire.RandomUint64() - if err != nil { - t.Errorf("Error generating nonce: %v", err) - } - msg := wire.NewMsgPong(nonce) - if msg.Nonce != nonce { - t.Errorf("Should get same nonce back out.") - } - - // Encode with latest protocol version. 
- var buf bytes.Buffer - err = msg.BtcEncode(&buf, wire.ProtocolVersion) - if err != nil { - t.Errorf("encode of MsgPong failed %v err <%v>", msg, err) - } - - // Decode with old protocol version. - readmsg := wire.NewMsgPong(0) - err = readmsg.BtcDecode(&buf, wire.BIP0031Version) - if err == nil { - t.Errorf("encode of MsgPong succeeded when it shouldn't have %v", - msg) - } - - // Since one of the protocol versions doesn't support the pong message, - // make sure the nonce didn't get encoded and decoded back out. - if msg.Nonce == readmsg.Nonce { - t.Error("Should not get same nonce for cross protocol") - } -} - // TestPongWire tests the MsgPong wire encode and decode for various protocol // versions. func TestPongWire(t *testing.T) { @@ -163,14 +83,6 @@ func TestPongWire(t *testing.T) { []byte{0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00}, wire.ProtocolVersion, }, - - // Protocol version BIP0031Version+1 - { - wire.MsgPong{Nonce: 456456}, // 0x6f708 - wire.MsgPong{Nonce: 456456}, // 0x6f708 - []byte{0x08, 0xf7, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00}, - wire.BIP0031Version + 1, - }, } t.Logf("Running %d tests", len(tests)) @@ -208,8 +120,6 @@ func TestPongWire(t *testing.T) { // of MsgPong to confirm error paths work correctly. func TestPongWireErrors(t *testing.T) { pver := wire.ProtocolVersion - pverNoPong := wire.BIP0031Version - wireErr := &wire.MessageError{} basePong := wire.NewMsgPong(123123) // 0x1e0f3 basePongEncoded := []byte{ @@ -227,8 +137,6 @@ func TestPongWireErrors(t *testing.T) { // Latest protocol version with intentional read/write errors. // Force error in nonce. {basePong, basePongEncoded, pver, 0, io.ErrShortWrite, io.EOF}, - // Force error due to unsupported protocol version. - {basePong, basePongEncoded, pverNoPong, 4, wireErr, wireErr}, } t.Logf("Running %d tests", len(tests)) diff --git a/wire/msgreject.go b/wire/msgreject.go index f1ad73a8..b9757736 100644 --- a/wire/msgreject.go +++ b/wire/msgreject.go @@ -1,4 +1,5 @@ // Copyright (c) 2014-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -7,6 +8,8 @@ package wire import ( "fmt" "io" + + "github.com/decred/dcrd/chaincfg/chainhash" ) // RejectCode represents a numeric value by which a remote peer indicates @@ -46,7 +49,7 @@ func (code RejectCode) String() string { return fmt.Sprintf("Unknown RejectCode (%d)", uint8(code)) } -// MsgReject implements the Message interface and represents a bitcoin reject +// MsgReject implements the Message interface and represents a decred reject // message. // // This message was not added until protocol version RejectVersion. @@ -66,18 +69,12 @@ type MsgReject struct { // Hash identifies a specific block or transaction that was rejected // and therefore only applies the MsgBlock and MsgTx messages. - Hash ShaHash + Hash chainhash.Hash } -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgReject) BtcDecode(r io.Reader, pver uint32) error { - if pver < RejectVersion { - str := fmt.Sprintf("reject message invalid for protocol "+ - "version %d", pver) - return messageError("MsgReject.BtcDecode", str) - } - // Command that was rejected. 
cmd, err := readVarString(r, pver) if err != nil { @@ -111,15 +108,9 @@ func (msg *MsgReject) BtcDecode(r io.Reader, pver uint32) error { return nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. // This is part of the Message interface implementation. func (msg *MsgReject) BtcEncode(w io.Writer, pver uint32) error { - if pver < RejectVersion { - str := fmt.Sprintf("reject message invalid for protocol "+ - "version %d", pver) - return messageError("MsgReject.BtcEncode", str) - } - // Command that was rejected. err := writeVarString(w, pver, msg.Cmd) if err != nil { @@ -161,19 +152,15 @@ func (msg *MsgReject) Command() string { // receiver. This is part of the Message interface implementation. func (msg *MsgReject) MaxPayloadLength(pver uint32) uint32 { plen := uint32(0) - // The reject message did not exist before protocol version - // RejectVersion. - if pver >= RejectVersion { - // Unfortunately the bitcoin protocol does not enforce a sane - // limit on the length of the reason, so the max payload is the - // overall maximum message payload. - plen = MaxMessagePayload - } + // Unfortunately the decred protocol does not enforce a sane + // limit on the length of the reason, so the max payload is the + // overall maximum message payload. + plen = MaxMessagePayload return plen } -// NewMsgReject returns a new bitcoin reject message that conforms to the +// NewMsgReject returns a new decred reject message that conforms to the // Message interface. See MsgReject for details. func NewMsgReject(command string, code RejectCode, reason string) *MsgReject { return &MsgReject{ diff --git a/wire/msgreject_test.go b/wire/msgreject_test.go index 5645f97f..f6266d42 100644 --- a/wire/msgreject_test.go +++ b/wire/msgreject_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2014-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -10,8 +11,8 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + "github.com/decred/dcrd/wire" ) // TestRejectCodeStringer tests the stringized output for the reject code type. @@ -119,109 +120,6 @@ func TestRejectLatest(t *testing.T) { } } -// TestRejectBeforeAdded tests the MsgReject API against a protocol version -// before the version which introduced it (RejectVersion). -func TestRejectBeforeAdded(t *testing.T) { - // Use the protocol version just prior to RejectVersion. - pver := wire.RejectVersion - 1 - - // Create reject message data. - rejCommand := (&wire.MsgBlock{}).Command() - rejCode := wire.RejectDuplicate - rejReason := "duplicate block" - rejHash := mainNetGenesisHash - - msg := wire.NewMsgReject(rejCommand, rejCode, rejReason) - msg.Hash = rejHash - - // Ensure max payload is expected value for old protocol version. - size := msg.MaxPayloadLength(pver) - if size != 0 { - t.Errorf("Max length should be 0 for reject protocol version %d.", - pver) - } - - // Test encode with old protocol version. - var buf bytes.Buffer - err := msg.BtcEncode(&buf, pver) - if err == nil { - t.Errorf("encode of MsgReject succeeded when it shouldn't "+ - "have %v", msg) - } - - // // Test decode with old protocol version. 
- readMsg := wire.MsgReject{} - err = readMsg.BtcDecode(&buf, pver) - if err == nil { - t.Errorf("decode of MsgReject succeeded when it shouldn't "+ - "have %v", spew.Sdump(buf.Bytes())) - } - - // Since this protocol version doesn't support reject, make sure various - // fields didn't get encoded and decoded back out. - if msg.Cmd == readMsg.Cmd { - t.Errorf("Should not get same reject command for protocol "+ - "version %d", pver) - } - if msg.Code == readMsg.Code { - t.Errorf("Should not get same reject code for protocol "+ - "version %d", pver) - } - if msg.Reason == readMsg.Reason { - t.Errorf("Should not get same reject reason for protocol "+ - "version %d", pver) - } - if msg.Hash == readMsg.Hash { - t.Errorf("Should not get same reject hash for protocol "+ - "version %d", pver) - } -} - -// TestRejectCrossProtocol tests the MsgReject API when encoding with the latest -// protocol version and decoded with a version before the version which -// introduced it (RejectVersion). -func TestRejectCrossProtocol(t *testing.T) { - // Create reject message data. - rejCommand := (&wire.MsgBlock{}).Command() - rejCode := wire.RejectDuplicate - rejReason := "duplicate block" - rejHash := mainNetGenesisHash - - msg := wire.NewMsgReject(rejCommand, rejCode, rejReason) - msg.Hash = rejHash - - // Encode with latest protocol version. - var buf bytes.Buffer - err := msg.BtcEncode(&buf, wire.ProtocolVersion) - if err != nil { - t.Errorf("encode of MsgReject failed %v err <%v>", msg, err) - } - - // Decode with old protocol version. - readMsg := wire.MsgReject{} - err = readMsg.BtcDecode(&buf, wire.RejectVersion-1) - if err == nil { - t.Errorf("encode of MsgReject succeeded when it shouldn't "+ - "have %v", msg) - } - - // Since one of the protocol versions doesn't support the reject - // message, make sure the various fields didn't get encoded and decoded - // back out. - if msg.Cmd == readMsg.Cmd { - t.Errorf("Should not get same reject command for cross protocol") - } - if msg.Code == readMsg.Code { - t.Errorf("Should not get same reject code for cross protocol") - } - if msg.Reason == readMsg.Reason { - t.Errorf("Should not get same reject reason for cross protocol") - } - if msg.Hash == readMsg.Hash { - t.Errorf("Should not get same reject hash for cross protocol") - } -} - // TestRejectWire tests the MsgReject wire encode and decode for various // protocol versions. func TestRejectWire(t *testing.T) { @@ -303,8 +201,6 @@ func TestRejectWire(t *testing.T) { // of MsgReject to confirm error paths work correctly. func TestRejectWireErrors(t *testing.T) { pver := wire.ProtocolVersion - pverNoReject := wire.RejectVersion - 1 - wireErr := &wire.MessageError{} baseReject := wire.NewMsgReject("block", wire.RejectDuplicate, "duplicate block") @@ -337,8 +233,6 @@ func TestRejectWireErrors(t *testing.T) { {baseReject, baseRejectEncoded, pver, 7, io.ErrShortWrite, io.EOF}, // Force error in reject hash. {baseReject, baseRejectEncoded, pver, 23, io.ErrShortWrite, io.EOF}, - // Force error due to unsupported protocol version. - {baseReject, baseRejectEncoded, pverNoReject, 6, wireErr, wireErr}, } t.Logf("Running %d tests", len(tests)) diff --git a/wire/msgtx.go b/wire/msgtx.go index 73a6fa47..aefa4fbf 100644 --- a/wire/msgtx.go +++ b/wire/msgtx.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
@@ -10,11 +11,13 @@ import ( "fmt" "io" "strconv" + + "github.com/decred/dcrd/chaincfg/chainhash" ) const ( // TxVersion is the current latest supported transaction version. - TxVersion = 1 + TxVersion uint16 = 1 // MaxTxInSequenceNum is the maximum sequence number the sequence field // of a transaction input can be. @@ -23,6 +26,25 @@ const ( // MaxPrevOutIndex is the maximum index the index field of a previous // outpoint can be. MaxPrevOutIndex uint32 = 0xffffffff + + // NoExpiryValue is the value of expiry that indicates the transaction + // has no expiry. + NoExpiryValue uint32 = 0 + + // NullValue is a null value for an input witness. + NullValueIn int64 = -1 + + // NullBlockHeight is the null value for an input witness. It references + // the genesis block. + NullBlockHeight uint32 = 0x00000000 + + // NullBlockIndex is the null transaction index in a block for an input + // witness. + NullBlockIndex uint32 = 0xffffffff + + // DefaultPkScriptVersion is the default pkScript version, referring to + // extended Decred script. + DefaultPkScriptVersion uint16 = 0x0000 ) // defaultTxInOutAlloc is the default size used for the backing array for @@ -34,9 +56,10 @@ const defaultTxInOutAlloc = 15 const ( // minTxInPayload is the minimum payload size for a transaction input. - // PreviousOutPoint.Hash + PreviousOutPoint.Index 4 bytes + Varint for - // SignatureScript length 1 byte + Sequence 4 bytes. - minTxInPayload = 9 + HashSize + // PreviousOutPoint.Hash + PreviousOutPoint.Index 4 bytes + + // PreviousOutPoint.Tree 1 byte + Varint for SignatureScript length 1 + // byte + Sequence 4 bytes. + minTxInPayload = 11 + chainhash.HashSize // maxTxInPerMessage is the maximum number of transactions inputs that // a transaction which fits into a message could possibly have. @@ -50,29 +73,102 @@ const ( // a transaction which fits into a message could possibly have. maxTxOutPerMessage = (MaxMessagePayload / minTxOutPayload) + 1 - // minTxPayload is the minimum payload size for a transaction. Note - // that any realistically usable transaction must have at least one - // input or output, but that is a rule enforced at a higher layer, so - // it is intentionally not included here. + // minTxPayload is the minimum payload size for any full encoded + // (prefix and witness transaction). Note that any realistically + // usable transaction must have at least one input or output, but + // that is a rule enforced at a higher layer, so it is intentionally + // not included here. // Version 4 bytes + Varint number of transaction inputs 1 byte + Varint - // number of transaction outputs 1 byte + LockTime 4 bytes + min input - // payload + min output payload. - minTxPayload = 10 + // number of transaction outputs 1 byte + Varint representing the number + // of transaction signatures + LockTime 4 bytes + Expiry 4 bytes + min + // input payload + min output payload. + minTxPayload = 4 + 1 + 1 + 1 + 4 + 4 ) -// OutPoint defines a bitcoin data type that is used to track previous -// transaction outputs. -type OutPoint struct { - Hash ShaHash - Index uint32 +// TxSerializeType is a uint16 representing the serialized type of transaction +// this msgTx is. 
You can use a bitmask for this too, but Decred just splits +// the int32 version into 2x uint16s so that you have: +// { +// uint16 type +// uint16 version +// } +type TxSerializeType uint16 + +const ( + TxSerializeFull = TxSerializeType(iota) + TxSerializeNoWitness + TxSerializeOnlyWitness + TxSerializeWitnessSigning + TxSerializeWitnessValueSigning +) + +// TODO replace all these with predeclared int32 or [4]byte cj +// DefaultMsgTxVersion returns the default version int32 (serialize the tx +// fully, version number 1). +func DefaultMsgTxVersion() int32 { + verBytes := make([]byte, 4, 4) + binary.LittleEndian.PutUint16(verBytes[0:2], TxVersion) + binary.LittleEndian.PutUint16(verBytes[2:4], uint16(TxSerializeFull)) + ver := binary.LittleEndian.Uint32(verBytes) + return int32(ver) } -// NewOutPoint returns a new bitcoin transaction outpoint point with the +// NoWitnessMsgTxVersion returns the witness free serializing int32 (serialize +// the tx without witness, version number 1). +func NoWitnessMsgTxVersion() int32 { + verBytes := make([]byte, 4, 4) + binary.LittleEndian.PutUint16(verBytes[0:2], TxVersion) + binary.LittleEndian.PutUint16(verBytes[2:4], uint16(TxSerializeNoWitness)) + ver := binary.LittleEndian.Uint32(verBytes) + return int32(ver) +} + +// WitnessOnlyMsgTxVersion returns the witness only version int32 (serialize +// the tx witness, version number 1). +func WitnessOnlyMsgTxVersion() int32 { + verBytes := make([]byte, 4, 4) + binary.LittleEndian.PutUint16(verBytes[0:2], TxVersion) + binary.LittleEndian.PutUint16(verBytes[2:4], uint16(TxSerializeOnlyWitness)) + ver := binary.LittleEndian.Uint32(verBytes) + return int32(ver) +} + +// WitnessSigningMsgTxVersion returns the witness only version int32 (serialize +// the tx witness for signing, version number 1). +func WitnessSigningMsgTxVersion() int32 { + verBytes := make([]byte, 4, 4) + binary.LittleEndian.PutUint16(verBytes[0:2], TxVersion) + binary.LittleEndian.PutUint16(verBytes[2:4], uint16(TxSerializeWitnessSigning)) + ver := binary.LittleEndian.Uint32(verBytes) + return int32(ver) +} + +// WitnessValueSigningMsgTxVersion returns the witness only version int32 +// (serialize the tx witness for signing with value, version number 1). +func WitnessValueSigningMsgTxVersion() int32 { + verBytes := make([]byte, 4, 4) + binary.LittleEndian.PutUint16(verBytes[0:2], TxVersion) + binary.LittleEndian.PutUint16(verBytes[2:4], + uint16(TxSerializeWitnessValueSigning)) + ver := binary.LittleEndian.Uint32(verBytes) + return int32(ver) +} + +// OutPoint defines a decred data type that is used to track previous +// transaction outputs. +type OutPoint struct { + Hash chainhash.Hash + Index uint32 + Tree int8 +} + +// NewOutPoint returns a new decred transaction outpoint point with the // provided hash and index. -func NewOutPoint(hash *ShaHash, index uint32) *OutPoint { +func NewOutPoint(hash *chainhash.Hash, index uint32, tree int8) *OutPoint { return &OutPoint{ Hash: *hash, Index: index, + Tree: tree, } } @@ -84,75 +180,125 @@ func (o OutPoint) String() string { // maximum message payload may increase in the future and this // optimization may go unnoticed, so allocate space for 10 decimal // digits, which will fit any uint32. 
- buf := make([]byte, 2*HashSize+1, 2*HashSize+1+10) + buf := make([]byte, 2*chainhash.HashSize+1, 2*chainhash.HashSize+1+10) copy(buf, o.Hash.String()) - buf[2*HashSize] = ':' + buf[2*chainhash.HashSize] = ':' buf = strconv.AppendUint(buf, uint64(o.Index), 10) return string(buf) } -// TxIn defines a bitcoin transaction input. +// TxIn defines a decred transaction input. type TxIn struct { + // Non-witness PreviousOutPoint OutPoint - SignatureScript []byte Sequence uint32 + + // Witness + ValueIn int64 + BlockHeight uint32 + BlockIndex uint32 + SignatureScript []byte } -// SerializeSize returns the number of bytes it would take to serialize the -// the transaction input. -func (t *TxIn) SerializeSize() int { - // Outpoint Hash 32 bytes + Outpoint Index 4 bytes + Sequence 4 bytes + +// SerializeSizePrefix returns the number of bytes it would take to serialize +// the transaction input for a prefix. +func (t *TxIn) SerializeSizePrefix() int { + // Outpoint Hash 32 bytes + Outpoint Index 4 bytes + Outpoint Tree 1 byte + + // Sequence 4 bytes. + return 41 +} + +// SerializeSizeWitness returns the number of bytes it would take to serialize the +// transaction input for a witness. +func (t *TxIn) SerializeSizeWitness() int { + // ValueIn (8 bytes) + BlockHeight (4 bytes) + BlockIndex (4 bytes) + // serialized varint size for the length of SignatureScript + // SignatureScript bytes. - return 40 + VarIntSerializeSize(uint64(len(t.SignatureScript))) + + return 8 + 4 + 4 + VarIntSerializeSize(uint64(len(t.SignatureScript))) + len(t.SignatureScript) } -// NewTxIn returns a new bitcoin transaction input with the provided +// SerializeSizeWitnessSigning returns the number of bytes it would take to +// serialize the transaction input for a witness used in signing. +func (t *TxIn) SerializeSizeWitnessSigning() int { + // Serialized varint size for the length of SignatureScript + + // SignatureScript bytes. + return VarIntSerializeSize(uint64(len(t.SignatureScript))) + + len(t.SignatureScript) +} + +// SerializeSizeWitnessValueSigning returns the number of bytes it would take to +// serialize the transaction input for a witness used in signing with value +// included. +func (t *TxIn) SerializeSizeWitnessValueSigning() int { + // ValueIn (8 bytes) + serialized varint size for the length of + // SignatureScript + SignatureScript bytes. + return 8 + VarIntSerializeSize(uint64(len(t.SignatureScript))) + + len(t.SignatureScript) +} + +// LegacySerializeSize returns the number of bytes it would take to serialize the +// the transaction input. +func (t *TxIn) LegacySerializeSize() int { + // Outpoint Hash 32 bytes + Outpoint Index 4 bytes + Sequence 4 bytes + + // serialized varint size for the length of SignatureScript + + // SignatureScript bytes. + return 41 + VarIntSerializeSize(uint64(len(t.SignatureScript))) + + len(t.SignatureScript) +} + +// NewTxIn returns a new decred transaction input with the provided // previous outpoint point and signature script with a default sequence of // MaxTxInSequenceNum. func NewTxIn(prevOut *OutPoint, signatureScript []byte) *TxIn { return &TxIn{ PreviousOutPoint: *prevOut, - SignatureScript: signatureScript, Sequence: MaxTxInSequenceNum, + SignatureScript: signatureScript, + ValueIn: NullValueIn, + BlockHeight: NullBlockHeight, + BlockIndex: NullBlockIndex, } } -// TxOut defines a bitcoin transaction output. +// TxOut defines a decred transaction output. 
type TxOut struct { Value int64 + Version uint16 PkScript []byte } // SerializeSize returns the number of bytes it would take to serialize the // the transaction output. func (t *TxOut) SerializeSize() int { - // Value 8 bytes + serialized varint size for the length of PkScript + - // PkScript bytes. - return 8 + VarIntSerializeSize(uint64(len(t.PkScript))) + len(t.PkScript) + // Value 8 bytes + Version 2 bytes + serialized varint size for + // the length of PkScript + PkScript bytes. + return 8 + 2 + VarIntSerializeSize(uint64(len(t.PkScript))) + len(t.PkScript) } -// NewTxOut returns a new bitcoin transaction output with the provided +// NewTxOut returns a new decred transaction output with the provided // transaction value and public key script. func NewTxOut(value int64, pkScript []byte) *TxOut { return &TxOut{ Value: value, + Version: DefaultPkScriptVersion, PkScript: pkScript, } } -// MsgTx implements the Message interface and represents a bitcoin tx message. +// MsgTx implements the Message interface and represents a decred tx message. // It is used to deliver transaction information in response to a getdata // message (MsgGetData) for a given transaction. // // Use the AddTxIn and AddTxOut functions to build up the list of transaction // inputs and outputs. type MsgTx struct { - Version int32 - TxIn []*TxIn - TxOut []*TxOut - LockTime uint32 + CachedHash *chainhash.Hash + Version int32 + TxIn []*TxIn + TxOut []*TxOut + LockTime uint32 + Expiry uint32 } // AddTxIn adds a transaction input to the message. @@ -165,15 +311,161 @@ func (msg *MsgTx) AddTxOut(to *TxOut) { msg.TxOut = append(msg.TxOut, to) } -// TxSha generates the ShaHash name for the transaction. -func (msg *MsgTx) TxSha() ShaHash { +// msgTxVersionToBytes converts an int32 version into a 4 byte slice. +func msgTxVersionToBytes(version int32) []byte { + mVerBytes := make([]byte, 4, 4) + binary.LittleEndian.PutUint32(mVerBytes[0:4], uint32(version)) + return mVerBytes +} + +// msgTxVersionDecode converts an int32 version into serialization types and +// actual version. +func msgTxVersionToVars(version int32) (uint16, TxSerializeType) { + mVerBytes := make([]byte, 4, 4) + binary.LittleEndian.PutUint32(mVerBytes[0:4], uint32(version)) + mVer := binary.LittleEndian.Uint16(mVerBytes[0:2]) + mType := binary.LittleEndian.Uint16(mVerBytes[2:4]) + return mVer, TxSerializeType(mType) +} + +// msgTxVersionDecode converts a 4 byte slice into an int32 version. +func msgTxVersionDecode(verBytes []byte) (int32, error) { + if len(verBytes) != 4 { + return 0, messageError("msgTxVersionDecode", "tx version wrong size") + } + ver := binary.LittleEndian.Uint32(verBytes) + + return int32(ver), nil +} + +// shallowCopyForSerializing make a shallow copy of a tx with a new +// version, so that it can be hashed or serialized accordingly. +func (msg *MsgTx) shallowCopyForSerializing(version int32) *MsgTx { + return &MsgTx{ + Version: version, + TxIn: msg.TxIn, + TxOut: msg.TxOut, + LockTime: msg.LockTime, + Expiry: msg.Expiry, + } +} + +// TxSha generates the Hash name for the transaction prefix. +func (msg *MsgTx) TxSha() chainhash.Hash { + // Encode the transaction and calculate double sha256 on the result. + // Ignore the error returns since the only way the encode could fail + // is being out of memory or due to nil pointers, both of which would + // cause a run-time panic. + + // TxSha should always calculate a non-witnessed hash. 
+ mtxCopy := msg.shallowCopyForSerializing(NoWitnessMsgTxVersion()) + + buf := bytes.NewBuffer(make([]byte, 0, mtxCopy.SerializeSize())) + _ = mtxCopy.Serialize(buf) + + return chainhash.HashFuncH(buf.Bytes()) +} + +// CachedTxSha generates the Hash name for the transaction prefix and stores +// it if it does not exist. The cached hash is then returned. It can be +// recalculated later with RecacheTxSha. +func (msg *MsgTx) CachedTxSha() *chainhash.Hash { + if msg.CachedHash == nil { + h := msg.TxSha() + msg.CachedHash = &h + } + + return msg.CachedHash +} + +// RecacheTxSha generates the Hash name for the transaction prefix and stores +// it. The cached hash is then returned. +func (msg *MsgTx) RecacheTxSha() *chainhash.Hash { + h := msg.TxSha() + msg.CachedHash = &h + + return msg.CachedHash +} + +// TxShaWitness generates the Hash name for the transaction witness. +func (msg *MsgTx) TxShaWitness() chainhash.Hash { + // Encode the transaction and calculate double sha256 on the result. + // Ignore the error returns since the only way the encode could fail + // is being out of memory or due to nil pointers, both of which would + // cause a run-time panic. + + // TxShaWitness should always calculate a witnessed hash. + mtxCopy := msg.shallowCopyForSerializing(WitnessOnlyMsgTxVersion()) + + buf := bytes.NewBuffer(make([]byte, 0, mtxCopy.SerializeSize())) + _ = mtxCopy.Serialize(buf) + + return chainhash.HashFuncH(buf.Bytes()) +} + +// TxShaWitnessSigning generates the Hash name for the transaction witness with +// the malleable portions (AmountIn, BlockHeight, BlockIndex) removed. These are +// verified and set by the miner instead. +func (msg *MsgTx) TxShaWitnessSigning() chainhash.Hash { + // Encode the transaction and calculate double sha256 on the result. + // Ignore the error returns since the only way the encode could fail + // is being out of memory or due to nil pointers, both of which would + // cause a run-time panic. + + // TxShaWitness should always calculate a witnessed hash. + mtxCopy := msg.shallowCopyForSerializing(WitnessSigningMsgTxVersion()) + + buf := bytes.NewBuffer(make([]byte, 0, mtxCopy.SerializeSize())) + _ = mtxCopy.Serialize(buf) + + return chainhash.HashFuncH(buf.Bytes()) +} + +// TxShaWitnessValueSigning generates the Hash name for the transaction witness +// with BlockHeight and BlockIndex removed, allowing the signer to specify the +// ValueIn. +func (msg *MsgTx) TxShaWitnessValueSigning() chainhash.Hash { + // Encode the transaction and calculate double sha256 on the result. + // Ignore the error returns since the only way the encode could fail + // is being out of memory or due to nil pointers, both of which would + // cause a run-time panic. + + // TxShaWitness should always calculate a witnessed hash. + mtxCopy := msg.shallowCopyForSerializing(WitnessValueSigningMsgTxVersion()) + + buf := bytes.NewBuffer(make([]byte, 0, mtxCopy.SerializeSize())) + _ = mtxCopy.Serialize(buf) + + return chainhash.HashFuncH(buf.Bytes()) +} + +// TxShaFull generates the Hash name for the transaction prefix || witness. It +// first obtains the hashes for both the transaction prefix and witness, then +// concatenates them and hashes these 64 bytes. +// Note that the inputs to the hashes, serialized prefix and serialized witnesses, +// have different uint32 versions because version is now actually two uint16s, +// with the last 16 bits referring to the serialization type. The first 16 bits +// refer to the actual version, and these must be the same in both serializations. 
+func (msg *MsgTx) TxShaFull() chainhash.Hash { + concat := make([]byte, 64, 64) + prefixHash := msg.TxSha() + witnessHash := msg.TxShaWitness() + copy(concat[0:32], prefixHash[:]) + copy(concat[32:64], witnessHash[:]) + + return chainhash.HashFuncH(concat) +} + +// TxShaLegacy generates the legacy transaction hash, for software +// compatibility. +func (msg *MsgTx) TxShaLegacy() chainhash.Hash { // Encode the transaction and calculate double sha256 on the result. // Ignore the error returns since the only way the encode could fail // is being out of memory or due to nil pointers, both of which would // cause a run-time panic. buf := bytes.NewBuffer(make([]byte, 0, msg.SerializeSize())) - _ = msg.Serialize(buf) - return DoubleSha256SH(buf.Bytes()) + _ = msg.LegacySerialize(buf) + return chainhash.HashFuncH(buf.Bytes()) } // Copy creates a deep copy of a transaction so that the original does not get @@ -186,6 +478,7 @@ func (msg *MsgTx) Copy() *MsgTx { TxIn: make([]*TxIn, 0, len(msg.TxIn)), TxOut: make([]*TxOut, 0, len(msg.TxOut)), LockTime: msg.LockTime, + Expiry: msg.Expiry, } // Deep copy the old TxIn data. @@ -195,6 +488,7 @@ func (msg *MsgTx) Copy() *MsgTx { newOutPoint := OutPoint{} newOutPoint.Hash.SetBytes(oldOutPoint.Hash[:]) newOutPoint.Index = oldOutPoint.Index + newOutPoint.Tree = oldOutPoint.Tree // Deep copy the old signature script. var newScript []byte @@ -209,8 +503,11 @@ func (msg *MsgTx) Copy() *MsgTx { // new Tx. newTxIn := TxIn{ PreviousOutPoint: newOutPoint, - SignatureScript: newScript, Sequence: oldTxIn.Sequence, + ValueIn: oldTxIn.ValueIn, + BlockHeight: oldTxIn.BlockHeight, + BlockIndex: oldTxIn.BlockIndex, + SignatureScript: newScript, } newTx.TxIn = append(newTx.TxIn, &newTxIn) } @@ -230,6 +527,7 @@ func (msg *MsgTx) Copy() *MsgTx { // new Tx. newTxOut := TxOut{ Value: oldTxOut.Value, + Version: oldTxOut.Version, PkScript: newScript, } newTx.TxOut = append(newTx.TxOut, &newTxOut) @@ -238,7 +536,221 @@ func (msg *MsgTx) Copy() *MsgTx { return &newTx } -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// decodePrefix decodes a transaction prefix and stores the contents +// in the embedded msgTx. +func (msg *MsgTx) decodePrefix(r io.Reader, pver uint32) error { + count, err := readVarInt(r, pver) + if err != nil { + return err + } + + // Prevent more input transactions than could possibly fit into a + // message. It would be possible to cause memory exhaustion and panics + // without a sane upper bound on this count. + if count > uint64(maxTxInPerMessage) { + str := fmt.Sprintf("too many input transactions to fit into "+ + "max message size [count %d, max %d]", count, + maxTxInPerMessage) + return messageError("MsgTx.decodePrefix", str) + } + + // TxIns. + msg.TxIn = make([]*TxIn, count) + for i := uint64(0); i < count; i++ { + ti := TxIn{} + err = readTxInPrefix(r, pver, msg.Version, &ti) + if err != nil { + return err + } + msg.TxIn[i] = &ti + } + + count, err = readVarInt(r, pver) + if err != nil { + return err + } + + // Prevent more output transactions than could possibly fit into a + // message. It would be possible to cause memory exhaustion and panics + // without a sane upper bound on this count. + if count > uint64(maxTxOutPerMessage) { + str := fmt.Sprintf("too many output transactions to fit into "+ + "max message size [count %d, max %d]", count, + maxTxOutPerMessage) + return messageError("MsgTx.decodePrefix", str) + } + + // TxOuts. 
+ msg.TxOut = make([]*TxOut, count) + for i := uint64(0); i < count; i++ { + to := TxOut{} + err = readTxOut(r, pver, msg.Version, &to) + if err != nil { + return err + } + msg.TxOut[i] = &to + } + + // Locktime and expiry. + var buf [4]byte + _, err = io.ReadFull(r, buf[:]) + if err != nil { + return err + } + msg.LockTime = binary.LittleEndian.Uint32(buf[:]) + _, err = io.ReadFull(r, buf[:]) + if err != nil { + return err + } + msg.Expiry = binary.LittleEndian.Uint32(buf[:]) + + return nil +} + +func (msg *MsgTx) decodeWitness(r io.Reader, pver uint32, isFull bool) error { + // Witness only; generate the TxIn list and fill out only the + // sigScripts. + if !isFull { + count, err := readVarInt(r, pver) + if err != nil { + return err + } + + // Prevent more input transactions than could possibly fit into a + // message. It would be possible to cause memory exhaustion and panics + // without a sane upper bound on this count. + if count > uint64(maxTxInPerMessage) { + str := fmt.Sprintf("too many input transactions to fit into "+ + "max message size [count %d, max %d]", count, + maxTxInPerMessage) + return messageError("MsgTx.decodeWitness", str) + } + + msg.TxIn = make([]*TxIn, count) + for i := uint64(0); i < count; i++ { + ti := TxIn{} + err = readTxInWitness(r, pver, msg.Version, &ti) + if err != nil { + return err + } + msg.TxIn[i] = &ti + } + msg.TxOut = make([]*TxOut, 0) + } else { + // We're decoding witnesses from a full transaction, so read in + // the number of signature scripts, check to make sure it's the + // same as the number of TxIns we currently have, then fill in + // the signature scripts. + count, err := readVarInt(r, pver) + if err != nil { + return err + } + + // Don't allow the deserializer to panic by accessing memory + // that doesn't exist. + if int(count) != len(msg.TxIn) { + str := fmt.Sprintf("non equal witness and prefix txin quantities "+ + "(witness %v, prefix %v)", count, + len(msg.TxIn)) + return messageError("MsgTx.decodeWitness", str) + } + + // Prevent more input transactions than could possibly fit into a + // message. It would be possible to cause memory exhaustion and panics + // without a sane upper bound on this count. + if count > uint64(maxTxInPerMessage) { + str := fmt.Sprintf("too many input transactions to fit into "+ + "max message size [count %d, max %d]", count, + maxTxInPerMessage) + return messageError("MsgTx.decodeWitness", str) + } + + // Read in the witnesses, and copy them into the already generated + // by decodePrefix TxIns. + for i := uint64(0); i < count; i++ { + ti := TxIn{} + err = readTxInWitness(r, pver, msg.Version, &ti) + if err != nil { + return err + } + + msg.TxIn[i].ValueIn = ti.ValueIn + msg.TxIn[i].BlockHeight = ti.BlockHeight + msg.TxIn[i].BlockIndex = ti.BlockIndex + msg.TxIn[i].SignatureScript = ti.SignatureScript + } + } + + return nil +} + +// decodeWitnessSigning decodes a witness for signing. +func (msg *MsgTx) decodeWitnessSigning(r io.Reader, pver uint32) error { + // Witness only for signing; generate the TxIn list and fill out only the + // sigScripts. + count, err := readVarInt(r, pver) + if err != nil { + return err + } + + // Prevent more input transactions than could possibly fit into a + // message. It would be possible to cause memory exhaustion and panics + // without a sane upper bound on this count. 
+ if count > uint64(maxTxInPerMessage) { + str := fmt.Sprintf("too many input transactions to fit into "+ + "max message size [count %d, max %d]", count, + maxTxInPerMessage) + return messageError("MsgTx.decodeWitness", str) + } + + msg.TxIn = make([]*TxIn, count) + for i := uint64(0); i < count; i++ { + ti := TxIn{} + err = readTxInWitnessSigning(r, pver, msg.Version, &ti) + if err != nil { + return err + } + msg.TxIn[i] = &ti + } + msg.TxOut = make([]*TxOut, 0) + + return nil +} + +// decodeWitnessValueSigning decodes a witness for signing with value. +func (msg *MsgTx) decodeWitnessValueSigning(r io.Reader, pver uint32) error { + // Witness only for signing; generate the TxIn list and fill out only the + // sigScripts. + count, err := readVarInt(r, pver) + if err != nil { + return err + } + + // Prevent more input transactions than could possibly fit into a + // message. It would be possible to cause memory exhaustion and panics + // without a sane upper bound on this count. + if count > uint64(maxTxInPerMessage) { + str := fmt.Sprintf("too many input transactions to fit into "+ + "max message size [count %d, max %d]", count, + maxTxInPerMessage) + return messageError("MsgTx.decodeWitness", str) + } + + msg.TxIn = make([]*TxIn, count) + for i := uint64(0); i < count; i++ { + ti := TxIn{} + err = readTxInWitnessValueSigning(r, pver, msg.Version, &ti) + if err != nil { + return err + } + msg.TxIn[i] = &ti + } + msg.TxOut = make([]*TxOut, 0) + + return nil +} + +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. // See Deserialize for decoding transactions stored to disk, such as in a // database, as opposed to decoding transactions from the wire. @@ -249,6 +761,58 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32) error { return err } msg.Version = int32(binary.LittleEndian.Uint32(buf[:])) + _, mType := msgTxVersionToVars(msg.Version) + + switch { + case mType == TxSerializeNoWitness: + err := msg.decodePrefix(r, pver) + if err != nil { + return err + } + + case mType == TxSerializeOnlyWitness: + err := msg.decodeWitness(r, pver, false) + if err != nil { + return err + } + + case mType == TxSerializeWitnessSigning: + err := msg.decodeWitnessSigning(r, pver) + if err != nil { + return err + } + + case mType == TxSerializeWitnessValueSigning: + err := msg.decodeWitnessValueSigning(r, pver) + if err != nil { + return err + } + + case mType == TxSerializeFull: + err := msg.decodePrefix(r, pver) + if err != nil { + return err + } + err = msg.decodeWitness(r, pver, true) + if err != nil { + return err + } + default: + return messageError("MsgTx.BtcDecode", "unsupported transaction type") + } + + return nil +} + +// LegacyBtcDecode decodes r using the decred protocol encoding into the +// receiver. This is used for the decoding of legacy serialized transactions. 
+func (msg *MsgTx) LegacyBtcDecode(r io.Reader, pver uint32) error { + var buf [4]byte + _, err := io.ReadFull(r, buf[:]) + if err != nil { + return err + } + msg.Version = int32(binary.LittleEndian.Uint32(buf[:])) count, err := readVarInt(r, pver) if err != nil { @@ -268,7 +832,7 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32) error { msg.TxIn = make([]*TxIn, count) for i := uint64(0); i < count; i++ { ti := TxIn{} - err = readTxIn(r, pver, msg.Version, &ti) + err = legacyReadTxIn(r, pver, msg.Version, &ti) if err != nil { return err } @@ -293,7 +857,7 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32) error { msg.TxOut = make([]*TxOut, count) for i := uint64(0); i < count; i++ { to := TxOut{} - err = readTxOut(r, pver, msg.Version, &to) + err = legacyReadTxOut(r, pver, msg.Version, &to) if err != nil { return err } @@ -312,7 +876,7 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32) error { // Deserialize decodes a transaction from r into the receiver using a format // that is suitable for long-term storage such as a database while respecting // the Version field in the transaction. This function differs from BtcDecode -// in that BtcDecode decodes from the bitcoin wire protocol as it was sent +// in that BtcDecode decodes from the Decred wire protocol as it was sent // across the network. The wire encoding can technically differ depending on // the protocol version and doesn't even really need to match the format of a // stored transaction at all. As of the time this comment was written, the @@ -326,26 +890,32 @@ func (msg *MsgTx) Deserialize(r io.Reader) error { return msg.BtcDecode(r, 0) } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. -// This is part of the Message interface implementation. -// See Serialize for encoding transactions to be stored to disk, such as in a -// database, as opposed to encoding transactions for the wire. -func (msg *MsgTx) BtcEncode(w io.Writer, pver uint32) error { - var buf [4]byte - binary.LittleEndian.PutUint32(buf[:], uint32(msg.Version)) - _, err := w.Write(buf[:]) - if err != nil { - return err - } +// LegacyDeserialize decodes a transaction that has been encoded in the legacy +// Decred format. +func (msg *MsgTx) LegacyDeserialize(r io.Reader) error { + // At the current time, there is no difference between the wire encoding + // at protocol version 0 and the stable long-term storage format. As + // a result, make use of BtcDecode. + return msg.LegacyBtcDecode(r, 0) +} +// FromBytes deserializes a transaction byte slice. +func (msg *MsgTx) FromBytes(b []byte) error { + r := bytes.NewReader(b) + return msg.Deserialize(r) +} + +// encodePrefix encodes a transaction prefix into a writer. +func (msg *MsgTx) encodePrefix(w io.Writer, pver uint32) error { + var buf [4]byte count := uint64(len(msg.TxIn)) - err = writeVarInt(w, pver, count) + err := writeVarInt(w, pver, count) if err != nil { return err } for _, ti := range msg.TxIn { - err = writeTxIn(w, pver, msg.Version, ti) + err = writeTxInPrefix(w, pver, msg.Version, ti) if err != nil { return err } @@ -370,13 +940,181 @@ func (msg *MsgTx) BtcEncode(w io.Writer, pver uint32) error { return err } + binary.LittleEndian.PutUint32(buf[:], msg.Expiry) + _, err = w.Write(buf[:]) + if err != nil { + return err + } + + return nil +} + +// encodeWitness encodes a transaction witness into a writer. 
+func (msg *MsgTx) encodeWitness(w io.Writer, pver uint32) error { + count := uint64(len(msg.TxIn)) + err := writeVarInt(w, pver, count) + if err != nil { + return err + } + + for _, ti := range msg.TxIn { + err = writeTxInWitness(w, pver, msg.Version, ti) + if err != nil { + return err + } + } + + return nil +} + +// encodeWitnessSigning encodes a transaction witness into a writer for signing. +func (msg *MsgTx) encodeWitnessSigning(w io.Writer, pver uint32) error { + count := uint64(len(msg.TxIn)) + err := writeVarInt(w, pver, count) + if err != nil { + return err + } + + for _, ti := range msg.TxIn { + err = writeTxInWitnessSigning(w, pver, msg.Version, ti) + if err != nil { + return err + } + } + + return nil +} + +// encodeWitnessValueSigning encodes a transaction witness into a writer for +// signing, with the value included. +func (msg *MsgTx) encodeWitnessValueSigning(w io.Writer, pver uint32) error { + count := uint64(len(msg.TxIn)) + err := writeVarInt(w, pver, count) + if err != nil { + return err + } + + for _, ti := range msg.TxIn { + err = writeTxInWitnessValueSigning(w, pver, msg.Version, ti) + if err != nil { + return err + } + } + + return nil +} + +// Encode encodes the receiver to w using the Decred protocol encoding. +// This is part of the Message interface implementation. +// See Serialize for encoding transactions to be stored to disk, such as in a +// database, as opposed to encoding transactions for the wire. +func (msg *MsgTx) BtcEncode(w io.Writer, pver uint32) error { + var buf [4]byte + binary.LittleEndian.PutUint32(buf[:], uint32(msg.Version)) + _, mType := msgTxVersionToVars(msg.Version) + + _, err := w.Write(buf[:]) + if err != nil { + return err + } + + switch { + case mType == TxSerializeNoWitness: + err := msg.encodePrefix(w, pver) + if err != nil { + return err + } + + case mType == TxSerializeOnlyWitness: + err := msg.encodeWitness(w, pver) + if err != nil { + return err + } + + case mType == TxSerializeWitnessSigning: + err := msg.encodeWitnessSigning(w, pver) + if err != nil { + return err + } + + case mType == TxSerializeWitnessValueSigning: + err := msg.encodeWitnessValueSigning(w, pver) + if err != nil { + return err + } + + case mType == TxSerializeFull: + err := msg.encodePrefix(w, pver) + if err != nil { + return err + } + err = msg.encodeWitness(w, pver) + if err != nil { + return err + } + + default: + return messageError("MsgTx.BtcEncode", "unsupported transaction type") + } + + return nil +} + +// Encode encodes the receiver to w using the Decred protocol encoding. +// This is for transactions encoded in the legacy encoding, for compatibility. 
+func (msg *MsgTx) LegacyBtcEncode(w io.Writer, pver uint32) error { + var buf [4]byte + binary.LittleEndian.PutUint32(buf[:], uint32(msg.Version)) + _, err := w.Write(buf[:]) + if err != nil { + return err + } + + count := uint64(len(msg.TxIn)) + err = writeVarInt(w, pver, count) + if err != nil { + return err + } + + for _, ti := range msg.TxIn { + err = legacyWriteTxIn(w, pver, msg.Version, ti) + if err != nil { + return err + } + } + + count = uint64(len(msg.TxOut)) + err = writeVarInt(w, pver, count) + if err != nil { + return err + } + + for _, to := range msg.TxOut { + err = legacyWriteTxOut(w, pver, msg.Version, to) + if err != nil { + return err + } + } + + binary.LittleEndian.PutUint32(buf[:], msg.LockTime) + _, err = w.Write(buf[:]) + if err != nil { + return err + } + + binary.LittleEndian.PutUint32(buf[:], msg.Expiry) + _, err = w.Write(buf[:]) + if err != nil { + return err + } + return nil } // Serialize encodes the transaction to w using a format that suitable for // long-term storage such as a database while respecting the Version field in // the transaction. This function differs from BtcEncode in that BtcEncode -// encodes the transaction to the bitcoin wire protocol in order to be sent +// encodes the transaction to the decred wire protocol in order to be sent // across the network. The wire encoding can technically differ depending on // the protocol version and doesn't even really need to match the format of a // stored transaction at all. As of the time this comment was written, the @@ -388,19 +1126,134 @@ func (msg *MsgTx) Serialize(w io.Writer) error { // at protocol version 0 and the stable long-term storage format. As // a result, make use of BtcEncode. return msg.BtcEncode(w, 0) +} +// LegacySerialize encodes the transaction in decred legacy format, for +// compatibility. +func (msg *MsgTx) LegacySerialize(w io.Writer) error { + // At the current time, there is no difference between the wire encoding + // at protocol version 0 and the stable long-term storage format. As + // a result, make use of BtcEncode. + return msg.LegacyBtcEncode(w, 0) +} + +// Bytes returns the serialized form of the transaction in bytes. +func (msg *MsgTx) Bytes() ([]byte, error) { + // Serialize the MsgTx. + var w bytes.Buffer + err := msg.Serialize(&w) + if err != nil { + return nil, err + } + return w.Bytes(), nil +} + +// Bytes returns the serialized form of the transaction prefix in bytes. +func (msg *MsgTx) BytesPrefix() ([]byte, error) { + mtxCopy := msg.shallowCopyForSerializing(NoWitnessMsgTxVersion()) + + var w bytes.Buffer + err := mtxCopy.Serialize(&w) + if err != nil { + return nil, err + } + return w.Bytes(), nil +} + +// Bytes returns the serialized form of the transaction prefix in bytes. +func (msg *MsgTx) BytesWitness() ([]byte, error) { + mtxCopy := msg.shallowCopyForSerializing(WitnessOnlyMsgTxVersion()) + + var w bytes.Buffer + err := mtxCopy.Serialize(&w) + if err != nil { + return nil, err + } + return w.Bytes(), nil } // SerializeSize returns the number of bytes it would take to serialize the // the transaction. func (msg *MsgTx) SerializeSize() int { - // Version 4 bytes + LockTime 4 bytes + Serialized varint size for the - // number of transaction inputs and outputs. - n := 8 + VarIntSerializeSize(uint64(len(msg.TxIn))) + + _, mType := msgTxVersionToVars(msg.Version) + + // Unknown type return 0. 
+ n := 0 + switch { + case mType == TxSerializeNoWitness: + // Version 4 bytes + LockTime 4 bytes + Expiry 4 bytes + + // Serialized varint size for the number of transaction + // inputs and outputs. + n = 12 + VarIntSerializeSize(uint64(len(msg.TxIn))) + + VarIntSerializeSize(uint64(len(msg.TxOut))) + + for _, txIn := range msg.TxIn { + n += txIn.SerializeSizePrefix() + } + for _, txOut := range msg.TxOut { + n += txOut.SerializeSize() + } + + case mType == TxSerializeOnlyWitness: + // Version 4 bytes + Serialized varint size for the + // number of transaction signatures. + n = 4 + VarIntSerializeSize(uint64(len(msg.TxIn))) + + for _, txIn := range msg.TxIn { + n += txIn.SerializeSizeWitness() + } + + case mType == TxSerializeWitnessSigning: + // Version 4 bytes + Serialized varint size for the + // number of transaction signatures. + n = 4 + VarIntSerializeSize(uint64(len(msg.TxIn))) + + for _, txIn := range msg.TxIn { + n += txIn.SerializeSizeWitnessSigning() + } + + case mType == TxSerializeWitnessValueSigning: + // Version 4 bytes + Serialized varint size for the + // number of transaction signatures. + n = 4 + VarIntSerializeSize(uint64(len(msg.TxIn))) + + for _, txIn := range msg.TxIn { + n += txIn.SerializeSizeWitnessValueSigning() + } + + case mType == TxSerializeFull: + // Version 4 bytes + LockTime 4 bytes + Expiry 4 bytes + Serialized + // varint size for the number of transaction inputs (x2) and + // outputs. The number of inputs is added twice because it's + // encoded once in both the witness and the prefix. + n = 12 + VarIntSerializeSize(uint64(len(msg.TxIn))) + + VarIntSerializeSize(uint64(len(msg.TxIn))) + + VarIntSerializeSize(uint64(len(msg.TxOut))) + + for _, txIn := range msg.TxIn { + n += txIn.SerializeSizePrefix() + } + for _, txIn := range msg.TxIn { + n += txIn.SerializeSizeWitness() + } + for _, txOut := range msg.TxOut { + n += txOut.SerializeSize() + } + } + + return n +} + +// SerializeSize returns the number of bytes it would take to serialize the +// the transaction. +func (msg *MsgTx) LegacySerializeSize() int { + // Version 4 bytes + LockTime 4 bytes + Expiry 4 bytes + Serialized + // varint size for the number of transaction inputs and outputs. + n := 12 + VarIntSerializeSize(uint64(len(msg.TxIn))) + VarIntSerializeSize(uint64(len(msg.TxOut))) for _, txIn := range msg.TxIn { - n += txIn.SerializeSize() + n += txIn.LegacySerializeSize() } for _, txOut := range msg.TxOut { @@ -426,7 +1279,10 @@ func (msg *MsgTx) MaxPayloadLength(pver uint32) uint32 { // within the raw serialized transaction. The caller can easily obtain the // length of each script by using len on the script available via the // appropriate transaction output entry. +// TODO: Make this work for all serialization types, not just the full +// serialization type. func (msg *MsgTx) PkScriptLocs() []int { + // Return nil for witness-only tx. numTxOut := len(msg.TxOut) if numTxOut == 0 { return nil @@ -441,7 +1297,44 @@ func (msg *MsgTx) PkScriptLocs() []int { n := 4 + VarIntSerializeSize(uint64(len(msg.TxIn))) + VarIntSerializeSize(uint64(numTxOut)) for _, txIn := range msg.TxIn { - n += txIn.SerializeSize() + n += txIn.SerializeSizePrefix() + } + + // Calculate and set the appropriate offset for each public key script. + pkScriptLocs := make([]int, numTxOut) + for i, txOut := range msg.TxOut { + // The offset of the script in the transaction output is: + // + // Value 8 bytes + version 2 bytes + serialized varint size + // for the length of PkScript. 
+ n += 8 + 2 + VarIntSerializeSize(uint64(len(txOut.PkScript))) + pkScriptLocs[i] = n + n += len(txOut.PkScript) + } + + return pkScriptLocs +} + +// LegacyPkScriptLocs returns a slice containing the start of each public key +// script within the raw serialized transaction. The caller can easily obtain +// the length of each script by using len on the script available via the +// appropriate transaction output entry. This is for legacy decred format. +func (msg *MsgTx) LegacyPkScriptLocs() []int { + numTxOut := len(msg.TxOut) + if numTxOut == 0 { + return nil + } + + // The starting offset in the serialized transaction of the first + // transaction output is: + // + // Version 4 bytes + serialized varint size for the number of + // transaction inputs and outputs + serialized size of each transaction + // input. + n := 4 + VarIntSerializeSize(uint64(len(msg.TxIn))) + + VarIntSerializeSize(uint64(numTxOut)) + for _, txIn := range msg.TxIn { + n += txIn.LegacySerializeSize() } // Calculate and set the appropriate offset for each public key script. @@ -459,21 +1352,21 @@ func (msg *MsgTx) PkScriptLocs() []int { return pkScriptLocs } -// NewMsgTx returns a new bitcoin tx message that conforms to the Message +// NewMsgTx returns a new decred tx message that conforms to the Message // interface. The return instance has a default version of TxVersion and there // are no transaction inputs or outputs. Also, the lock time is set to zero // to indicate the transaction is valid immediately as opposed to some time in // future. func NewMsgTx() *MsgTx { return &MsgTx{ - Version: TxVersion, + Version: DefaultMsgTxVersion(), TxIn: make([]*TxIn, 0, defaultTxInOutAlloc), TxOut: make([]*TxOut, 0, defaultTxInOutAlloc), } } -// readOutPoint reads the next sequence of bytes from r as an OutPoint. -func readOutPoint(r io.Reader, pver uint32, version int32, op *OutPoint) error { +// ReadOutPoint reads the next sequence of bytes from r as an OutPoint. +func ReadOutPoint(r io.Reader, pver uint32, version int32, op *OutPoint) error { _, err := io.ReadFull(r, op.Hash[:]) if err != nil { return err @@ -485,12 +1378,20 @@ func readOutPoint(r io.Reader, pver uint32, version int32, op *OutPoint) error { return err } op.Index = binary.LittleEndian.Uint32(buf[:]) + + var bufTree [1]byte + _, err = io.ReadFull(r, bufTree[:]) + if err != nil { + return err + } + op.Tree = int8(bufTree[0]) + return nil } -// writeOutPoint encodes op to the bitcoin protocol encoding for an OutPoint +// WriteOutPoint encodes op to the decred protocol encoding for an OutPoint // to w. -func writeOutPoint(w io.Writer, pver uint32, version int32, op *OutPoint) error { +func WriteOutPoint(w io.Writer, pver uint32, version int32, op *OutPoint) error { _, err := w.Write(op.Hash[:]) if err != nil { return err @@ -502,14 +1403,164 @@ func writeOutPoint(w io.Writer, pver uint32, version int32, op *OutPoint) error if err != nil { return err } + + var bufTree [1]byte + bufTree[0] = byte(op.Tree) + _, err = w.Write(bufTree[:]) + if err != nil { + return err + } + return nil } -// readTxIn reads the next sequence of bytes from r as a transaction input -// (TxIn). -func readTxIn(r io.Reader, pver uint32, version int32, ti *TxIn) error { +// legacyReadOutPoint reads the next sequence of bytes from r as a legacy +// Decred OutPoint. 
+func legacyReadOutPoint(r io.Reader, pver uint32, version int32, + op *OutPoint) error { + _, err := io.ReadFull(r, op.Hash[:]) + if err != nil { + return err + } + + var buf [4]byte + _, err = io.ReadFull(r, buf[:]) + if err != nil { + return err + } + op.Index = binary.LittleEndian.Uint32(buf[:]) + + return nil +} + +// legacyWriteOutPoint encodes op to the decred protocol encoding for a legacy +// Decred OutPoint to w. +func legacyWriteOutPoint(w io.Writer, pver uint32, version int32, + op *OutPoint) error { + _, err := w.Write(op.Hash[:]) + if err != nil { + return err + } + + var buf [4]byte + binary.LittleEndian.PutUint32(buf[:], op.Index) + _, err = w.Write(buf[:]) + if err != nil { + return err + } + + return nil +} + +// readTxInPrefix reads the next sequence of bytes from r as a transaction input +// (TxIn) in the transaction prefix. +func readTxInPrefix(r io.Reader, pver uint32, version int32, ti *TxIn) error { + if version == WitnessOnlyMsgTxVersion() { + return messageError("readTxInPrefix", + "tried to read a prefix input for a witness only tx") + } + + // Outpoint. var op OutPoint - err := readOutPoint(r, pver, version, &op) + err := ReadOutPoint(r, pver, version, &op) + if err != nil { + return err + } + ti.PreviousOutPoint = op + + // Sequence. + var buf4 [4]byte + _, err = io.ReadFull(r, buf4[:]) + if err != nil { + return err + } + ti.Sequence = binary.LittleEndian.Uint32(buf4[:]) + + return nil +} + +// readTxInWitness reads the next sequence of bytes from r as a transaction input +// (TxIn) in the transaction witness. +func readTxInWitness(r io.Reader, pver uint32, version int32, ti *TxIn) error { + var err error + + // ValueIn. + var buf8 [8]byte + _, err = io.ReadFull(r, buf8[:]) + if err != nil { + return err + } + ti.ValueIn = int64(binary.LittleEndian.Uint64(buf8[:])) + + // BlockHeight. + var buf4 [4]byte + _, err = io.ReadFull(r, buf4[:]) + if err != nil { + return err + } + ti.BlockHeight = binary.LittleEndian.Uint32(buf4[:]) + + // BlockIndex. + _, err = io.ReadFull(r, buf4[:]) + if err != nil { + return err + } + ti.BlockIndex = binary.LittleEndian.Uint32(buf4[:]) + + // Signature script. + ti.SignatureScript, err = readVarBytes(r, pver, MaxMessagePayload, + "transaction input signature script") + if err != nil { + return err + } + + return nil +} + +// readTxInWitnessSigning reads a TxIn witness for signing. +func readTxInWitnessSigning(r io.Reader, pver uint32, version int32, + ti *TxIn) error { + var err error + + // Signature script. + ti.SignatureScript, err = readVarBytes(r, pver, MaxMessagePayload, + "transaction input signature script") + if err != nil { + return err + } + + return nil +} + +// readTxInWitnessValueSigning reads a TxIn witness for signing with value +// included. +func readTxInWitnessValueSigning(r io.Reader, pver uint32, version int32, + ti *TxIn) error { + var err error + + // ValueIn. + var buf8 [8]byte + _, err = io.ReadFull(r, buf8[:]) + if err != nil { + return err + } + ti.ValueIn = int64(binary.LittleEndian.Uint64(buf8[:])) + + // Signature script. + ti.SignatureScript, err = readVarBytes(r, pver, MaxMessagePayload, + "transaction input signature script") + if err != nil { + return err + } + + return nil +} + +// readTxInPrefix reads the next sequence of bytes from r as a transaction input +// (TxIn) in the transaction prefix. 
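readTxInWitness above fills in the three new per-input witness fields (ValueIn, BlockHeight, BlockIndex) ahead of the signature script. A small standalone decode of that layout, using the per-input witness bytes that appear later in the multiTxWitnessEncoded fixture; the single-byte varint handling is a simplification for short scripts only:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Per-input witness bytes from multiTxWitnessEncoded (everything after
	// the 4-byte version and the 1-byte input-count varint).
	witness := []byte{
		0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, // ValueIn
		0x15, 0x15, 0x15, 0x15, // BlockHeight
		0x34, 0x34, 0x34, 0x34, // BlockIndex
		0x07,                                     // Varint for length of signature script
		0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62, // Signature script
	}

	valueIn := int64(binary.LittleEndian.Uint64(witness[0:8]))
	blockHeight := binary.LittleEndian.Uint32(witness[8:12])
	blockIndex := binary.LittleEndian.Uint32(witness[12:16])
	scriptLen := int(witness[16]) // single-byte varint; fine for scripts < 0xfd bytes
	script := witness[17 : 17+scriptLen]

	fmt.Printf("ValueIn=%#x BlockHeight=%#x BlockIndex=%#x script=%x\n",
		valueIn, blockHeight, blockIndex, script)
	// ValueIn=0x1212121212121212 BlockHeight=0x15151515 BlockIndex=0x34343434
	// script=0431dc001b0162
}
```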
+func legacyReadTxIn(r io.Reader, pver uint32, version int32, ti *TxIn) error { + var op OutPoint + err := legacyReadOutPoint(r, pver, version, &op) if err != nil { return err } @@ -531,10 +1582,10 @@ func readTxIn(r io.Reader, pver uint32, version int32, ti *TxIn) error { return nil } -// writeTxIn encodes ti to the bitcoin protocol encoding for a transaction -// input (TxIn) to w. -func writeTxIn(w io.Writer, pver uint32, version int32, ti *TxIn) error { - err := writeOutPoint(w, pver, version, &ti.PreviousOutPoint) +// legacyWriteTxIn encodes ti to the decred protocol encoding for a transaction +// input (TxIn) to w for decred legacy format. +func legacyWriteTxIn(w io.Writer, pver uint32, version int32, ti *TxIn) error { + err := legacyWriteOutPoint(w, pver, version, &ti.PreviousOutPoint) if err != nil { return err } @@ -554,9 +1605,150 @@ func writeTxIn(w io.Writer, pver uint32, version int32, ti *TxIn) error { return nil } +// writeTxInPrefixs encodes ti to the decred protocol encoding for a transaction +// input (TxIn) prefix to w. +func writeTxInPrefix(w io.Writer, pver uint32, version int32, ti *TxIn) error { + err := WriteOutPoint(w, pver, version, &ti.PreviousOutPoint) + if err != nil { + return err + } + + var buf [4]byte + binary.LittleEndian.PutUint32(buf[:], ti.Sequence) + _, err = w.Write(buf[:]) + if err != nil { + return err + } + + return nil +} + +// writeTxWitness encodes ti to the decred protocol encoding for a transaction +// input (TxIn) witness to w. +func writeTxInWitness(w io.Writer, pver uint32, version int32, ti *TxIn) error { + // ValueIn. + var buf8 [8]byte + binary.LittleEndian.PutUint64(buf8[:], uint64(ti.ValueIn)) + _, err := w.Write(buf8[:]) + if err != nil { + return err + } + + // BlockHeight. + var buf4 [4]byte + binary.LittleEndian.PutUint32(buf4[:], ti.BlockHeight) + _, err = w.Write(buf4[:]) + if err != nil { + return err + } + + // BlockIndex. + binary.LittleEndian.PutUint32(buf4[:], ti.BlockIndex) + _, err = w.Write(buf4[:]) + if err != nil { + return err + } + + // Write the signature script. + err = writeVarBytes(w, pver, ti.SignatureScript) + if err != nil { + return err + } + + return nil +} + +// writeTxInWitnessSigning encodes ti to the decred protocol encoding for a +// transaction input (TxIn) witness to w for signing. +func writeTxInWitnessSigning(w io.Writer, pver uint32, version int32, + ti *TxIn) error { + var err error + + // Only write the signature script. + err = writeVarBytes(w, pver, ti.SignatureScript) + if err != nil { + return err + } + + return nil +} + +// writeTxInWitnessValueSigning encodes ti to the decred protocol encoding for a +// transaction input (TxIn) witness to w for signing with value included. +func writeTxInWitnessValueSigning(w io.Writer, pver uint32, version int32, + ti *TxIn) error { + var err error + + // ValueIn. + var buf8 [8]byte + binary.LittleEndian.PutUint64(buf8[:], uint64(ti.ValueIn)) + _, err = w.Write(buf8[:]) + if err != nil { + return err + } + + // Signature script. + err = writeVarBytes(w, pver, ti.SignatureScript) + if err != nil { + return err + } + + return nil +} + // readTxOut reads the next sequence of bytes from r as a transaction output // (TxOut). 
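The readTxOut/writeTxOut pair that follows inserts a 2-byte script version between the value and the script. A hand-rolled sketch of the same byte layout, outside the wire package; the value and script version are taken from the multiTx fixture, the one-byte script is a stand-in, and only single-byte varints are handled:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeTxOut mirrors the new output layout: 8-byte little-endian value,
// 2-byte little-endian script version, then the public key script as
// variable-length bytes (single-byte varint only, i.e. scripts < 0xfd bytes).
func encodeTxOut(value int64, scriptVersion uint16, pkScript []byte) []byte {
	buf := make([]byte, 0, 8+2+1+len(pkScript))
	var b8 [8]byte
	binary.LittleEndian.PutUint64(b8[:], uint64(value))
	buf = append(buf, b8[:]...)
	var b2 [2]byte
	binary.LittleEndian.PutUint16(b2[:], scriptVersion)
	buf = append(buf, b2[:]...)
	buf = append(buf, byte(len(pkScript)))
	return append(buf, pkScript...)
}

func main() {
	out := encodeTxOut(0x12a05f200, 0xabab, []byte{0x51}) // 0x51 is a dummy script
	fmt.Printf("%x\n", out)                               // 00f2052a01000000abab0151
}
```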
func readTxOut(r io.Reader, pver uint32, version int32, to *TxOut) error { + var buf8 [8]byte + _, err := io.ReadFull(r, buf8[:]) + if err != nil { + return err + } + to.Value = int64(binary.LittleEndian.Uint64(buf8[:])) + + var buf2 [2]byte + _, err = io.ReadFull(r, buf2[:]) + if err != nil { + return err + } + to.Version = binary.LittleEndian.Uint16(buf2[:]) + + to.PkScript, err = readVarBytes(r, pver, MaxMessagePayload, + "transaction output public key script") + if err != nil { + return err + } + + return nil +} + +// writeTxOut encodes to into the decred protocol encoding for a transaction +// output (TxOut) to w. +func writeTxOut(w io.Writer, pver uint32, version int32, to *TxOut) error { + var buf8 [8]byte + binary.LittleEndian.PutUint64(buf8[:], uint64(to.Value)) + _, err := w.Write(buf8[:]) + if err != nil { + return err + } + + var buf2 [2]byte + binary.LittleEndian.PutUint16(buf2[:], to.Version) + _, err = w.Write(buf2[:]) + if err != nil { + return err + } + + err = writeVarBytes(w, pver, to.PkScript) + if err != nil { + return err + } + return nil +} + +// legacyReadTxOut reads the next sequence of bytes from r as a transaction output +// (TxOut) in legacy Decred format (for tests). +func legacyReadTxOut(r io.Reader, pver uint32, version int32, to *TxOut) error { var buf [8]byte _, err := io.ReadFull(r, buf[:]) if err != nil { @@ -573,9 +1765,9 @@ func readTxOut(r io.Reader, pver uint32, version int32, to *TxOut) error { return nil } -// writeTxOut encodes to into the bitcoin protocol encoding for a transaction -// output (TxOut) to w. -func writeTxOut(w io.Writer, pver uint32, version int32, to *TxOut) error { +// legacyWriteTxOut encodes to into the decred protocol encoding for a transaction +// output (TxOut) to w in legacy Decred format (for tests). +func legacyWriteTxOut(w io.Writer, pver uint32, version int32, to *TxOut) error { var buf [8]byte binary.LittleEndian.PutUint64(buf[:], uint64(to.Value)) _, err := w.Write(buf[:]) @@ -589,3 +1781,13 @@ func writeTxOut(w io.Writer, pver uint32, version int32, to *TxOut) error { } return nil } + +// IsSupportedVersion returns if a transaction version is supported or not. +// Currently, inclusion into the memory pool (and thus blocks) only supports +// the DefaultMsgTxVersion. +func IsSupportedMsgTxVersion(msgTx *MsgTx) bool { + if msgTx.Version == DefaultMsgTxVersion() { + return true + } + return false +} diff --git a/wire/msgtx_test.go b/wire/msgtx_test.go index 2e4e2030..bba70f27 100644 --- a/wire/msgtx_test.go +++ b/wire/msgtx_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -11,8 +12,11 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + + "github.com/decred/dcrd/chaincfg/chainhash" + "github.com/decred/dcrd/wire" + "github.com/decred/dcrutil" ) // TestTx tests the MsgTx API. @@ -21,7 +25,7 @@ func TestTx(t *testing.T) { // Block 100000 hash. hashStr := "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" - hash, err := wire.NewShaHashFromStr(hashStr) + hash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } @@ -48,7 +52,7 @@ func TestTx(t *testing.T) { // NOTE: This is a block hash and made up index, but we're only // testing package functionality. 
prevOutIndex := uint32(1) - prevOut := wire.NewOutPoint(hash, prevOutIndex) + prevOut := wire.NewOutPoint(hash, prevOutIndex, dcrutil.TxTreeRegular) if !prevOut.Hash.IsEqual(hash) { t.Errorf("NewOutPoint: wrong hash - got %v, want %v", spew.Sprint(&prevOut.Hash), spew.Sprint(hash)) @@ -131,25 +135,29 @@ func TestTx(t *testing.T) { // TestTxSha tests the ability to generate the hash of a transaction accurately. func TestTxSha(t *testing.T) { // Hash of first transaction from block 113875. - hashStr := "f051e59b5e2503ac626d03aaeac8ab7be2d72ba4b7e97119c5852d70d52dcb86" - wantHash, err := wire.NewShaHashFromStr(hashStr) + hashStr := "4538fc1618badd058ee88fd020984451024858796be0a1ed111877f887e1bd53" + wantHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) return } - // First transaction from block 113875. msgTx := wire.NewMsgTx() txIn := wire.TxIn{ PreviousOutPoint: wire.OutPoint{ - Hash: wire.ShaHash{}, + Hash: chainhash.Hash{}, Index: 0xffffffff, + Tree: dcrutil.TxTreeRegular, }, - SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62}, Sequence: 0xffffffff, + ValueIn: 5000000000, + BlockHeight: 0x3F3F3F3F, + BlockIndex: 0x2E2E2E2E, + SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62}, } txOut := wire.TxOut{ - Value: 5000000000, + Value: 5000000000, + Version: 0xF0F0, PkScript: []byte{ 0x41, // OP_DATA_65 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, @@ -167,6 +175,7 @@ func TestTxSha(t *testing.T) { msgTx.AddTxIn(&txIn) msgTx.AddTxOut(&txOut) msgTx.LockTime = 0 + msgTx.Expiry = 0 // Ensure the hash produced is expected. txHash := msgTx.TxSha() @@ -187,6 +196,8 @@ func TestTxWire(t *testing.T) { 0x00, // Varint for number of input transactions 0x00, // Varint for number of output transactions 0x00, 0x00, 0x00, 0x00, // Lock time + 0x00, 0x00, 0x00, 0x00, // Expiry + 0x00, // Varint for number of input signatures } tests := []struct { @@ -210,70 +221,6 @@ func TestTxWire(t *testing.T) { multiTxEncoded, wire.ProtocolVersion, }, - - // Protocol version BIP0035Version with no transactions. - { - noTx, - noTx, - noTxEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0035Version with multiple transactions. - { - multiTx, - multiTx, - multiTxEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0031Version with no transactions. - { - noTx, - noTx, - noTxEncoded, - wire.BIP0031Version, - }, - - // Protocol version BIP0031Version with multiple transactions. - { - multiTx, - multiTx, - multiTxEncoded, - wire.BIP0031Version, - }, - - // Protocol version NetAddressTimeVersion with no transactions. - { - noTx, - noTx, - noTxEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version NetAddressTimeVersion with multiple transactions. - { - multiTx, - multiTx, - multiTxEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version MultipleAddressVersion with no transactions. - { - noTx, - noTx, - noTxEncoded, - wire.MultipleAddressVersion, - }, - - // Protocol version MultipleAddressVersion with multiple transactions. - { - multiTx, - multiTx, - multiTxEncoded, - wire.MultipleAddressVersion, - }, } t.Logf("Running %d tests", len(tests)) @@ -324,31 +271,43 @@ func TestTxWireErrors(t *testing.T) { readErr error // Expected read error }{ // Force error in version. - {multiTx, multiTxEncoded, pver, 0, io.ErrShortWrite, io.EOF}, + {multiTx, multiTxEncoded, pver, 0, io.ErrShortWrite, io.EOF}, // 0 // Force error in number of transaction inputs. 
- {multiTx, multiTxEncoded, pver, 4, io.ErrShortWrite, io.EOF}, + {multiTx, multiTxEncoded, pver, 4, io.ErrShortWrite, io.EOF}, // 1 // Force error in transaction input previous block hash. - {multiTx, multiTxEncoded, pver, 5, io.ErrShortWrite, io.EOF}, - // Force error in transaction input previous block hash. - {multiTx, multiTxEncoded, pver, 5, io.ErrShortWrite, io.EOF}, + {multiTx, multiTxEncoded, pver, 5, io.ErrShortWrite, io.EOF}, // 2 // Force error in transaction input previous block output index. - {multiTx, multiTxEncoded, pver, 37, io.ErrShortWrite, io.EOF}, - // Force error in transaction input signature script length. - {multiTx, multiTxEncoded, pver, 41, io.ErrShortWrite, io.EOF}, - // Force error in transaction input signature script. - {multiTx, multiTxEncoded, pver, 42, io.ErrShortWrite, io.EOF}, + {multiTx, multiTxEncoded, pver, 37, io.ErrShortWrite, io.EOF}, // 3 + // Force error in transaction input previous block output tree. + {multiTx, multiTxEncoded, pver, 41, io.ErrShortWrite, io.EOF}, // 4 // Force error in transaction input sequence. - {multiTx, multiTxEncoded, pver, 49, io.ErrShortWrite, io.EOF}, + {multiTx, multiTxEncoded, pver, 42, io.ErrShortWrite, io.EOF}, // 5 // Force error in number of transaction outputs. - {multiTx, multiTxEncoded, pver, 53, io.ErrShortWrite, io.EOF}, + {multiTx, multiTxEncoded, pver, 46, io.ErrShortWrite, io.EOF}, // 6 // Force error in transaction output value. - {multiTx, multiTxEncoded, pver, 54, io.ErrShortWrite, io.EOF}, + {multiTx, multiTxEncoded, pver, 47, io.ErrShortWrite, io.EOF}, // 7 + // Force error in transaction output script version. + {multiTx, multiTxEncoded, pver, 55, io.ErrShortWrite, io.EOF}, // 8 // Force error in transaction output pk script length. - {multiTx, multiTxEncoded, pver, 62, io.ErrShortWrite, io.EOF}, + {multiTx, multiTxEncoded, pver, 57, io.ErrShortWrite, io.EOF}, // 9 // Force error in transaction output pk script. - {multiTx, multiTxEncoded, pver, 63, io.ErrShortWrite, io.EOF}, + {multiTx, multiTxEncoded, pver, 58, io.ErrShortWrite, io.EOF}, // 10 // Force error in transaction output lock time. - {multiTx, multiTxEncoded, pver, 130, io.ErrShortWrite, io.EOF}, + {multiTx, multiTxEncoded, pver, 203, io.ErrShortWrite, io.EOF}, // 11 + // Force error in transaction output expiry. + {multiTx, multiTxEncoded, pver, 207, io.ErrShortWrite, io.EOF}, // 12 + // Force error in transaction num sig varint. + {multiTx, multiTxEncoded, pver, 211, io.ErrShortWrite, io.EOF}, // 13 + // Force error in transaction sig 0 AmountIn. + {multiTx, multiTxEncoded, pver, 212, io.ErrShortWrite, io.EOF}, // 14 + // Force error in transaction sig 0 BlockHeight. + {multiTx, multiTxEncoded, pver, 220, io.ErrShortWrite, io.EOF}, // 15 + // Force error in transaction sig 0 BlockIndex. + {multiTx, multiTxEncoded, pver, 224, io.ErrShortWrite, io.EOF}, // 16 + // Force error in transaction sig 0 length. + {multiTx, multiTxEncoded, pver, 228, io.ErrShortWrite, io.EOF}, // 17 + // Force error in transaction sig 0 signature script. + {multiTx, multiTxEncoded, pver, 229, io.ErrShortWrite, io.EOF}, // 18 } t.Logf("Running %d tests", len(tests)) @@ -383,6 +342,8 @@ func TestTxSerialize(t *testing.T) { 0x00, // Varint for number of input transactions 0x00, // Varint for number of output transactions 0x00, 0x00, 0x00, 0x00, // Lock time + 0x00, 0x00, 0x00, 0x00, // Expiry + 0x00, // Varint for number of input signatures } tests := []struct { @@ -423,6 +384,377 @@ func TestTxSerialize(t *testing.T) { continue } + // Test SerializeSize. 
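The new offsets in the error table above (tree at 41, sequence at 42, output count at 46, script version at 55, lock time at 203, expiry at 207, and so on through the signature script at 229) follow directly from the field sizes of the full serialization of multiTx. A small sketch that recomputes those offsets and the 236-byte total used by TestTxSerializeSize, assuming one input, two 67-byte pk scripts, one witness, and single-byte varints throughout:

```go
package main

import "fmt"

func main() {
	fields := []struct {
		name string
		size int
	}{
		{"version", 4},
		{"input count varint", 1},
		{"prev output hash", 32},
		{"prev output index", 4},
		{"prev output tree", 1},
		{"sequence", 4},
		{"output count varint", 1},
		{"output 0 value", 8},
		{"output 0 script version", 2},
		{"output 0 script length varint", 1},
		{"output 0 pk script", 67},
		{"output 1 value", 8},
		{"output 1 script version", 2},
		{"output 1 script length varint", 1},
		{"output 1 pk script", 67},
		{"lock time", 4},
		{"expiry", 4},
		{"witness count varint", 1},
		{"witness 0 ValueIn", 8},
		{"witness 0 BlockHeight", 4},
		{"witness 0 BlockIndex", 4},
		{"witness 0 script length varint", 1},
		{"witness 0 signature script", 7},
	}

	offset := 0
	for _, f := range fields {
		fmt.Printf("%-32s starts at %d\n", f.name, offset)
		offset += f.size
	}
	fmt.Println("total serialized size:", offset) // 236
}
```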
+ sz := test.in.SerializeSize() + actualSz := len(buf.Bytes()) + if sz != actualSz { + t.Errorf("Wrong serialize size #%d\n got: %s want: %s", i, + sz, actualSz) + } + + // Deserialize the transaction. + var tx wire.MsgTx + rbuf := bytes.NewReader(test.buf) + err = tx.Deserialize(rbuf) + if err != nil { + t.Errorf("Deserialize #%d error %v", i, err) + continue + } + if !reflect.DeepEqual(&tx, test.out) { + t.Errorf("Deserialize #%d\n got: %s want: %s", i, + spew.Sdump(&tx), spew.Sdump(test.out)) + continue + } + + // Ensure the public key script locations are accurate. + pkScriptLocs := test.in.PkScriptLocs() + if !reflect.DeepEqual(pkScriptLocs, test.pkScriptLocs) { + t.Errorf("PkScriptLocs #%d\n got: %s want: %s", i, + spew.Sdump(pkScriptLocs), + spew.Sdump(test.pkScriptLocs)) + continue + } + for j, loc := range pkScriptLocs { + wantPkScript := test.in.TxOut[j].PkScript + gotPkScript := test.buf[loc : loc+len(wantPkScript)] + if !bytes.Equal(gotPkScript, wantPkScript) { + t.Errorf("PkScriptLocs #%d:%d\n unexpected "+ + "script got: %s want: %s", i, j, + spew.Sdump(gotPkScript), + spew.Sdump(wantPkScript)) + } + } + } +} + +// TestTxSerializePrefix tests MsgTx serialize and deserialize. +func TestTxSerializePrefix(t *testing.T) { + noTx := wire.NewMsgTx() + noTx.Version = 65537 + noTxEncoded := []byte{ + 0x01, 0x00, 0x01, 0x00, // Version + 0x00, // Varint for number of input transactions + 0x00, // Varint for number of output transactions + 0x00, 0x00, 0x00, 0x00, // Lock time + 0x00, 0x00, 0x00, 0x00, // Expiry + } + + tests := []struct { + in *wire.MsgTx // Message to encode + out *wire.MsgTx // Expected decoded message + buf []byte // Serialized data + pkScriptLocs []int // Expected output script locations + }{ + // No transactions. + { + noTx, + noTx, + noTxEncoded, + nil, + }, + + // Multiple transactions. + { + multiTxPrefix, + multiTxPrefix, + multiTxPrefixEncoded, + multiTxPkScriptLocs, + }, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + // Serialize the transaction. + var buf bytes.Buffer + err := test.in.Serialize(&buf) + if err != nil { + t.Errorf("Serialize #%d error %v", i, err) + continue + } + if !bytes.Equal(buf.Bytes(), test.buf) { + t.Errorf("Serialize #%d\n got: %s want: %s", i, + spew.Sdump(buf.Bytes()), spew.Sdump(test.buf)) + continue + } + + // Test SerializeSize. + sz := test.in.SerializeSize() + actualSz := len(buf.Bytes()) + if sz != actualSz { + t.Errorf("Wrong serialize size #%d\n got: %s want: %s", i, + sz, actualSz) + } + + // Deserialize the transaction. + var tx wire.MsgTx + rbuf := bytes.NewReader(test.buf) + err = tx.Deserialize(rbuf) + if err != nil { + t.Errorf("Deserialize #%d error %v", i, err) + continue + } + if !reflect.DeepEqual(&tx, test.out) { + t.Errorf("Deserialize #%d\n got: %s want: %s", i, + spew.Sdump(&tx), spew.Sdump(test.out)) + continue + } + + // Ensure the public key script locations are accurate. 
+ pkScriptLocs := test.in.PkScriptLocs() + if !reflect.DeepEqual(pkScriptLocs, test.pkScriptLocs) { + t.Errorf("PkScriptLocs #%d\n got: %s want: %s", i, + spew.Sdump(pkScriptLocs), + spew.Sdump(test.pkScriptLocs)) + continue + } + for j, loc := range pkScriptLocs { + wantPkScript := test.in.TxOut[j].PkScript + gotPkScript := test.buf[loc : loc+len(wantPkScript)] + if !bytes.Equal(gotPkScript, wantPkScript) { + t.Errorf("PkScriptLocs #%d:%d\n unexpected "+ + "script got: %s want: %s", i, j, + spew.Sdump(gotPkScript), + spew.Sdump(wantPkScript)) + } + } + } +} + +// TestTxSerializeWitness tests MsgTx serialize and deserialize. +func TestTxSerializeWitness(t *testing.T) { + noTx := wire.NewMsgTx() + noTx.Version = 131073 + noTxEncoded := []byte{ + 0x01, 0x00, 0x02, 0x00, // Version + 0x00, // Varint for number of input signatures + } + + tests := []struct { + in *wire.MsgTx // Message to encode + out *wire.MsgTx // Expected decoded message + buf []byte // Serialized data + pkScriptLocs []int // Expected output script locations + }{ + // No transactions. + { + noTx, + noTx, + noTxEncoded, + nil, + }, + + // Multiple transactions. + { + multiTxWitness, + multiTxWitness, + multiTxWitnessEncoded, + nil, + }, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + // Serialize the transaction. + var buf bytes.Buffer + err := test.in.Serialize(&buf) + if err != nil { + t.Errorf("Serialize #%d error %v", i, err) + continue + } + if !bytes.Equal(buf.Bytes(), test.buf) { + t.Errorf("Serialize #%d\n got: %s want: %s", i, + spew.Sdump(buf.Bytes()), spew.Sdump(test.buf)) + continue + } + + // Test SerializeSize. + sz := test.in.SerializeSize() + actualSz := len(buf.Bytes()) + if sz != actualSz { + t.Errorf("Wrong serialize size #%d\n got: %s want: %s", i, + sz, actualSz) + } + + // Deserialize the transaction. + var tx wire.MsgTx + rbuf := bytes.NewReader(test.buf) + err = tx.Deserialize(rbuf) + if err != nil { + t.Errorf("Deserialize #%d error %v", i, err) + continue + } + if !reflect.DeepEqual(&tx, test.out) { + t.Errorf("Deserialize #%d\n got: %s want: %s", i, + spew.Sdump(&tx), spew.Sdump(test.out)) + continue + } + + // Ensure the public key script locations are accurate. + pkScriptLocs := test.in.PkScriptLocs() + if !reflect.DeepEqual(pkScriptLocs, test.pkScriptLocs) { + t.Errorf("PkScriptLocs #%d\n got: %s want: %s", i, + spew.Sdump(pkScriptLocs), + spew.Sdump(test.pkScriptLocs)) + continue + } + for j, loc := range pkScriptLocs { + wantPkScript := test.in.TxOut[j].PkScript + gotPkScript := test.buf[loc : loc+len(wantPkScript)] + if !bytes.Equal(gotPkScript, wantPkScript) { + t.Errorf("PkScriptLocs #%d:%d\n unexpected "+ + "script got: %s want: %s", i, j, + spew.Sdump(gotPkScript), + spew.Sdump(wantPkScript)) + } + } + } +} + +// TestTxSerializeWitnessSigning tests MsgTx serialize and deserialize. +func TestTxSerializeWitnessSigning(t *testing.T) { + noTx := wire.NewMsgTx() + noTx.Version = 196609 + noTxEncoded := []byte{ + 0x01, 0x00, 0x03, 0x00, // Version + 0x00, // Varint for number of input signatures + } + + tests := []struct { + in *wire.MsgTx // Message to encode + out *wire.MsgTx // Expected decoded message + buf []byte // Serialized data + pkScriptLocs []int // Expected output script locations + }{ + // No transactions. + { + noTx, + noTx, + noTxEncoded, + nil, + }, + + // Multiple transactions. 
+ { + multiTxWitnessSigning, + multiTxWitnessSigning, + multiTxWitnessSigningEncoded, + nil, + }, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + // Serialize the transaction. + var buf bytes.Buffer + err := test.in.Serialize(&buf) + if err != nil { + t.Errorf("Serialize #%d error %v", i, err) + continue + } + if !bytes.Equal(buf.Bytes(), test.buf) { + t.Errorf("Serialize #%d\n got: %s want: %s", i, + spew.Sdump(buf.Bytes()), spew.Sdump(test.buf)) + continue + } + + // Test SerializeSize. + sz := test.in.SerializeSize() + actualSz := len(buf.Bytes()) + if sz != actualSz { + t.Errorf("Wrong serialize size #%d\n got: %s want: %s", i, + sz, actualSz) + } + + // Deserialize the transaction. + var tx wire.MsgTx + rbuf := bytes.NewReader(test.buf) + err = tx.Deserialize(rbuf) + if err != nil { + t.Errorf("Deserialize #%d error %v", i, err) + continue + } + if !reflect.DeepEqual(&tx, test.out) { + t.Errorf("Deserialize #%d\n got: %s want: %s", i, + spew.Sdump(&tx), spew.Sdump(test.out)) + continue + } + + // Ensure the public key script locations are accurate. + pkScriptLocs := test.in.PkScriptLocs() + if !reflect.DeepEqual(pkScriptLocs, test.pkScriptLocs) { + t.Errorf("PkScriptLocs #%d\n got: %s want: %s", i, + spew.Sdump(pkScriptLocs), + spew.Sdump(test.pkScriptLocs)) + continue + } + for j, loc := range pkScriptLocs { + wantPkScript := test.in.TxOut[j].PkScript + gotPkScript := test.buf[loc : loc+len(wantPkScript)] + if !bytes.Equal(gotPkScript, wantPkScript) { + t.Errorf("PkScriptLocs #%d:%d\n unexpected "+ + "script got: %s want: %s", i, j, + spew.Sdump(gotPkScript), + spew.Sdump(wantPkScript)) + } + } + } +} + +// TestTxSerializeWitnessValueSigning tests MsgTx serialize and deserialize. +func TestTxSerializeWitnessValueSigning(t *testing.T) { + noTx := wire.NewMsgTx() + noTx.Version = 262145 + noTxEncoded := []byte{ + 0x01, 0x00, 0x04, 0x00, // Version + 0x00, // Varint for number of input signatures + } + + tests := []struct { + in *wire.MsgTx // Message to encode + out *wire.MsgTx // Expected decoded message + buf []byte // Serialized data + pkScriptLocs []int // Expected output script locations + }{ + // No transactions. + { + noTx, + noTx, + noTxEncoded, + nil, + }, + + // Multiple transactions. + { + multiTxWitnessValueSigning, + multiTxWitnessValueSigning, + multiTxWitnessValueSigningEncoded, + nil, + }, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + // Serialize the transaction. + var buf bytes.Buffer + err := test.in.Serialize(&buf) + if err != nil { + t.Errorf("Serialize #%d error %v", i, err) + continue + } + if !bytes.Equal(buf.Bytes(), test.buf) { + t.Errorf("Serialize #%d\n got: %s want: %s", i, + spew.Sdump(buf.Bytes()), spew.Sdump(test.buf)) + continue + } + + // Test SerializeSize. + sz := test.in.SerializeSize() + actualSz := len(buf.Bytes()) + if sz != actualSz { + t.Errorf("Wrong serialize size #%d\n got: %s want: %s", i, + sz, actualSz) + } + // Deserialize the transaction. var tx wire.MsgTx rbuf := bytes.NewReader(test.buf) @@ -474,26 +806,38 @@ func TestTxSerializeErrors(t *testing.T) { {multiTx, multiTxEncoded, 4, io.ErrShortWrite, io.EOF}, // Force error in transaction input previous block hash. {multiTx, multiTxEncoded, 5, io.ErrShortWrite, io.EOF}, - // Force error in transaction input previous block hash. - {multiTx, multiTxEncoded, 5, io.ErrShortWrite, io.EOF}, // Force error in transaction input previous block output index. 
{multiTx, multiTxEncoded, 37, io.ErrShortWrite, io.EOF}, - // Force error in transaction input signature script length. + // Force error in transaction input previous block output tree. {multiTx, multiTxEncoded, 41, io.ErrShortWrite, io.EOF}, - // Force error in transaction input signature script. - {multiTx, multiTxEncoded, 42, io.ErrShortWrite, io.EOF}, // Force error in transaction input sequence. - {multiTx, multiTxEncoded, 49, io.ErrShortWrite, io.EOF}, + {multiTx, multiTxEncoded, 42, io.ErrShortWrite, io.EOF}, // Force error in number of transaction outputs. - {multiTx, multiTxEncoded, 53, io.ErrShortWrite, io.EOF}, + {multiTx, multiTxEncoded, 46, io.ErrShortWrite, io.EOF}, // Force error in transaction output value. - {multiTx, multiTxEncoded, 54, io.ErrShortWrite, io.EOF}, + {multiTx, multiTxEncoded, 47, io.ErrShortWrite, io.EOF}, + // Force error in transaction output version. + {multiTx, multiTxEncoded, 55, io.ErrShortWrite, io.EOF}, // Force error in transaction output pk script length. - {multiTx, multiTxEncoded, 62, io.ErrShortWrite, io.EOF}, + {multiTx, multiTxEncoded, 57, io.ErrShortWrite, io.EOF}, // Force error in transaction output pk script. - {multiTx, multiTxEncoded, 63, io.ErrShortWrite, io.EOF}, - // Force error in transaction output lock time. - {multiTx, multiTxEncoded, 130, io.ErrShortWrite, io.EOF}, + {multiTx, multiTxEncoded, 58, io.ErrShortWrite, io.EOF}, + // Force error in transaction lock time. + {multiTx, multiTxEncoded, 203, io.ErrShortWrite, io.EOF}, + // Force error in transaction expiry. + {multiTx, multiTxEncoded, 207, io.ErrShortWrite, io.EOF}, + // Force error in transaction num sig varint. + {multiTx, multiTxEncoded, 211, io.ErrShortWrite, io.EOF}, + // Force error in transaction sig 0 ValueIn. + {multiTx, multiTxEncoded, 212, io.ErrShortWrite, io.EOF}, + // Force error in transaction sig 0 BlockHeight. + {multiTx, multiTxEncoded, 220, io.ErrShortWrite, io.EOF}, + // Force error in transaction sig 0 BlockIndex. + {multiTx, multiTxEncoded, 224, io.ErrShortWrite, io.EOF}, + // Force error in transaction sig 0 length. + {multiTx, multiTxEncoded, 228, io.ErrShortWrite, io.EOF}, + // Force error in transaction sig 0 signature script. + {multiTx, multiTxEncoded, 229, io.ErrShortWrite, io.EOF}, } t.Logf("Running %d tests", len(tests)) @@ -524,64 +868,97 @@ func TestTxSerializeErrors(t *testing.T) { // of inputs and outputs are handled properly. This could otherwise potentially // be used as an attack vector. func TestTxOverflowErrors(t *testing.T) { - // Use protocol version 70001 and transaction version 1 specifically + // Use protocol version 1 and transaction version 1 specifically // here instead of the latest values because the test data is using // bytes encoded with those versions. - pver := uint32(70001) - txVer := uint32(1) + pver := uint32(1) + txVer := wire.DefaultMsgTxVersion() tests := []struct { buf []byte // Wire encoding pver uint32 // Protocol version for wire encoding - version uint32 // Transaction version + version int32 // Transaction version err error // Expected error }{ - // Transaction that claims to have ~uint64(0) inputs. + // Transaction that claims to have ~uint64(0) inputs. [0] { []byte{ - 0x00, 0x00, 0x00, 0x01, // Version + 0x01, 0x00, 0x00, 0x00, // Version 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Varint for number of input transactions }, pver, txVer, &wire.MessageError{}, }, - // Transaction that claims to have ~uint64(0) outputs. + // Transaction that claims to have ~uint64(0) outputs. 
[1] { []byte{ - 0x00, 0x00, 0x00, 0x01, // Version + 0x01, 0x00, 0x00, 0x00, // Version 0x00, // Varint for number of input transactions 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Varint for number of output transactions }, pver, txVer, &wire.MessageError{}, }, - // Transaction that has an input with a signature script that + // Transaction that has an input with a signature script that [2] // claims to have ~uint64(0) length. { []byte{ - 0x00, 0x00, 0x00, 0x01, // Version + 0x01, 0x00, 0x00, 0x00, // Version 0x01, // Varint for number of input transactions 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash - 0xff, 0xff, 0xff, 0xff, // Prevous output index - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, // Varint for length of signature script + 0xff, 0xff, 0xff, 0xff, // Previous output index + 0x00, // Previous output tree + 0x00, // Varint for length of signature script + 0xff, 0xff, 0xff, 0xff, // Sequence + 0x02, // Varint for number of output transactions + 0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount + 0x43, // Varint for length of pk script + 0x41, // OP_DATA_65 + 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, + 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, + 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, + 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24, + 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97, + 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, + 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, + 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, + 0xa6, // 65-byte signature + 0xac, // OP_CHECKSIG + 0x00, 0xe1, 0xf5, 0x05, 0x00, 0x00, 0x00, 0x00, // Transaction amount + 0x43, // Varint for length of pk script + 0x41, // OP_DATA_65 + 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, + 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, + 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, + 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24, + 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97, + 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, + 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, + 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, + 0xa6, // 65-byte signature + 0xac, // OP_CHECKSIG + 0x00, 0x00, 0x00, 0x00, // Lock time + 0x00, 0x00, 0x00, 0x00, // Expiry + 0x01, // Varint for number of input signature + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Varint for sig script length (overflows) }, pver, txVer, &wire.MessageError{}, }, - // Transaction that has an output with a public key script + // Transaction that has an output with a public key script [3] // that claims to have ~uint64(0) length. 
{ []byte{ - 0x00, 0x00, 0x00, 0x01, // Version + 0x01, 0x00, 0x00, 0x00, // Version 0x01, // Varint for number of input transactions 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash 0xff, 0xff, 0xff, 0xff, // Prevous output index + 0x00, // Previous output tree 0x00, // Varint for length of signature script 0xff, 0xff, 0xff, 0xff, // Sequence 0x01, // Varint for number of output transactions @@ -601,7 +978,6 @@ func TestTxOverflowErrors(t *testing.T) { if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("BtcDecode #%d wrong error got: %v, want: %v", i, err, reflect.TypeOf(test.err)) - continue } // Decode from wire format. @@ -627,10 +1003,10 @@ func TestTxSerializeSize(t *testing.T) { size int // Expected serialized size }{ // No inputs or outpus. - {noTx, 10}, + {noTx, 15}, - // Transcaction with an input and an output. - {multiTx, 210}, + // Transaction with an input and an output. + {multiTx, 236}, } t.Logf("Running %d tests", len(tests)) @@ -650,18 +1026,22 @@ var multiTx = &wire.MsgTx{ TxIn: []*wire.TxIn{ { PreviousOutPoint: wire.OutPoint{ - Hash: wire.ShaHash{}, + Hash: chainhash.Hash{}, Index: 0xffffffff, }, + Sequence: 0xffffffff, + ValueIn: 0x1212121212121212, + BlockHeight: 0x15151515, + BlockIndex: 0x34343434, SignatureScript: []byte{ 0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62, }, - Sequence: 0xffffffff, }, }, TxOut: []*wire.TxOut{ { - Value: 0x12a05f200, + Value: 0x12a05f200, + Version: 0xabab, PkScript: []byte{ 0x41, // OP_DATA_65 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, @@ -677,7 +1057,8 @@ var multiTx = &wire.MsgTx{ }, }, { - Value: 0x5f5e100, + Value: 0x5f5e100, + Version: 0xbcbc, PkScript: []byte{ 0x41, // OP_DATA_65 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, @@ -694,25 +1075,122 @@ var multiTx = &wire.MsgTx{ }, }, LockTime: 0, + Expiry: 0, +} + +// multiTxPrefix is a MsgTx prefix with an input and output and used in various tests. +var multiTxPrefix = &wire.MsgTx{ + Version: 65537, + TxIn: []*wire.TxIn{ + { + PreviousOutPoint: wire.OutPoint{ + Hash: chainhash.Hash{}, + Index: 0xffffffff, + }, + Sequence: 0xffffffff, + }, + }, + TxOut: []*wire.TxOut{ + { + Value: 0x12a05f200, + Version: 0xabab, + PkScript: []byte{ + 0x41, // OP_DATA_65 + 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, + 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, + 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, + 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24, + 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97, + 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, + 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, + 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, + 0xa6, // 65-byte signature + 0xac, // OP_CHECKSIG + }, + }, + { + Value: 0x5f5e100, + Version: 0xbcbc, + PkScript: []byte{ + 0x41, // OP_DATA_65 + 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, + 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, + 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, + 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24, + 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97, + 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, + 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, + 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, + 0xa6, // 65-byte signature + 0xac, // OP_CHECKSIG + }, + }, + }, + LockTime: 0, + Expiry: 0, +} + +// multiTxWitness is a MsgTx witness with only input witness. 
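multiTxPrefix above is given Version 65537, and the witness fixtures that follow use 131073, 196609, and 262145. Together with encoded version bytes such as 0x01 0x00 0x02 0x00, this suggests the low 16 bits carry the transaction version and the upper 16 bits select the serialization type; that reading is an inference from these fixtures rather than something stated in the change. A sketch that decomposes the values:

```go
package main

import "fmt"

func main() {
	// 1 = full, 65537 = prefix, 131073 = witness only,
	// 196609 = witness for signing, 262145 = witness-value for signing
	// (pairing taken from the tests that use each fixture).
	for _, v := range []int32{1, 65537, 131073, 196609, 262145} {
		fmt.Printf("%d = 0x%08x -> tx version %d, upper 16 bits %d\n",
			v, uint32(v), v&0xffff, uint32(v)>>16)
	}
}
```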
+var multiTxWitness = &wire.MsgTx{ + Version: 131073, + TxIn: []*wire.TxIn{ + { + ValueIn: 0x1212121212121212, + BlockHeight: 0x15151515, + BlockIndex: 0x34343434, + SignatureScript: []byte{ + 0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62, + }, + }, + }, + TxOut: []*wire.TxOut{}, +} + +// multiTxWitnessSigning is a MsgTx witness with only input witness sigscripts. +var multiTxWitnessSigning = &wire.MsgTx{ + Version: 196609, + TxIn: []*wire.TxIn{ + { + SignatureScript: []byte{ + 0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62, + }, + }, + }, + TxOut: []*wire.TxOut{}, +} + +// multiTxWitnessValueSigning is a MsgTx witness with only input witness +// sigscripts. +var multiTxWitnessValueSigning = &wire.MsgTx{ + Version: 262145, + TxIn: []*wire.TxIn{ + { + ValueIn: 0x1212121212121212, + SignatureScript: []byte{ + 0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62, + }, + }, + }, + TxOut: []*wire.TxOut{}, } // multiTxEncoded is the wire encoded bytes for multiTx using protocol version -// 60002 and is used in the various tests. +// 0 and is used in the various tests. var multiTxEncoded = []byte{ - 0x01, 0x00, 0x00, 0x00, // Version - 0x01, // Varint for number of input transactions - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, // Version [0] + 0x01, // Varint for number of input transactions [4] + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [5] 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash - 0xff, 0xff, 0xff, 0xff, // Prevous output index - 0x07, // Varint for length of signature script - 0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62, // Signature script - 0xff, 0xff, 0xff, 0xff, // Sequence - 0x02, // Varint for number of output transactions - 0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount - 0x43, // Varint for length of pk script - 0x41, // OP_DATA_65 + 0xff, 0xff, 0xff, 0xff, // Previous output index [37] + 0x00, // Previous output tree [41] + 0xff, 0xff, 0xff, 0xff, // Sequence [42] + 0x02, // Varint for number of output transactions [46] + 0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount [47] + 0xab, 0xab, // Script version [55] + 0x43, // Varint for length of pk script [57] + 0x41, // OP_DATA_65 [58] 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, @@ -721,10 +1199,11 @@ var multiTxEncoded = []byte{ 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, - 0xa6, // 65-byte signature + 0xa6, // 65-byte pubkey 0xac, // OP_CHECKSIG - 0x00, 0xe1, 0xf5, 0x05, 0x00, 0x00, 0x00, 0x00, // Transaction amount - 0x43, // Varint for length of pk script + 0x00, 0xe1, 0xf5, 0x05, 0x00, 0x00, 0x00, 0x00, // Transaction amount [123] + 0xbc, 0xbc, // Script version [134] + 0x43, // Varint for length of pk script [136] 0x41, // OP_DATA_65 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, @@ -736,9 +1215,92 @@ var multiTxEncoded = []byte{ 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, 0xa6, // 65-byte signature 0xac, // OP_CHECKSIG - 0x00, 0x00, 0x00, 0x00, // Lock time + 0x00, 0x00, 0x00, 0x00, // Lock time [203] + 0x00, 0x00, 0x00, 0x00, // Expiry [207] + 0x01, // Varint for number of input signature [211] + 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, // ValueIn [212] + 0x15, 0x15, 0x15, 0x15, // 
BlockHeight [220] + 0x34, 0x34, 0x34, 0x34, // BlockIndex [224] + 0x07, // Varint for length of signature script [228] + 0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62, // Signature script [229] +} + +// multiTxPrefixEncoded is the wire encoded bytes for multiTx using protocol +// version 1 and is used in the various tests. +var multiTxPrefixEncoded = []byte{ + 0x01, 0x00, 0x01, 0x00, // Version [0] + 0x01, // Varint for number of input transactions [4] + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // [5] + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash + 0xff, 0xff, 0xff, 0xff, // Previous output index [37] + 0x00, // Previous output tree [41] + 0xff, 0xff, 0xff, 0xff, // Sequence [43] + 0x02, // Varint for number of output transactions [47] + 0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount [48] + 0xab, 0xab, // Script version + 0x43, // Varint for length of pk script [56] + 0x41, // OP_DATA_65 [57] + 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, + 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, + 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, + 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24, + 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97, + 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, + 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, + 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, + 0xa6, // 65-byte signature + 0xac, // OP_CHECKSIG + 0x00, 0xe1, 0xf5, 0x05, 0x00, 0x00, 0x00, 0x00, // Transaction amount [124] + 0xbc, 0xbc, // Script version + 0x43, // Varint for length of pk script [132] + 0x41, // OP_DATA_65 + 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, + 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, + 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, + 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24, + 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97, + 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, + 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, + 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, + 0xa6, // 65-byte signature + 0xac, // OP_CHECKSIG + 0x00, 0x00, 0x00, 0x00, // Lock time [198] + 0x00, 0x00, 0x00, 0x00, // Expiry [202] +} + +// multiTxWitnessEncoded is the wire encoded bytes for multiTx using protocol version +// 1 and is used in the various tests. +var multiTxWitnessEncoded = []byte{ + 0x01, 0x00, 0x02, 0x00, // Version + 0x01, // Varint for number of input signature + 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, // ValueIn + 0x15, 0x15, 0x15, 0x15, // BlockHeight + 0x34, 0x34, 0x34, 0x34, // BlockIndex + 0x07, // Varint for length of signature script + 0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62, // Signature script +} + +// multiTxWitnessSigningEncoded is the wire encoded bytes for multiTx using protocol version +// 1 and is used in the various tests. +var multiTxWitnessSigningEncoded = []byte{ + 0x01, 0x00, 0x03, 0x00, // Version + 0x01, // Varint for number of input signature + 0x07, // Varint for length of signature script + 0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62, // Signature script +} + +// multiTxWitnessValueSigningEncoded is the wire encoded bytes for multiTx using protocol version +// 1 and is used in the various tests. 
+var multiTxWitnessValueSigningEncoded = []byte{ + 0x01, 0x00, 0x04, 0x00, // Version + 0x01, // Varint for number of input signature + 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, // ValueIn + 0x07, // Varint for length of signature script + 0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62, // Signature script } // multiTxPkScriptLocs is the location information for the public key scripts // located in multiTx. -var multiTxPkScriptLocs = []int{63, 139} +var multiTxPkScriptLocs = []int{58, 136} diff --git a/wire/msgverack.go b/wire/msgverack.go index 6d89e61a..ba6ef4cd 100644 --- a/wire/msgverack.go +++ b/wire/msgverack.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -8,20 +9,20 @@ import ( "io" ) -// MsgVerAck defines a bitcoin verack message which is used for a peer to +// MsgVerAck defines a decred verack message which is used for a peer to // acknowledge a version message (MsgVersion) after it has used the information // to negotiate parameters. It implements the Message interface. // // This message has no payload. type MsgVerAck struct{} -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// BtcDecode decodes r using the decred protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgVerAck) BtcDecode(r io.Reader, pver uint32) error { return nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. // This is part of the Message interface implementation. func (msg *MsgVerAck) BtcEncode(w io.Writer, pver uint32) error { return nil @@ -39,7 +40,7 @@ func (msg *MsgVerAck) MaxPayloadLength(pver uint32) uint32 { return 0 } -// NewMsgVerAck returns a new bitcoin verack message that conforms to the +// NewMsgVerAck returns a new decred verack message that conforms to the // Message interface. func NewMsgVerAck() *MsgVerAck { return &MsgVerAck{} diff --git a/wire/msgverack_test.go b/wire/msgverack_test.go index 3f02898f..c12f0650 100644 --- a/wire/msgverack_test.go +++ b/wire/msgverack_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -9,8 +10,8 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + "github.com/decred/dcrd/wire" ) // TestVerAck tests the MsgVerAck API. @@ -56,38 +57,6 @@ func TestVerAckWire(t *testing.T) { msgVerAckEncoded, wire.ProtocolVersion, }, - - // Protocol version BIP0035Version. - { - msgVerAck, - msgVerAck, - msgVerAckEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0031Version. - { - msgVerAck, - msgVerAck, - msgVerAckEncoded, - wire.BIP0031Version, - }, - - // Protocol version NetAddressTimeVersion. - { - msgVerAck, - msgVerAck, - msgVerAckEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version MultipleAddressVersion. 
- { - msgVerAck, - msgVerAck, - msgVerAckEncoded, - wire.MultipleAddressVersion, - }, } t.Logf("Running %d tests", len(tests)) diff --git a/wire/msgversion.go b/wire/msgversion.go index 7b6dbdc1..7f1df5e2 100644 --- a/wire/msgversion.go +++ b/wire/msgversion.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -18,9 +19,9 @@ import ( const MaxUserAgentLen = 2000 // DefaultUserAgent for wire in the stack -const DefaultUserAgent = "/btcwire:0.2.0/" +const DefaultUserAgent = "/dcrwire:0.0.1/" -// MsgVersion implements the Message interface and represents a bitcoin version +// MsgVersion implements the Message interface and represents a decred version // message. It is used for a peer to advertise itself as soon as an outbound // connection is made. The remote peer then uses this information along with // its own to negotiate. The remote peer must then respond with a version @@ -73,7 +74,7 @@ func (msg *MsgVersion) AddService(service ServiceFlag) { msg.Services |= service } -// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// BtcDecode decodes r using the decred protocol encoding into the receiver. // The version message is special in that the protocol version hasn't been // negotiated yet. As a result, the pver field is ignored and any fields which // are added in new versions are optional. This also mean that r must be a @@ -152,7 +153,7 @@ func (msg *MsgVersion) BtcDecode(r io.Reader, pver uint32) error { return nil } -// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// BtcEncode encodes the receiver to w using the decred protocol encoding. // This is part of the Message interface implementation. func (msg *MsgVersion) BtcEncode(w io.Writer, pver uint32) error { err := validateUserAgent(msg.UserAgent) @@ -191,15 +192,11 @@ func (msg *MsgVersion) BtcEncode(w io.Writer, pver uint32) error { return err } - // There was no relay transactions field before BIP0037Version. Also, - // the wire encoding for the field is true when transactions should be - // relayed, so reverse it from the DisableRelayTx field. - if pver >= BIP0037Version { - err = writeElement(w, !msg.DisableRelayTx) - if err != nil { - return err - } + err = writeElement(w, !msg.DisableRelayTx) + if err != nil { + return err } + return nil } @@ -222,7 +219,7 @@ func (msg *MsgVersion) MaxPayloadLength(pver uint32) uint32 { MaxUserAgentLen } -// NewMsgVersion returns a new bitcoin version message that conforms to the +// NewMsgVersion returns a new decred version message that conforms to the // Message interface using the passed parameters and defaults for the remaining // fields. func NewMsgVersion(me *NetAddress, you *NetAddress, nonce uint64, @@ -244,7 +241,7 @@ func NewMsgVersion(me *NetAddress, you *NetAddress, nonce uint64, } // NewMsgVersionFromConn is a convenience function that extracts the remote -// and local address from conn and returns a new bitcoin version message that +// and local address from conn and returns a new decred version message that // conforms to the Message interface. See NewMsgVersion. 
func NewMsgVersionFromConn(conn net.Conn, nonce uint64, lastBlock int32) (*MsgVersion, error) { diff --git a/wire/msgversion_test.go b/wire/msgversion_test.go index ee701ec9..00ca8b4f 100644 --- a/wire/msgversion_test.go +++ b/wire/msgversion_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -13,8 +14,8 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + "github.com/decred/dcrd/wire" ) // TestVersion tests the MsgVersion API. @@ -199,56 +200,6 @@ func TestVersionWire(t *testing.T) { baseVersionBIP0037Encoded, wire.ProtocolVersion, }, - - // Protocol version BIP0037Version with relay transactions field - // true. - { - baseVersionBIP0037, - baseVersionBIP0037, - baseVersionBIP0037Encoded, - wire.BIP0037Version, - }, - - // Protocol version BIP0037Version with relay transactions field - // false. - { - verRelayTxFalse, - verRelayTxFalse, - verRelayTxFalseEncoded, - wire.BIP0037Version, - }, - - // Protocol version BIP0035Version. - { - baseVersion, - baseVersion, - baseVersionEncoded, - wire.BIP0035Version, - }, - - // Protocol version BIP0031Version. - { - baseVersion, - baseVersion, - baseVersionEncoded, - wire.BIP0031Version, - }, - - // Protocol version NetAddressTimeVersion. - { - baseVersion, - baseVersion, - baseVersionEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version MultipleAddressVersion. - { - baseVersion, - baseVersion, - baseVersionEncoded, - wire.MultipleAddressVersion, - }, } t.Logf("Running %d tests", len(tests)) @@ -349,12 +300,6 @@ func TestVersionWireErrors(t *testing.T) { {baseVersion, baseVersionEncoded, pver, 82, io.ErrShortWrite, io.ErrUnexpectedEOF}, // Force error in last block. {baseVersion, baseVersionEncoded, pver, 98, io.ErrShortWrite, io.ErrUnexpectedEOF}, - // Force error in relay tx - no read error should happen since - // it's optional. - { - baseVersionBIP0037, baseVersionBIP0037Encoded, - wire.BIP0037Version, 101, io.ErrShortWrite, nil, - }, // Force error due to user agent too big {exceedUAVer, exceedUAVerEncoded, pver, newLen, wireErr, wireErr}, } @@ -443,7 +388,7 @@ func TestVersionOptionalFields(t *testing.T) { // uaVersion is a version message that contains all fields through // the UserAgent field. 
uaVersion := nonceVersion - uaVersion.UserAgent = "/btcdtest:0.0.1/" + uaVersion.UserAgent = "/dcrdtest:0.0.1/" uaVersionEncoded := make([]byte, len(baseVersionEncoded)-4) copy(uaVersionEncoded, baseVersionEncoded) @@ -521,7 +466,7 @@ var baseVersion = &wire.MsgVersion{ Port: 8333, }, Nonce: 123123, // 0x1e0f3 - UserAgent: "/btcdtest:0.0.1/", + UserAgent: "/dcrdtest:0.0.1/", LastBlock: 234234, // 0x392fa } @@ -543,7 +488,7 @@ var baseVersionEncoded = []byte{ 0x20, 0x8d, // Port 8333 in big-endian 0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Nonce 0x10, // Varint for user agent length - 0x2f, 0x62, 0x74, 0x63, 0x64, 0x74, 0x65, 0x73, + 0x2f, 0x64, 0x63, 0x72, 0x64, 0x74, 0x65, 0x73, 0x74, 0x3a, 0x30, 0x2e, 0x30, 0x2e, 0x31, 0x2f, // User agent 0xfa, 0x92, 0x03, 0x00, // Last block } @@ -567,7 +512,7 @@ var baseVersionBIP0037 = &wire.MsgVersion{ Port: 8333, }, Nonce: 123123, // 0x1e0f3 - UserAgent: "/btcdtest:0.0.1/", + UserAgent: "/dcrdtest:0.0.1/", LastBlock: 234234, // 0x392fa } @@ -589,7 +534,7 @@ var baseVersionBIP0037Encoded = []byte{ 0x20, 0x8d, // Port 8333 in big-endian 0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Nonce 0x10, // Varint for user agent length - 0x2f, 0x62, 0x74, 0x63, 0x64, 0x74, 0x65, 0x73, + 0x2f, 0x64, 0x63, 0x72, 0x64, 0x74, 0x65, 0x73, 0x74, 0x3a, 0x30, 0x2e, 0x30, 0x2e, 0x31, 0x2f, // User agent 0xfa, 0x92, 0x03, 0x00, // Last block 0x01, // Relay tx diff --git a/wire/netaddress.go b/wire/netaddress.go index cf5a7956..dbf5002b 100644 --- a/wire/netaddress.go +++ b/wire/netaddress.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -16,17 +17,14 @@ import ( // a TCP address as required. var ErrInvalidNetAddr = errors.New("provided net.Addr is not a net.TCPAddr") -// maxNetAddressPayload returns the max payload size for a bitcoin NetAddress +// maxNetAddressPayload returns the max payload size for a decred NetAddress // based on the protocol version. func maxNetAddressPayload(pver uint32) uint32 { // Services 8 bytes + ip 16 bytes + port 2 bytes. plen := uint32(26) - // NetAddressTimeVersion added a timestamp field. - if pver >= NetAddressTimeVersion { - // Timestamp 4 bytes. - plen += 4 - } + // Timestamp 4 bytes. + plen += 4 return plen } @@ -36,7 +34,7 @@ func maxNetAddressPayload(pver uint32) uint32 { type NetAddress struct { // Last time the address was seen. This is, unfortunately, encoded as a // uint32 on the wire and therefore is limited to 2106. This field is - // not present in the bitcoin version message (MsgVersion) nor was it + // not present in the decred version message (MsgVersion) nor was it // added until protocol version >= NetAddressTimeVersion. Timestamp time.Time @@ -110,10 +108,10 @@ func readNetAddress(r io.Reader, pver uint32, na *NetAddress, ts bool) error { var ip [16]byte var port uint16 - // NOTE: The bitcoin protocol uses a uint32 for the timestamp so it will + // NOTE: The decred protocol uses a uint32 for the timestamp so it will // stop working somewhere around 2106. Also timestamp wasn't added until // protocol version >= NetAddressTimeVersion - if ts && pver >= NetAddressTimeVersion { + if ts { var stamp uint32 err := readElement(r, &stamp) if err != nil { @@ -126,7 +124,7 @@ func readNetAddress(r io.Reader, pver uint32, na *NetAddress, ts bool) error { if err != nil { return err } - // Sigh. Bitcoin protocol mixes little and big endian. + // Sigh. 
Decred protocol mixes little and big endian. err = binary.Read(r, binary.BigEndian, &port) if err != nil { return err @@ -142,10 +140,10 @@ func readNetAddress(r io.Reader, pver uint32, na *NetAddress, ts bool) error { // version and whether or not the timestamp is included per ts. Some messages // like version do not include the timestamp. func writeNetAddress(w io.Writer, pver uint32, na *NetAddress, ts bool) error { - // NOTE: The bitcoin protocol uses a uint32 for the timestamp so it will + // NOTE: The decred protocol uses a uint32 for the timestamp so it will // stop working somewhere around 2106. Also timestamp wasn't added until // until protocol version >= NetAddressTimeVersion. - if ts && pver >= NetAddressTimeVersion { + if ts { err := writeElement(w, uint32(na.Timestamp.Unix())) if err != nil { return err @@ -162,7 +160,7 @@ func writeNetAddress(w io.Writer, pver uint32, na *NetAddress, ts bool) error { return err } - // Sigh. Bitcoin protocol mixes little and big endian. + // Sigh. Decred protocol mixes little and big endian. err = binary.Write(w, binary.BigEndian, na.Port) if err != nil { return err diff --git a/wire/netaddress_test.go b/wire/netaddress_test.go index a03c586f..bcedf67b 100644 --- a/wire/netaddress_test.go +++ b/wire/netaddress_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2013-2015 The btcsuite developers +// Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -12,8 +13,8 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + "github.com/decred/dcrd/wire" ) // TestNetAddress tests the NetAddress API. @@ -67,17 +68,6 @@ func TestNetAddress(t *testing.T) { maxPayload, wantPayload) } - // Protocol version before NetAddressTimeVersion when timestamp was - // added. Ensure max payload is expected value for it. - pver = wire.NetAddressTimeVersion - 1 - wantPayload = 26 - maxPayload = wire.TstMaxNetAddressPayload(pver) - if maxPayload != wantPayload { - t.Errorf("maxNetAddressPayload: wrong max payload length for "+ - "protocol version %d - got %v, want %v", pver, - maxPayload, wantPayload) - } - // Check for expected failure on wrong address type. udpAddr := &net.UDPAddr{} _, err = wire.NewNetAddress(udpAddr, 0) @@ -144,45 +134,6 @@ func TestNetAddressWire(t *testing.T) { baseNetAddrEncoded, wire.ProtocolVersion, }, - - // Protocol version NetAddressTimeVersion without ts flag. - { - baseNetAddr, - baseNetAddrNoTS, - false, - baseNetAddrNoTSEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version NetAddressTimeVersion with ts flag. - { - baseNetAddr, - baseNetAddr, - true, - baseNetAddrEncoded, - wire.NetAddressTimeVersion, - }, - - // Protocol version NetAddressTimeVersion-1 without ts flag. - { - baseNetAddr, - baseNetAddrNoTS, - false, - baseNetAddrNoTSEncoded, - wire.NetAddressTimeVersion - 1, - }, - - // Protocol version NetAddressTimeVersion-1 with timestamp. - // Even though the timestamp flag is set, this shouldn't have a - // timestamp since it is a protocol version before it was - // added. - { - baseNetAddr, - baseNetAddrNoTS, - true, - baseNetAddrNoTSEncoded, - wire.NetAddressTimeVersion - 1, - }, } t.Logf("Running %d tests", len(tests)) @@ -220,7 +171,6 @@ func TestNetAddressWire(t *testing.T) { // decode NetAddress to confirm error paths work correctly. 
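With the NetAddressTimeVersion gate removed above, the 4-byte timestamp is written whenever the message carries one, so the maximum NetAddress payload is always 26 + 4 = 30 bytes. A rough sketch of that layout; the little-endian encoding of the timestamp and services is assumed from the package's element encoding, while the services/IP/port order and the big-endian port follow the surrounding code and the offsets used in the address tests:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"net"
	"time"
)

func main() {
	ts := time.Unix(0x495fab29, 0)
	services := uint64(1) // some service bitmask
	ip := net.ParseIP("127.0.0.1")
	port := uint16(8333)

	buf := new(bytes.Buffer)
	binary.Write(buf, binary.LittleEndian, uint32(ts.Unix())) // Timestamp (4 bytes)
	binary.Write(buf, binary.LittleEndian, services)          // Services  (8 bytes)
	buf.Write(ip.To16())                                      // IP        (16 bytes)
	binary.Write(buf, binary.BigEndian, port)                 // Port      (2 bytes, big endian)
	fmt.Println(buf.Len())                                    // 30
}
```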
diff --git a/wire/netaddress_test.go b/wire/netaddress_test.go
index a03c586f..bcedf67b 100644
--- a/wire/netaddress_test.go
+++ b/wire/netaddress_test.go
@@ -1,4 +1,5 @@
 // Copyright (c) 2013-2015 The btcsuite developers
+// Copyright (c) 2015 The Decred developers
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.

@@ -12,8 +13,8 @@ import (
 	"testing"
 	"time"

-	"github.com/btcsuite/btcd/wire"
 	"github.com/davecgh/go-spew/spew"
+	"github.com/decred/dcrd/wire"
 )

 // TestNetAddress tests the NetAddress API.
@@ -67,17 +68,6 @@ func TestNetAddress(t *testing.T) {
 			maxPayload, wantPayload)
 	}

-	// Protocol version before NetAddressTimeVersion when timestamp was
-	// added.  Ensure max payload is expected value for it.
-	pver = wire.NetAddressTimeVersion - 1
-	wantPayload = 26
-	maxPayload = wire.TstMaxNetAddressPayload(pver)
-	if maxPayload != wantPayload {
-		t.Errorf("maxNetAddressPayload: wrong max payload length for "+
-			"protocol version %d - got %v, want %v", pver,
-			maxPayload, wantPayload)
-	}
-
 	// Check for expected failure on wrong address type.
 	udpAddr := &net.UDPAddr{}
 	_, err = wire.NewNetAddress(udpAddr, 0)
@@ -144,45 +134,6 @@ func TestNetAddressWire(t *testing.T) {
 			baseNetAddrEncoded,
 			wire.ProtocolVersion,
 		},
-
-		// Protocol version NetAddressTimeVersion without ts flag.
-		{
-			baseNetAddr,
-			baseNetAddrNoTS,
-			false,
-			baseNetAddrNoTSEncoded,
-			wire.NetAddressTimeVersion,
-		},
-
-		// Protocol version NetAddressTimeVersion with ts flag.
-		{
-			baseNetAddr,
-			baseNetAddr,
-			true,
-			baseNetAddrEncoded,
-			wire.NetAddressTimeVersion,
-		},
-
-		// Protocol version NetAddressTimeVersion-1 without ts flag.
-		{
-			baseNetAddr,
-			baseNetAddrNoTS,
-			false,
-			baseNetAddrNoTSEncoded,
-			wire.NetAddressTimeVersion - 1,
-		},
-
-		// Protocol version NetAddressTimeVersion-1 with timestamp.
-		// Even though the timestamp flag is set, this shouldn't have a
-		// timestamp since it is a protocol version before it was
-		// added.
-		{
-			baseNetAddr,
-			baseNetAddrNoTS,
-			true,
-			baseNetAddrNoTSEncoded,
-			wire.NetAddressTimeVersion - 1,
-		},
 	}

 	t.Logf("Running %d tests", len(tests))
@@ -220,7 +171,6 @@ func TestNetAddressWire(t *testing.T) {
 // decode NetAddress to confirm error paths work correctly.
 func TestNetAddressWireErrors(t *testing.T) {
 	pver := wire.ProtocolVersion
-	pverNAT := wire.NetAddressTimeVersion - 1

 	// baseNetAddr is used in the various tests as a baseline NetAddress.
 	baseNetAddr := wire.NetAddress{
@@ -258,16 +208,6 @@ func TestNetAddressWireErrors(t *testing.T) {
 		{&baseNetAddr, []byte{}, pver, false, 8, io.ErrShortWrite, io.EOF},
 		// Force errors on port.
 		{&baseNetAddr, []byte{}, pver, false, 24, io.ErrShortWrite, io.EOF},
-
-		// Protocol version before NetAddressTimeVersion with timestamp
-		// flag set (should not have timestamp due to old protocol
-		// version) and intentional read/write errors.
-		// Force errors on services.
-		{&baseNetAddr, []byte{}, pverNAT, true, 0, io.ErrShortWrite, io.EOF},
-		// Force errors on ip.
-		{&baseNetAddr, []byte{}, pverNAT, true, 8, io.ErrShortWrite, io.EOF},
-		// Force errors on port.
-		{&baseNetAddr, []byte{}, pverNAT, true, 24, io.ErrShortWrite, io.EOF},
 	}

 	t.Logf("Running %d tests", len(tests))
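The next file, wire/protocol.go, pins ProtocolVersion to 1 and replaces BitcoinNet with CurrencyNet along with new Decred network magic values. As a self-contained illustration of how those constants and the Stringer behave, the type is re-declared here rather than imported, so the sketch compiles on its own:

```go
package main

import "fmt"

// CurrencyNet mirrors the type introduced in wire/protocol.go below, with the
// Decred network magic values from this change.
type CurrencyNet uint32

const (
	MainNet CurrencyNet = 0xd9b400f9
	RegTest CurrencyNet = 0xdab500fa
	TestNet CurrencyNet = 0x0709000b
	SimNet  CurrencyNet = 0x12141c16
)

// bnStrings matches the pretty-print map in the hunk below, where RegTest
// maps to the string "RegNet".
var bnStrings = map[CurrencyNet]string{
	MainNet: "MainNet",
	TestNet: "TestNet",
	RegTest: "RegNet",
	SimNet:  "SimNet",
}

// String behaves like the CurrencyNet Stringer added below.
func (n CurrencyNet) String() string {
	if s, ok := bnStrings[n]; ok {
		return s
	}
	return fmt.Sprintf("Unknown CurrencyNet (%d)", uint32(n))
}

func main() {
	fmt.Println(MainNet)                 // MainNet
	fmt.Println(RegTest)                 // RegNet
	fmt.Println(CurrencyNet(0xffffffff)) // Unknown CurrencyNet (4294967295)
}
```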
diff --git a/wire/protocol.go b/wire/protocol.go
index ef434e16..24732471 100644
--- a/wire/protocol.go
+++ b/wire/protocol.go
@@ -1,4 +1,5 @@
 // Copyright (c) 2013-2015 The btcsuite developers
+// Copyright (c) 2015 The Decred developers
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.

@@ -12,35 +13,10 @@ import (

 const (
 	// ProtocolVersion is the latest protocol version this package supports.
-	ProtocolVersion uint32 = 70002
-
-	// MultipleAddressVersion is the protocol version which added multiple
-	// addresses per message (pver >= MultipleAddressVersion).
-	MultipleAddressVersion uint32 = 209
-
-	// NetAddressTimeVersion is the protocol version which added the
-	// timestamp field (pver >= NetAddressTimeVersion).
-	NetAddressTimeVersion uint32 = 31402
-
-	// BIP0031Version is the protocol version AFTER which a pong message
-	// and nonce field in ping were added (pver > BIP0031Version).
-	BIP0031Version uint32 = 60000
-
-	// BIP0035Version is the protocol version which added the mempool
-	// message (pver >= BIP0035Version).
-	BIP0035Version uint32 = 60002
-
-	// BIP0037Version is the protocol version which added new connection
-	// bloom filtering related messages and extended the version message
-	// with a relay flag (pver >= BIP0037Version).
-	BIP0037Version uint32 = 70001
-
-	// RejectVersion is the protocol version which added a new reject
-	// message.
-	RejectVersion uint32 = 70002
+	ProtocolVersion uint32 = 1
 )

-// ServiceFlag identifies services supported by a bitcoin peer.
+// ServiceFlag identifies services supported by a decred peer.
 type ServiceFlag uint64

 const (
@@ -78,41 +54,41 @@ func (f ServiceFlag) String() string {
 	return s
 }

-// BitcoinNet represents which bitcoin network a message belongs to.
-type BitcoinNet uint32
+// CurrencyNet represents which decred network a message belongs to.
+type CurrencyNet uint32

-// Constants used to indicate the message bitcoin network.  They can also be
+// Constants used to indicate the message decred network.  They can also be
 // used to seek to the next message when a stream's state is unknown, but
 // this package does not provide that functionality since it's generally a
 // better idea to simply disconnect clients that are misbehaving over TCP.
 const (
-	// MainNet represents the main bitcoin network.
-	MainNet BitcoinNet = 0xd9b4bef9
+	// MainNet represents the main decred network.
+	MainNet CurrencyNet = 0xd9b400f9

 	// TestNet represents the regression test network.
-	TestNet BitcoinNet = 0xdab5bffa
+	RegTest CurrencyNet = 0xdab500fa

-	// TestNet3 represents the test network (version 3).
-	TestNet3 BitcoinNet = 0x0709110b
+	// TestNet represents the test network (version 3).
+	TestNet CurrencyNet = 0x0709000b

 	// SimNet represents the simulation test network.
-	SimNet BitcoinNet = 0x12141c16
+	SimNet CurrencyNet = 0x12141c16
 )

-// bnStrings is a map of bitcoin networks back to their constant names for
+// bnStrings is a map of decred networks back to their constant names for
 // pretty printing.
-var bnStrings = map[BitcoinNet]string{
-	MainNet:  "MainNet",
-	TestNet:  "TestNet",
-	TestNet3: "TestNet3",
-	SimNet:   "SimNet",
+var bnStrings = map[CurrencyNet]string{
+	MainNet: "MainNet",
+	TestNet: "TestNet",
+	RegTest: "RegNet",
+	SimNet:  "SimNet",
 }

-// String returns the BitcoinNet in human-readable form.
-func (n BitcoinNet) String() string {
+// String returns the CurrencyNet in human-readable form.
+func (n CurrencyNet) String() string {
 	if s, ok := bnStrings[n]; ok {
 		return s
 	}

-	return fmt.Sprintf("Unknown BitcoinNet (%d)", uint32(n))
+	return fmt.Sprintf("Unknown CurrencyNet (%d)", uint32(n))
 }
diff --git a/wire/protocol_test.go b/wire/protocol_test.go
index cc79f041..9117473d 100644
--- a/wire/protocol_test.go
+++ b/wire/protocol_test.go
@@ -1,4 +1,5 @@
 // Copyright (c) 2013-2015 The btcsuite developers
+// Copyright (c) 2015 The Decred developers
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.

@@ -7,7 +8,7 @@ package wire_test
 import (
 	"testing"

-	"github.com/btcsuite/btcd/wire"
+	"github.com/decred/dcrd/wire"
 )

 // TestServiceFlagStringer tests the stringized output for service flag types.
@@ -32,17 +33,17 @@ func TestServiceFlagStringer(t *testing.T) {
 	}
 }

-// TestBitcoinNetStringer tests the stringized output for bitcoin net types.
-func TestBitcoinNetStringer(t *testing.T) {
+// TestCurrencyNetStringer tests the stringized output for decred net types.
+func TestCurrencyNetStringer(t *testing.T) {
 	tests := []struct {
-		in   wire.BitcoinNet
+		in   wire.CurrencyNet
 		want string
 	}{
 		{wire.MainNet, "MainNet"},
 		{wire.TestNet, "TestNet"},
-		{wire.TestNet3, "TestNet3"},
+		{wire.TestNet, "TestNet"},
 		{wire.SimNet, "SimNet"},
-		{0xffffffff, "Unknown BitcoinNet (4294967295)"},
+		{0xffffffff, "Unknown CurrencyNet (4294967295)"},
 	}

 	t.Logf("Running %d tests", len(tests))