dcrd/rpcserver_test.go
Dave Collins 25c14e046a
main: Update to use all new major module versions.
This updates all code in the main module to use the latest major
module versions to pull in the latest updates.

A more general high level overview of the changes is provided below;
however, there is one semantic change worth calling out independently.

The verifymessage RPC will now return an error when provided with an
address that is not for the currently active network, and the RPC
server version has been bumped accordingly.

Previously, it would return false, which indicated the signature is
invalid, even when the provided signature was actually valid for the
other network.  That behavior was not strictly incorrect, since the
address, signature, and message combination is in fact invalid for the
current active network.  However, the result could be misleading, since
a false result could easily be interpreted to mean the signature is
invalid altogether, which is distinct from the case of the address
being for a different network.  Therefore, it is preferable to
explicitly return an error in the case of an address on the wrong
network to cleanly separate these cases.
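
For illustration, the following is a minimal sketch of the new check,
assuming dcrutil/v2's DecodeAddress, which now requires the network
parameters; the helper name and error wording are hypothetical rather
than the exact dcrd implementation:

package main

import (
	"fmt"

	"github.com/decred/dcrd/chaincfg/v2"
	"github.com/decred/dcrd/dcrutil/v2"
)

// decodeAddressForNet is a hypothetical helper illustrating the new
// semantics: the address is decoded against the active network's
// parameters, so an address from another network yields an explicit
// error rather than a false (signature invalid) result.
func decodeAddressForNet(encoded string, params *chaincfg.Params) (dcrutil.Address, error) {
	addr, err := dcrutil.DecodeAddress(encoded, params)
	if err != nil {
		return nil, fmt.Errorf("address %q is not valid for network %s: %v",
			encoded, params.Name, err)
	}
	return addr, nil
}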

The following is a high level overview of the changes:

- Replace all calls to removed blockchain merkle root, pow, subsidy, and
  coinbase funcs with their standalone module equivalents
  - Introduce a new local func named calcTxTreeMerkleRoot that accepts
    dcrutil.Tx as before and defers to the new standalone func (see the
    sketch after this list)
- Update block locator handling to match the new signature required by
  the peer/v2 module
  - Introduce a new local func named chainBlockLocatorToHashes which
    performs the necessary conversion (see the sketch after this list)
- Update all references to old v1 chaincfg params global instances to
  use the new v2 functions
- Modify all cases that parse addresses to provide the now required
  current network params
  - Include address params with the wsClientFilter
- Replace removed v1 chaincfg constants with local constants
- Create subsidy cache during server init and pass it to the relevant
  subsystems (see the sketch after this list)
  - blockManagerConfig
  - BlkTmplGenerator
  - rpcServer
  - VotingWallet
- Update mining code that creates the block one coinbase transaction to
  create the output scripts as defined in the v2 params
- Replace old v2 dcrjson constant references with new types module
- Fix various comment typos
- Update fees module to use the latest major module versions and bump it to v2
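
The merkle root helper mentioned above is essentially an unwrap-and-
delegate function.  A minimal sketch of what it could look like,
assuming standalone.CalcTxTreeMerkleRoot operates on wire transactions
(the surrounding dcrd wiring is omitted and details may differ):

package main

import (
	"github.com/decred/dcrd/blockchain/standalone"
	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/dcrutil/v2"
	"github.com/decred/dcrd/wire"
)

// calcTxTreeMerkleRoot unwraps the dcrutil transactions to their underlying
// wire representation and defers to the standalone module to calculate the
// merkle root of the transaction tree.  This mirrors the helper described
// in the commit message; the exact dcrd version may differ in detail.
func calcTxTreeMerkleRoot(txns []*dcrutil.Tx) chainhash.Hash {
	msgTxns := make([]*wire.MsgTx, 0, len(txns))
	for _, tx := range txns {
		msgTxns = append(msgTxns, tx.MsgTx())
	}
	return standalone.CalcTxTreeMerkleRoot(msgTxns)
}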
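
Likewise, the block locator conversion is a pointer-to-value copy.  A
sketch, assuming the chain's block locator is a slice of hash pointers
and the peer/v2 APIs expect hash values:

package main

import "github.com/decred/dcrd/chaincfg/chainhash"

// chainBlockLocatorToHashes converts a block locator expressed as a slice
// of hash pointers into a slice of hash values.  The real helper operates
// on the blockchain package's BlockLocator type; a plain slice of hash
// pointers is used here to keep the sketch self-contained.
func chainBlockLocatorToHashes(locator []*chainhash.Hash) []chainhash.Hash {
	if len(locator) == 0 {
		return nil
	}

	hashes := make([]chainhash.Hash, 0, len(locator))
	for _, hash := range locator {
		hashes = append(hashes, *hash)
	}
	return hashes
}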
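
Finally, the shared subsidy cache is created once from the active
network parameters and handed to the subsystems listed above rather
than each computing subsidies independently.  A rough sketch, assuming
the v2 chain parameters satisfy the standalone module's SubsidyParams
interface:

package main

import (
	"fmt"

	"github.com/decred/dcrd/blockchain/standalone"
	"github.com/decred/dcrd/chaincfg/v2"
)

func main() {
	// Create a single subsidy cache for the active network during server
	// initialization.  In dcrd this cache would then be passed to the block
	// manager, block template generator, RPC server, and voting wallet.
	// Assumes *chaincfg.Params satisfies standalone.SubsidyParams.
	params := chaincfg.MainNetParams()
	cache := standalone.NewSubsidyCache(params)

	// The cache answers subsidy queries for any height, e.g. the full
	// block subsidy at height 100000.
	fmt.Println("full subsidy:", cache.CalcBlockSubsidy(100000))
}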
2019-08-13 11:22:37 -05:00


// Copyright (c) 2016 The btcsuite developers
// Copyright (c) 2017-2019 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

// This file is ignored during the regular tests due to the following build tag.
// +build rpctest

package main

import (
	"bytes"
	"fmt"
	"os"
	"runtime/debug"
	"testing"

	"github.com/decred/dcrd/chaincfg/v2"
	"github.com/decred/dcrd/rpctest"
)
func testGetBestBlock(r *rpctest.Harness, t *testing.T) {
	_, prevbestHeight, err := r.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("Call to `getbestblock` failed: %v", err)
	}

	// Create a new block connecting to the current tip.
	generatedBlockHashes, err := r.Node.Generate(1)
	if err != nil {
		t.Fatalf("Unable to generate block: %v", err)
	}

	bestHash, bestHeight, err := r.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("Call to `getbestblock` failed: %v", err)
	}

	// Hash should be the same as the newly submitted block.
	if !bytes.Equal(bestHash[:], generatedBlockHashes[0][:]) {
		t.Fatalf("Block hashes do not match. Returned hash %v, wanted "+
			"hash %v", bestHash, generatedBlockHashes[0])
	}

	// Block height should now reflect newest height.
	if bestHeight != prevbestHeight+1 {
		t.Fatalf("Block heights do not match. Got %v, wanted %v",
			bestHeight, prevbestHeight+1)
	}
}
func testGetBlockCount(r *rpctest.Harness, t *testing.T) {
	// Save the current count.
	currentCount, err := r.Node.GetBlockCount()
	if err != nil {
		t.Fatalf("Unable to get block count: %v", err)
	}

	if _, err := r.Node.Generate(1); err != nil {
		t.Fatalf("Unable to generate block: %v", err)
	}

	// Count should have increased by one.
	newCount, err := r.Node.GetBlockCount()
	if err != nil {
		t.Fatalf("Unable to get block count: %v", err)
	}
	if newCount != currentCount+1 {
		t.Fatalf("Block count incorrect. Got %v should be %v",
			newCount, currentCount+1)
	}
}
func testGetBlockHash(r *rpctest.Harness, t *testing.T) {
	// Create a new block connecting to the current tip.
	generatedBlockHashes, err := r.Node.Generate(1)
	if err != nil {
		t.Fatalf("Unable to generate block: %v", err)
	}

	info, err := r.Node.GetInfo()
	if err != nil {
		t.Fatalf("Call to `getinfo` failed: %v", err)
	}

	blockHash, err := r.Node.GetBlockHash(int64(info.Blocks))
	if err != nil {
		t.Fatalf("Call to `getblockhash` failed: %v", err)
	}

	// Block hashes should match newly created block.
	if !bytes.Equal(generatedBlockHashes[0][:], blockHash[:]) {
		t.Fatalf("Block hashes do not match. Returned hash %v, wanted "+
			"hash %v", blockHash, generatedBlockHashes[0])
	}
}
var rpcTestCases = []rpctest.HarnessTestCase{
	testGetBestBlock,
	testGetBlockCount,
	testGetBlockHash,
}

var primaryHarness *rpctest.Harness
func TestMain(m *testing.M) {
	// In order to properly test scenarios as if we were on mainnet, ensure
	// that non-standard transactions aren't accepted into the mempool or
	// relayed.
	args := []string{"--rejectnonstd"}
	harness, err := rpctest.New(chaincfg.RegNetParams(), nil, args)
	if err != nil {
		fmt.Println("unable to create primary harness: ", err)
		os.Exit(1)
	}
	primaryHarness = harness

	// Initialize the primary mining node with a chain of length 125,
	// providing 25 mature coinbases to allow spending from for testing
	// purposes.
	if err := primaryHarness.SetUp(true, 25); err != nil {
		fmt.Println("unable to setup test chain: ", err)

		// Even though the harness was not fully setup, it still needs
		// to be torn down to ensure all resources such as temp
		// directories are cleaned up.  The error is intentionally
		// ignored since this is already an error path and nothing else
		// could be done about it anyways.
		_ = primaryHarness.TearDown()
		os.Exit(1)
	}

	exitCode := m.Run()

	// Clean up any active harnesses that are still currently running.  This
	// includes removing all temporary directories and shutting down any
	// created processes.
	if err := rpctest.TearDownAll(); err != nil {
		fmt.Println("unable to tear down all harnesses: ", err)
		os.Exit(1)
	}

	os.Exit(exitCode)
}
func TestRpcServer(t *testing.T) {
	var currentTestNum int
	defer func() {
		// If one of the integration tests caused a panic within the main
		// goroutine, then tear down all the harnesses in order to avoid
		// any leaked dcrd processes.
		if r := recover(); r != nil {
			fmt.Println("recovering from test panic: ", r)
			if err := rpctest.TearDownAll(); err != nil {
				fmt.Println("unable to tear down all harnesses: ", err)
			}
			t.Fatalf("test #%v panicked: %s", currentTestNum, debug.Stack())
		}
	}()

	for _, testCase := range rpcTestCases {
		testCase(primaryHarness, t)
		currentTestNum++
	}
}