dcrd/dcrd.go
Dave Collins 25c14e046a
main: Update to use all new major module versions.
This updates all code in the main module to use the latest major module
versions in order to pull in the latest updates.

A more general high-level overview of the changes is provided below;
however, there is one semantic change worth calling out independently.

The verifymessage RPC will now return an error when provided with
an address that is not for the currently active network, and the RPC
server version has been bumped accordingly.

Previously, it would return false, indicating the signature is invalid,
even when the provided signature was actually valid for the other
network.  That behavior was not strictly incorrect, since the address,
signature, and message combination is in fact invalid for the currently
active network.  However, the result could be misleading because a
false return is easily interpreted to mean the signature itself is
invalid, which is distinct from the address simply being for a
different network.  Therefore, it is preferable to explicitly return an
error when the address is for the wrong network so the two cases are
cleanly separated.

The following is a high-level overview of the changes:

- Replace all calls to removed blockchain merkle root, pow, subsidy, and
  coinbase funcs with their standalone module equivalents
  - Introduce a new local func named calcTxTreeMerkleRoot that accepts
    dcrutil.Tx as before and defers to the new standalone func (a sketch
    follows this list)
- Update block locator handling to match the new signature required by
  the peer/v2 module
  - Introduce a new local func named chainBlockLocatorToHashes which
    performs the necessary conversion (see the sketch after this list)
- Update all references to old v1 chaincfg params global instances to
  use the new v2 functions
- Modify all cases that parse addresses to provide the now required
  current network params
  - Include address params with the wsClientFilter
- Replace removed v1 chaincfg constants with local constants
- Create the subsidy cache during server init and pass it to the relevant
  subsystems (see the sketch after this list):
  - blockManagerConfig
  - BlkTmplGenerator
  - rpcServer
  - VotingWallet
- Update mining code that creates the block one coinbase transaction to
  create the output scripts as defined in the v2 params
- Replace old v2 dcrjson constant references with the new types module
- Fix various comment typos
- Update fees module to use the latest major module versions and bump it to v2
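
A minimal sketch of the calcTxTreeMerkleRoot helper mentioned above, assuming
the standalone module's CalcTxTreeMerkleRoot operates on a slice of
*wire.MsgTx; this is a sketch of the intent, not necessarily the exact code:

package main

import (
	"github.com/decred/dcrd/blockchain/standalone"
	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/dcrutil/v2"
	"github.com/decred/dcrd/wire"
)

// calcTxTreeMerkleRoot unwraps the dcrutil transactions to their underlying
// wire messages and defers to the standalone merkle root calculation.
func calcTxTreeMerkleRoot(txns []*dcrutil.Tx) chainhash.Hash {
	msgTxns := make([]*wire.MsgTx, 0, len(txns))
	for _, tx := range txns {
		msgTxns = append(msgTxns, tx.MsgTx())
	}
	return standalone.CalcTxTreeMerkleRoot(msgTxns)
}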
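
Similarly, a sketch of the chainBlockLocatorToHashes conversion, assuming
blockchain/v2 still defines BlockLocator as a slice of *chainhash.Hash while
peer/v2 now expects a flat slice of hashes:

package main

import (
	"github.com/decred/dcrd/blockchain/v2"
	"github.com/decred/dcrd/chaincfg/chainhash"
)

// chainBlockLocatorToHashes converts a block locator from the blockchain
// module into the flat slice of hashes the peer/v2 module expects.
func chainBlockLocatorToHashes(locator blockchain.BlockLocator) []chainhash.Hash {
	if len(locator) == 0 {
		return nil
	}
	hashes := make([]chainhash.Hash, 0, len(locator))
	for _, hash := range locator {
		hashes = append(hashes, *hash)
	}
	return hashes
}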
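
And a sketch of the shared subsidy cache wiring, assuming chaincfg/v2 Params
satisfies the standalone SubsidyParams interface; the config types below are
illustrative stand-ins for blockManagerConfig, BlkTmplGenerator, rpcServer,
and VotingWallet, not the real dcrd internals:

package main

import (
	"github.com/decred/dcrd/blockchain/standalone"
	"github.com/decred/dcrd/chaincfg/v2"
)

// minerConfig and rpcConfig only exist to show the shared cache being
// threaded through to multiple subsystems.
type minerConfig struct {
	SubsidyCache *standalone.SubsidyCache
}

type rpcConfig struct {
	SubsidyCache *standalone.SubsidyCache
}

// buildConfigs creates the subsidy cache a single time during initialization
// and hands the same instance to each subsystem configuration.
func buildConfigs(params *chaincfg.Params) (*minerConfig, *rpcConfig) {
	cache := standalone.NewSubsidyCache(params)
	return &minerConfig{SubsidyCache: cache}, &rpcConfig{SubsidyCache: cache}
}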
2019-08-13 11:22:37 -05:00


// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2015-2019 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package main

import (
	"fmt"
	"net/http"
	_ "net/http/pprof"
	"os"
	"runtime"
	"runtime/debug"
	"runtime/pprof"

	"github.com/decred/dcrd/blockchain/v2/indexers"
	"github.com/decred/dcrd/internal/limits"
	"github.com/decred/dcrd/internal/version"
)

var cfg *config

// winServiceMain is only invoked on Windows. It detects when dcrd is running
// as a service and reacts accordingly.
var winServiceMain func() (bool, error)

// serviceStartOfDayChan is only used by Windows when the code is running as a
// service. It signals the service code that startup has completed. Notice
// that it uses a buffered channel so the caller will not be blocked when the
// service is not running.
var serviceStartOfDayChan = make(chan *config, 1)

// dcrdMain is the real main function for dcrd. It is necessary to work around
// the fact that deferred functions do not run when os.Exit() is called.
func dcrdMain() error {
	// Load configuration and parse command line. This function also
	// initializes logging and configures it accordingly.
	tcfg, _, err := loadConfig()
	if err != nil {
		return err
	}
	cfg = tcfg
	defer func() {
		if logRotator != nil {
			logRotator.Close()
		}
	}()

	// Get a context that will be canceled when a shutdown signal has been
	// triggered either from an OS signal such as SIGINT (Ctrl+C) or from
	// another subsystem such as the RPC server.
	ctx := shutdownListener()
	defer dcrdLog.Info("Shutdown complete")

	// Show version and home dir at startup.
	dcrdLog.Infof("Version %s (Go version %s %s/%s)", version.String(),
		runtime.Version(), runtime.GOOS, runtime.GOARCH)
	dcrdLog.Infof("Home dir: %s", cfg.HomeDir)
	if cfg.NoFileLogging {
		dcrdLog.Info("File logging disabled")
	}

	// Enable http profiling server if requested.
	if cfg.Profile != "" {
		go func() {
			listenAddr := cfg.Profile
			dcrdLog.Infof("Creating profiling server "+
				"listening on %s", listenAddr)
			profileRedirect := http.RedirectHandler("/debug/pprof",
				http.StatusSeeOther)
			http.Handle("/", profileRedirect)
			err := http.ListenAndServe(listenAddr, nil)
			if err != nil {
				fatalf(err.Error())
			}
		}()
	}

	// Write cpu profile if requested.
	if cfg.CPUProfile != "" {
		f, err := os.Create(cfg.CPUProfile)
		if err != nil {
			dcrdLog.Errorf("Unable to create cpu profile: %v", err.Error())
			return err
		}
		pprof.StartCPUProfile(f)
		defer f.Close()
		defer pprof.StopCPUProfile()
	}

	// Write mem profile if requested.
	if cfg.MemProfile != "" {
		f, err := os.Create(cfg.MemProfile)
		if err != nil {
			dcrdLog.Errorf("Unable to create mem profile: %v", err)
			return err
		}
		defer f.Close()
		defer pprof.WriteHeapProfile(f)
	}

	var lifetimeNotifier lifetimeEventServer
	if cfg.LifetimeEvents {
		lifetimeNotifier = newLifetimeEventServer(outgoingPipeMessages)
	}

	if cfg.PipeRx != 0 {
		go serviceControlPipeRx(uintptr(cfg.PipeRx))
	}
	if cfg.PipeTx != 0 {
		go serviceControlPipeTx(uintptr(cfg.PipeTx))
	} else {
		go drainOutgoingPipeMessages()
	}

	// Return now if a shutdown signal was triggered.
	if shutdownRequested(ctx) {
		return nil
	}

	// Load the block database.
	lifetimeNotifier.notifyStartupEvent(lifetimeEventDBOpen)
	db, err := loadBlockDB()
	if err != nil {
		dcrdLog.Errorf("%v", err)
		return err
	}
	defer func() {
		// Ensure the database is sync'd and closed on shutdown.
		lifetimeNotifier.notifyShutdownEvent(lifetimeEventDBOpen)
		dcrdLog.Infof("Gracefully shutting down the database...")
		db.Close()
	}()

	// Return now if a shutdown signal was triggered.
	if shutdownRequested(ctx) {
		return nil
	}

	// Drop indexes and exit if requested.
	//
	// NOTE: The order is important here because dropping the tx index also
	// drops the address index since it relies on it.
	if cfg.DropAddrIndex {
		if err := indexers.DropAddrIndex(db, ctx.Done()); err != nil {
			dcrdLog.Errorf("%v", err)
			return err
		}
		return nil
	}
	if cfg.DropTxIndex {
		if err := indexers.DropTxIndex(db, ctx.Done()); err != nil {
			dcrdLog.Errorf("%v", err)
			return err
		}
		return nil
	}
	if cfg.DropExistsAddrIndex {
		if err := indexers.DropExistsAddrIndex(db, ctx.Done()); err != nil {
			dcrdLog.Errorf("%v", err)
			return err
		}
		return nil
	}
	if cfg.DropCFIndex {
		if err := indexers.DropCfIndex(db, ctx.Done()); err != nil {
			dcrdLog.Errorf("%v", err)
			return err
		}
		return nil
	}

	// Create server and start it.
	lifetimeNotifier.notifyStartupEvent(lifetimeEventP2PServer)
	server, err := newServer(cfg.Listeners, db, activeNetParams.Params,
		cfg.DataDir, ctx.Done())
	if err != nil {
		// TODO(oga) this logging could do with some beautifying.
		dcrdLog.Errorf("Unable to start server on %v: %v",
			cfg.Listeners, err)
		return err
	}
	defer func() {
		lifetimeNotifier.notifyShutdownEvent(lifetimeEventP2PServer)
		dcrdLog.Infof("Gracefully shutting down the server...")
		server.Stop()
		server.WaitForShutdown()
		srvrLog.Infof("Server shutdown complete")
	}()
	server.Start()
	if shutdownRequested(ctx) {
		return nil
	}

	lifetimeNotifier.notifyStartupComplete()

	// Signal the Windows service (if running) that startup has completed.
	serviceStartOfDayChan <- cfg

	// Wait until the interrupt signal is received from an OS signal or
	// shutdown is requested through one of the subsystems such as the RPC
	// server.
	<-ctx.Done()
	return nil
}

func main() {
	// Use all processor cores.
	runtime.GOMAXPROCS(runtime.NumCPU())

	// Block and transaction processing can cause bursty allocations. This
	// limits the garbage collector from excessively overallocating during
	// bursts. This value was arrived at with the help of profiling live
	// usage.
	debug.SetGCPercent(20)

	// Up some limits.
	if err := limits.SetLimits(); err != nil {
		fmt.Fprintf(os.Stderr, "failed to set limits: %v\n", err)
		os.Exit(1)
	}

	// Call serviceMain on Windows to handle running as a service. When
	// the return isService flag is true, exit now since we ran as a
	// service. Otherwise, just fall through to normal operation.
	if runtime.GOOS == "windows" {
		isService, err := winServiceMain()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		if isService {
			os.Exit(0)
		}
	}

	// Work around defer not working after os.Exit().
	if err := dcrdMain(); err != nil {
		os.Exit(1)
	}
}