The legacy ticket database, which was GOB-serialized and written to disk at shutdown, has been removed. Ticket state information is now held in a stake node, which acts as a modularized "black box" containing all information about the state of the stake system. Stake nodes are now a component of the blockchain blockNode struct and are updated alongside it. Like their internal treap primitives, stake nodes are immutable objects that are created with their connect and disconnect node functions.

The blockchain database now stores all stake state information for the best node in the block database, and the blockchain assumes the stake state of the best node is known at any given time. If the states of earlier blocks or side chains must be evaluated, this can be achieved by iterating backwards along the blockchain from the best node and then connecting stake nodes forward as necessary.

Performance improvements with this new module are dramatic: the long delays on startup and shutdown are gone, and blockchain synchronization time improves approximately 5-10x on the mainnet chain. The state of the database is atomic, so unexpected shutdowns should no longer be able to disrupt the chain state.

An upgrade path has been added for version 1 blockchain databases. Users with this database version will be upgraded automatically when they start their clients.
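To make the connect/disconnect pattern concrete, here is a minimal sketch in Go. Every name in it (stakeState, sketchNode, connectNode, disconnectNode) is a hypothetical illustration rather than the actual dcrd stake package API, and a copy-on-write map stands in for the internal treap:

package main

import "fmt"

// stakeState is an illustrative stand-in for the immutable treap inside a
// real stake node. Every "mutation" copies the map, so earlier states stay
// valid after later states are built from them.
type stakeState map[string]bool // ticket hash -> live

func (s stakeState) clone() stakeState {
	c := make(stakeState, len(s))
	for k, v := range s {
		c[k] = v
	}
	return c
}

// sketchNode is a hypothetical, simplified stake node. Once created it is
// never modified; connecting a block produces a new node instead.
type sketchNode struct {
	height uint32
	state  stakeState
	parent *sketchNode // the prior state, used to walk backwards
}

// connectNode builds the stake state for the next block without touching
// the receiver, mirroring the connect function described above.
func (n *sketchNode) connectNode(newTickets, spentTickets []string) *sketchNode {
	next := n.state.clone()
	for _, h := range newTickets {
		next[h] = true
	}
	for _, h := range spentTickets {
		delete(next, h)
	}
	return &sketchNode{height: n.height + 1, state: next, parent: n}
}

// disconnectNode recovers the state before this node was connected. Because
// nodes are immutable, that is just the parent reference; evaluating an old
// block or a side chain is a walk backwards followed by connects forward.
func (n *sketchNode) disconnectNode() *sketchNode {
	return n.parent
}

func main() {
	genesis := &sketchNode{height: 0, state: stakeState{}}
	best := genesis.connectNode([]string{"ticketA", "ticketB"}, nil)
	best = best.connectNode([]string{"ticketC", "ticketD"}, []string{"ticketA"})

	// The best node knows its full state; older states are reached by
	// iterating backwards from it.
	prior := best.disconnectNode()
	fmt.Printf("best height %d: %d live tickets\n", best.height, len(best.state))
	fmt.Printf("prior height %d: %d live tickets\n", prior.height, len(prior.state))
}

A real persistent treap shares structure between versions instead of copying the whole state, which is what makes connecting and disconnecting nodes cheap in practice.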
232 lines · 5.6 KiB · Go
// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package main

import (
	"fmt"
	"net"
	"net/http"
	_ "net/http/pprof"
	"os"
	"runtime"
	"runtime/debug"
	"runtime/pprof"
	"time"

	"github.com/decred/dcrd/blockchain/indexers"
	"github.com/decred/dcrd/limits"
)

var cfg *config

// winServiceMain is only invoked on Windows. It detects when dcrd is running
// as a service and reacts accordingly.
var winServiceMain func() (bool, error)

// dcrdMain is the real main function for dcrd. It is necessary to work around
// the fact that deferred functions do not run when os.Exit() is called. The
// optional serverChan parameter is mainly used by the service code to be
// notified with the server once it is setup so it can gracefully stop it when
// requested from the service control manager.
func dcrdMain(serverChan chan<- *server) error {
	// Load configuration and parse command line. This function also
	// initializes logging and configures it accordingly.
	tcfg, _, err := loadConfig()
	if err != nil {
		return err
	}
	cfg = tcfg
	defer backendLog.Flush()

	interrupted := interruptListener()
	defer dcrdLog.Info("Shutdown complete")

	// Show version at startup.
	dcrdLog.Infof("Version %s", version())
	// Show dcrd home dir location.
	dcrdLog.Infof("Home dir: %s", cfg.HomeDir)

	// Enable http profiling server if requested.
	if cfg.Profile != "" {
		go func() {
			listenAddr := net.JoinHostPort("", cfg.Profile)
			dcrdLog.Infof("Creating profiling server "+
				"listening on %s", listenAddr)
			profileRedirect := http.RedirectHandler("/debug/pprof",
				http.StatusSeeOther)
			http.Handle("/", profileRedirect)
			err := http.ListenAndServe(listenAddr, nil)
			if err != nil {
				fatalf(err.Error())
			}
		}()
	}

	// Write cpu profile if requested.
	if cfg.CPUProfile != "" {
		f, err := os.Create(cfg.CPUProfile)
		if err != nil {
			dcrdLog.Errorf("Unable to create cpu profile: %v", err)
			return err
		}
		pprof.StartCPUProfile(f)
		defer f.Close()
		defer pprof.StopCPUProfile()
	}

	// Write mem profile if requested.
	if cfg.MemProfile != "" {
		f, err := os.Create(cfg.MemProfile)
		if err != nil {
			dcrdLog.Errorf("Unable to create mem profile: %v", err)
			return err
		}
		timer := time.NewTimer(time.Minute * 20) // 20 minutes
		go func() {
			<-timer.C
			pprof.WriteHeapProfile(f)
			f.Close()
		}()
	}

	// Report startup and shutdown progress as lifetime events when
	// requested.
	var lifetimeNotifier lifetimeEventServer
	if cfg.LifetimeEvents {
		lifetimeNotifier = newLifetimeEventServer(outgoingPipeMessages)
	}

	// Wire up the rx/tx pipe handlers when a parent process supplied pipe
	// file descriptors; otherwise discard outgoing pipe messages.
	if cfg.PipeRx != 0 {
		go serviceControlPipeRx(uintptr(cfg.PipeRx))
	}
	if cfg.PipeTx != 0 {
		go serviceControlPipeTx(uintptr(cfg.PipeTx))
	} else {
		go drainOutgoingPipeMessages()
	}

	if interruptRequested(interrupted) {
		return nil
	}

	// Load the block database.
	lifetimeNotifier.notifyStartupEvent(lifetimeEventDBOpen)
	db, err := loadBlockDB()
	if err != nil {
		dcrdLog.Errorf("%v", err)
		return err
	}
	defer func() {
		lifetimeNotifier.notifyShutdownEvent(lifetimeEventDBOpen)
		dcrdLog.Infof("Gracefully shutting down the database...")
		db.Close()
	}()

	if interruptRequested(interrupted) {
		return nil
	}

	// Drop indexes and exit if requested.
	//
	// NOTE: The order is important here because dropping the tx index also
	// drops the address index since it relies on it.
	if cfg.DropAddrIndex {
		if err := indexers.DropAddrIndex(db); err != nil {
			dcrdLog.Errorf("%v", err)
			return err
		}

		return nil
	}
	if cfg.DropTxIndex {
		if err := indexers.DropTxIndex(db); err != nil {
			dcrdLog.Errorf("%v", err)
			return err
		}

		return nil
	}
	if cfg.DropExistsAddrIndex {
		if err := indexers.DropExistsAddrIndex(db); err != nil {
			dcrdLog.Errorf("%v", err)
			return err
		}

		return nil
	}

	// Create server and start it.
	lifetimeNotifier.notifyStartupEvent(lifetimeEventP2PServer)
	server, err := newServer(cfg.Listeners, db, activeNetParams.Params)
	if err != nil {
		// TODO(oga) this logging could do with some beautifying.
		dcrdLog.Errorf("Unable to start server on %v: %v",
			cfg.Listeners, err)
		return err
	}
	defer func() {
		lifetimeNotifier.notifyShutdownEvent(lifetimeEventP2PServer)
		dcrdLog.Infof("Gracefully shutting down the server...")
		server.Stop()
		server.WaitForShutdown()
		srvrLog.Infof("Server shutdown complete")
	}()

	server.Start()
	if serverChan != nil {
		serverChan <- server
	}

	if interruptRequested(interrupted) {
		return nil
	}

	lifetimeNotifier.notifyStartupComplete()

	// Wait until the interrupt signal is received from an OS signal or
	// shutdown is requested through the RPC server.
	<-interrupted
	return nil
}

func main() {
	// Use all processor cores.
	runtime.GOMAXPROCS(runtime.NumCPU())

	// Block and transaction processing can cause bursty allocations. This
	// limits the garbage collector from excessively overallocating during
	// bursts. This value was arrived at with the help of profiling live
	// usage.
	debug.SetGCPercent(20)

	// Up some limits.
	if err := limits.SetLimits(); err != nil {
		fmt.Fprintf(os.Stderr, "failed to set limits: %v\n", err)
		os.Exit(1)
	}

	// Call serviceMain on Windows to handle running as a service. When
	// the return isService flag is true, exit now since we ran as a
	// service. Otherwise, just fall through to normal operation.
	if runtime.GOOS == "windows" {
		isService, err := winServiceMain()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		if isService {
			os.Exit(0)
		}
	}

	// Work around defer not working after os.Exit().
	if err := dcrdMain(nil); err != nil {
		os.Exit(1)
	}
}