Fix the dumpblockchain function (#405)

The dumpblockchain function used to serialize a map of blocks
into gob-encoded format, which was useful for testing but was
incompatible with the addblock tool.  The function now dumps a
flat file in the same format required by the addblock tool.

A couple of shutdown assertions were also added to prevent
potential panics if pointers are nil.  The time it takes
addblock to import the blockchain is now reported.
C Jepson 2016-10-10 12:35:46 -04:00 committed by Alex Yocom-Piatt
parent e206421edd
commit 393c48d32e
5 changed files with 66 additions and 23 deletions
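
For context, the flat-file layout that addblock consumes (and that the new dumpBlockChain code below produces) is one record per block: the 4-byte little-endian network ID, a 4-byte little-endian block length, and the serialized block bytes, starting at height 1 so the genesis block is excluded. The following is a minimal, illustrative reader for that layout; the program and names such as readBlockRecord are assumptions for illustration only, not code from this repository.

package main

import (
    "encoding/binary"
    "fmt"
    "io"
    "os"
    "strconv"
)

// readBlockRecord reads a single record of the dump format: a 4-byte
// little-endian network ID, a 4-byte little-endian block length, and the
// serialized block bytes themselves.
func readBlockRecord(r io.Reader, wantNet uint32) ([]byte, error) {
    var hdr [8]byte
    if _, err := io.ReadFull(r, hdr[:]); err != nil {
        // io.EOF here means the previous record was the last one.
        return nil, err
    }
    if net := binary.LittleEndian.Uint32(hdr[0:4]); net != wantNet {
        return nil, fmt.Errorf("unexpected network ID %#x, want %#x", net, wantNet)
    }
    block := make([]byte, binary.LittleEndian.Uint32(hdr[4:8]))
    if _, err := io.ReadFull(r, block); err != nil {
        return nil, err
    }
    return block, nil
}

func main() {
    // Usage: reader <dumpfile> <network-id>, where the network ID is the
    // same 4-byte value written by the dump (activeNetParams.Net in the
    // daemon), given in decimal or 0x-prefixed hex.
    if len(os.Args) != 3 {
        fmt.Fprintln(os.Stderr, "usage: reader <dumpfile> <network-id>")
        os.Exit(1)
    }
    wantNet, err := strconv.ParseUint(os.Args[2], 0, 32)
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    f, err := os.Open(os.Args[1])
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    defer f.Close()

    var count int64
    for {
        _, err := readBlockRecord(f, uint32(wantNet))
        if err == io.EOF {
            break
        }
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        count++
    }
    // With the genesis block excluded, the record count should equal the
    // chain height at the time of the dump.
    fmt.Printf("read %d block records\n", count)
}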

View File

@@ -6,11 +6,9 @@
package main
import (
"bytes"
"container/list"
"encoding/gob"
"encoding/binary"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
@@ -2762,11 +2760,12 @@ func newBlockManager(s *server, indexManager blockchain.IndexManager) (*blockMan
// Dump the blockchain here if asked for it, and quit.
if cfg.DumpBlockchain != "" {
err = dumpBlockChain(best.Height, s.db)
err = dumpBlockChain(bm.chain, best.Height)
if err != nil {
return nil, err
}
return nil, fmt.Errorf("Block database dump to map completed, closing.")
return nil, fmt.Errorf("closing after dumping blockchain")
}
// Query the DB for the current winning ticket data.
@@ -2896,24 +2895,60 @@ func loadBlockDB() (database.DB, error) {
}
// dumpBlockChain dumps a map of the blockchain blocks as serialized bytes.
func dumpBlockChain(height int64, db database.DB) error {
blockchain, err := blockchain.DumpBlockChain(db, height)
func dumpBlockChain(b *blockchain.BlockChain, height int64) error {
bmgrLog.Infof("Writing the blockchain to disk as a flat file, " +
"please wait...")
progressLogger := newBlockProgressLogger("Written", bmgrLog)
file, err := os.Create(cfg.DumpBlockchain)
if err != nil {
return err
}
defer file.Close()
// Serialize the map into a buffer
w := new(bytes.Buffer)
encoder := gob.NewEncoder(w)
if err := encoder.Encode(blockchain); err != nil {
return err
// Store the network ID in an array for later writing.
var net [4]byte
binary.LittleEndian.PutUint32(net[:], uint32(activeNetParams.Net))
// Write the blocks sequentially, excluding the genesis block.
var sz [4]byte
for i := int64(1); i <= height; i++ {
bl, err := b.BlockByHeight(i)
if err != nil {
return err
}
// Serialize the block for writing.
blB, err := bl.Bytes()
if err != nil {
return err
}
// Write the network ID first.
_, err = file.Write(net[:])
if err != nil {
return err
}
// Write the size of the block as a little endian uint32,
// then write the block itself serialized.
binary.LittleEndian.PutUint32(sz[:], uint32(len(blB)))
_, err = file.Write(sz[:])
if err != nil {
return err
}
_, err = file.Write(blB)
if err != nil {
return err
}
progressLogger.logBlockHeight(bl)
}
// Write the buffer to disk
err = ioutil.WriteFile(cfg.DumpBlockchain, w.Bytes(), 0664)
if err != nil {
return err
}
bmgrLog.Infof("Successfully dumped the blockchain (%v blocks) to %v.",
height, cfg.DumpBlockchain)
return nil
}
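
Because every record carries its length up front, a finished dump can also be sanity-checked against the block count reported in the log line above without deserializing any blocks, by skipping over each block body. A rough sketch under the same assumptions as the reader above (illustrative only, not repository code):

package main

import (
    "encoding/binary"
    "fmt"
    "io"
    "os"
)

// countRecords walks a dump file by reading each 8-byte record header and
// skipping over the block body, so no block ever has to be held in memory.
func countRecords(r io.Reader) (int64, error) {
    var hdr [8]byte
    var n int64
    for {
        if _, err := io.ReadFull(r, hdr[:]); err != nil {
            if err == io.EOF {
                return n, nil
            }
            return n, err
        }
        size := int64(binary.LittleEndian.Uint32(hdr[4:8]))
        if _, err := io.CopyN(io.Discard, r, size); err != nil {
            return n, err
        }
        n++
    }
}

func main() {
    f, err := os.Open(os.Args[1])
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    defer f.Close()

    n, err := countRecords(f)
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    // Since the dump starts at height 1, the count should match the height
    // logged by dumpBlockChain.
    fmt.Printf("%d block records\n", n)
}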

View File

@@ -114,8 +114,9 @@ func realMain() error {
}
log.Infof("Processed a total of %d blocks (%d imported, %d already "+
"known)", results.blocksProcessed, results.blocksImported,
results.blocksProcessed-results.blocksImported)
"known) in %v", results.blocksProcessed, results.blocksImported,
results.blocksProcessed-results.blocksImported, results.duration)
return nil
}

View File

@@ -26,6 +26,7 @@ var zeroHash = chainhash.Hash{}
type importResults struct {
blocksProcessed int64
blocksImported int64
duration time.Duration
err error
}
@@ -48,6 +49,7 @@ type blockImporter struct {
lastHeight int64
lastBlockTime time.Time
lastLogTime time.Time
startTime time.Time
}
// readBlock reads the next block from the input file.
@@ -257,6 +259,7 @@ func (bi *blockImporter) statusHandler(resultsChan chan *importResults) {
resultsChan <- &importResults{
blocksProcessed: bi.blocksProcessed,
blocksImported: bi.blocksImported,
duration: time.Now().Sub(bi.startTime),
err: err,
}
close(bi.quit)
@@ -266,6 +269,7 @@ func (bi *blockImporter) statusHandler(resultsChan chan *importResults) {
resultsChan <- &importResults{
blocksProcessed: bi.blocksProcessed,
blocksImported: bi.blocksImported,
duration: time.Now().Sub(bi.startTime),
err: nil,
}
}
@@ -352,5 +356,6 @@ func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
chain: chain,
medianTime: blockchain.NewMedianTime(),
lastLogTime: time.Now(),
startTime: time.Now(),
}, nil
}

View File

@@ -129,7 +129,7 @@ type config struct {
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
CPUProfile string `long:"cpuprofile" description:"Write CPU profile to the specified file"`
MemProfile string `long:"memprofile" description:"Write mem profile to the specified file"`
DumpBlockchain string `long:"dumpblockchain" description:"Write blockchain as a gob-encoded map to the specified file"`
DumpBlockchain string `long:"dumpblockchain" description:"Write blockchain as a flat file of blocks for use with addblock, to the specified filename"`
MiningTimeOffset int `long:"miningtimeoffset" description:"Offset the mining timestamp of a block by this many seconds (positive values are in the past)"`
DebugLevel string `short:"d" long:"debuglevel" description:"Logging level for all subsystems {trace, debug, info, warn, error, critical} -- You may also specify <subsystem>=<level>,<subsystem2>=<level>,... to set the log level for individual subsystems -- Use show to list available subsystems"`
Upnp bool `long:"upnp" description:"Use UPnP to map our listening port outside of NAT"`

View File

@@ -2266,11 +2266,13 @@ func (s *server) Stop() error {
}
}
// Stop the CPU miner if needed
s.cpuMiner.Stop()
// Stop the CPU miner if needed.
if cfg.Generate && s.cpuMiner != nil {
s.cpuMiner.Stop()
}
// Shutdown the RPC server if it's not disabled.
if !cfg.DisableRPC {
if !cfg.DisableRPC && s.rpcServer != nil {
s.rpcServer.Stop()
}