multi: Fix several misspellings in the comments.

Contains the following upstream commits:
- ef9c50be57
- eb882f39f8

In addition to merging the fixes in the commits, this also fixes a few
more misspellings that were introduced in the new Decred code.
This commit is contained in:
Dave Collins 2016-05-30 12:24:00 -05:00
commit 2030b4d057
44 changed files with 105 additions and 105 deletions

View File

@ -886,7 +886,7 @@ func (a *AddrManager) Good(addr *wire.NetAddress) {
addrKey := NetAddressKey(addr)
oldBucket := -1
for i := range a.addrNew {
// we check for existance so we can record the first one
// we check for existence so we can record the first one
if _, ok := a.addrNew[i][addrKey]; ok {
delete(a.addrNew[i], addrKey)
ka.refs--

View File

@ -17,7 +17,7 @@ import (
"github.com/decred/dcrd/wire"
)
// naTest is used to describe a test to be perfomed against the NetAddressKey
// naTest is used to describe a test to be performed against the NetAddressKey
// method.
type naTest struct {
in wire.NetAddress

View File

@ -91,7 +91,7 @@ func IsFinalizedTransaction(tx *dcrutil.Tx, blockHeight int64,
return true
}
// At this point, the transaction's lock time hasn't occured yet, but
// At this point, the transaction's lock time hasn't occurred yet, but
// the transaction might still be finalized if the sequence number
// for all transaction inputs is maxed out.
for _, txIn := range msgTx.TxIn {

View File

@ -142,7 +142,7 @@ func BigToCompact(n *big.Int) uint32 {
// CalcWork calculates a work value from difficulty bits. Decred increases
// the difficulty for generating a block by decreasing the value which the
// generated hash must be less than. This difficulty target is stored in each
// block header using a compact representation as described in the documenation
// block header using a compact representation as described in the documentation
// for CompactToBig. The main chain is selected by choosing the chain that has
// the most proof of work (highest difficulty). Since a lower target difficulty
// value equates to higher actual difficulty, the work value which will be

View File

@ -25,7 +25,7 @@ func TstTimeSorter(times []time.Time) sort.Interface {
}
// TstSetMaxMedianTimeEntries makes the ability to set the maximum number of
// median tiem entries available to the test package.
// median time entries available to the test package.
func TstSetMaxMedianTimeEntries(val int) {
maxMedianTimeEntries = val
}

View File

@ -562,7 +562,7 @@ func (tmdb *TicketDB) removeLiveTicket(ticket *TicketData) error {
"delete does not exist!", ticket.SStxHash)
}
// Make sure that the tickets are indentical in the unlikely case of a hash
// Make sure that the tickets are identical in the unlikely case of a hash
// collision
if *tmdb.maps.ticketMap[ticket.Prefix][ticket.SStxHash] != *ticket {
return fmt.Errorf("TicketDB err @ removeLiveTicket: ticket " +
@ -590,7 +590,7 @@ func (tmdb *TicketDB) removeSpentTicket(spendHeight int64, ticket *TicketData) e
"delete does not exist! %v", ticket.SStxHash)
}
// Make sure that the tickets are indentical in the unlikely case of a hash
// Make sure that the tickets are identical in the unlikely case of a hash
// collision
if *tmdb.maps.spentTicketMap[spendHeight][ticket.SStxHash] != *ticket {
return fmt.Errorf("TicketDB err @ removeSpentTicket: ticket hash " +
@ -616,7 +616,7 @@ func (tmdb *TicketDB) removeMissedTicket(ticket *TicketData) error {
"delete does not exist! %v", ticket.SStxHash)
}
// Make sure that the tickets are indentical in the unlikely case of a hash
// Make sure that the tickets are identical in the unlikely case of a hash
// collision
if *tmdb.maps.missedTicketMap[ticket.SStxHash] != *ticket {
return fmt.Errorf("TicketDB err @ removeMissedTicket: ticket hash " +
@ -643,7 +643,7 @@ func (tmdb *TicketDB) removeRevokedTicket(ticket *TicketData) error {
"delete does not exist! %v", ticket.SStxHash)
}
// Make sure that the tickets are indentical in the unlikely case of a hash
// Make sure that the tickets are identical in the unlikely case of a hash
// collision.
if *tmdb.maps.revokedTicketMap[ticket.SStxHash] != *ticket {
return fmt.Errorf("TicketDB err @ removeRevokedTicket: ticket hash " +
@ -721,7 +721,7 @@ func (tmdb *TicketDB) CheckLiveTicket(txHash chainhash.Hash) (bool, error) {
}
// CheckMissedTicket checks for the existence of a missed ticket in the missed
// ticket map. Assumes missedTicketMap is intialized.
// ticket map. Assumes missedTicketMap is initialized.
//
// This function is safe for concurrent access.
func (tmdb *TicketDB) CheckMissedTicket(txHash chainhash.Hash) bool {
@ -735,7 +735,7 @@ func (tmdb *TicketDB) CheckMissedTicket(txHash chainhash.Hash) bool {
}
// CheckRevokedTicket checks for the existence of a revoked ticket in the
// revoked ticket map. Assumes missedTicketMap is intialized.
// revoked ticket map. Assumes missedTicketMap is initialized.
//
// This function is safe for concurrent access.
func (tmdb *TicketDB) CheckRevokedTicket(txHash chainhash.Hash) bool {
@ -1306,7 +1306,7 @@ func (tmdb *TicketDB) unspendTickets(height int64) (SStxMemMap, error) {
// getNewTicketsFromHeight loads a block from leveldb and parses SStx from it using
// chain/stake's IsSStx function.
// This is intended to be used to get ticket numbers from the MAIN CHAIN as
// decribed in the DB.
// described in the DB.
// SIDE CHAIN evaluation should be instantiated in package:chain.
//
// This function MUST be called with the tmdb lock held (for reads).

View File

@ -1371,10 +1371,10 @@ func CheckTransactionInputs(tx *dcrutil.Tx, txHeight int64, txStore TxStore,
// ----------------------------------------------------------------------------
// SSTX -----------------------------------------------------------------------
// 1. Check and make sure that the output amounts in the committments to the
// 1. Check and make sure that the output amounts in the commitments to the
// ticket are correctly calculated.
// 1. Check and make sure that the output amounts in the committments to the
// 1. Check and make sure that the output amounts in the commitments to the
// ticket are correctly calculated.
isSStx, _ := stake.IsSStx(tx)
if isSStx {

View File

@ -2940,7 +2940,7 @@ func newBlockManager(s *server) (*blockManager, error) {
}
bmgrLog.Infof("Block index generation complete")
// Initialize the chain state now that the intial block node index has
// Initialize the chain state now that the initial block node index has
// been generated.
// Query the DB for the current winning ticket data.

View File

@ -33,9 +33,9 @@ const (
// When in "CatchUp" mode, incoming requests to index newly solved
// blocks are backed up for later processing. Once we've finished
// catching up, we process these queued jobs, and then enter into
// "maintainence" mode.
// "maintenance" mode.
indexCatchUp indexState = iota
// When in "maintainence" mode, we have a single worker serially
// When in "maintenance" mode, we have a single worker serially
// processing incoming jobs to index newly solved blocks.
indexMaintain
)

View File

@ -198,7 +198,7 @@ func supportedSubsystems() []string {
subsystems = append(subsystems, subsysID)
}
// Sort the subsytems for stable display.
// Sort the subsystems for stable display.
sort.Strings(subsystems)
return subsystems
}

View File

@ -45,7 +45,7 @@ type Db interface {
// DropAfterBlockBySha will remove any blocks from the database after
// the given block. It terminates any existing transaction and performs
// its operations in an atomic transaction which is commited before
// its operations in an atomic transaction which is committed before
// the function returns.
DropAfterBlockBySha(*chainhash.Hash) (err error)
@ -126,7 +126,7 @@ type Db interface {
// index information for a particular block height. Additionally, it
// will update the stored meta-data related to the curent tip of the
// addr index. These two operations are performed in an atomic
// transaction which is commited before the function returns.
// transaction which is committed before the function returns.
// Addresses are indexed by the raw bytes of their base58 decoded
// hash160.
UpdateAddrIndexForBlock(blkSha *chainhash.Hash, height int64,
@ -219,7 +219,7 @@ func AddDBDriver(instance DriverDB) {
driverList = append(driverList, instance)
}
// CreateDB intializes and opens a database.
// CreateDB initializes and opens a database.
func CreateDB(dbtype string, args ...interface{}) (pbdb Db, err error) {
for _, drv := range driverList {
if drv.DbType == dbtype {

View File

@ -552,7 +552,7 @@ func (db *LevelDb) FetchTxsForAddr(addr dcrutil.Address, skip int,
// index information for a particular block height. Additionally, it
// will update the stored meta-data related to the curent tip of the
// addr index. These two operations are performed in an atomic
// transaction which is commited before the function returns.
// transaction which is committed before the function returns.
// Transactions indexed by address are stored with the following format:
// * prefix || hash160 || blockHeight || txoffset || txlen
// Indexes are stored purely in the key, with blank data for the actual value

View File

@ -47,7 +47,7 @@ below.
Transactions
The Tx interface provides facilities for rolling back or commiting changes that
The Tx interface provides facilities for rolling back or committing changes that
took place while the transaction was active. It also provides the root metadata
bucket under which all keys, values, and nested buckets are stored. A
transaction can either be read-only or read-write and managed or unmanaged.

View File

@ -59,9 +59,9 @@ func SupportedDrivers() []string {
return supportedDBs
}
// Create intializes and opens a database for the specified type. The arguments
// are specific to the database type driver. See the documentation for the
// database driver for further details.
// Create initializes and opens a database for the specified type. The
// arguments are specific to the database type driver. See the documentation
// for the database driver for further details.
//
// ErrDbUnknownType will be returned if the the database type is not registered.
func Create(dbType string, args ...interface{}) (DB, error) {

View File

@ -330,7 +330,7 @@ func testCursorInterface(tc *testContext, bucket database.Bucket) bool {
return false
}
// Ensure foward iteration works as expected after seeking.
// Ensure forward iteration works as expected after seeking.
middleIdx := (len(sortedValues) - 1) / 2
seekKey := sortedValues[middleIdx].key
curIdx = middleIdx
@ -650,7 +650,7 @@ func testMetadataManualTxInterface(tc *testContext) bool {
//
// Otherwise, a read-write transaction is created, the values are
// written, standard bucket tests for read-write transactions are
// performed, and then the transaction is either commited or rolled
// performed, and then the transaction is either committed or rolled
// back depending on the flag.
bucket1Name := []byte("bucket1")
populateValues := func(writable, rollback bool, putValues []keyPair) bool {

View File

@ -451,7 +451,7 @@ type DB interface {
// Update invokes the passed function in the context of a managed
// read-write transaction. Any errors returned from the user-supplied
// function will cause the transaction to be rolled back and are
// returned from this function. Otherwise, the transaction is commited
// returned from this function. Otherwise, the transaction is committed
// when the user-supplied function returns a nil error.
//
// Calling Rollback or Commit on the transaction passed to the

View File

@ -61,8 +61,9 @@ func GenerateSharedSecret(privkey *PrivateKey, pubkey *PublicKey) []byte {
// HMAC [32]byte
// }
//
// The primary aim is to ensure byte compatibility with Pyelliptic. Additionaly,
// refer to section 5.8.1 of ANSI X9.63 for rationale on this format.
// The primary aim is to ensure byte compatibility with Pyelliptic.
// Additionally, refer to section 5.8.1 of ANSI X9.63 for rationale on this
// format.
func Encrypt(curve *TwistedEdwardsCurve, pubkey *PublicKey, in []byte) ([]byte,
error) {
ephemeral, err := GeneratePrivateKey(curve)

View File

@ -54,7 +54,7 @@ func CombinePubkeys(curve *TwistedEdwardsCurve,
}
// generateNoncePair deterministically generate a nonce pair for use in
// partial signing of a message. Returns a public key (nonce to disseminate)
// partial signing of a message. Returns a public key (nonce to disseminate)
// and a private nonce to keep as a secret for the signer.
func generateNoncePair(curve *TwistedEdwardsCurve, msg []byte, priv []byte,
nonceFunction func(*TwistedEdwardsCurve, []byte, []byte, []byte,

View File

@ -66,8 +66,8 @@ func GenerateSharedSecret(privkey *PrivateKey, pubkey *PublicKey) []byte {
// HMAC [32]byte
// }
//
// The primary aim is to ensure byte compatibility with Pyelliptic. Additionaly,
// refer to section 5.8.1 of ANSI X9.63 for rationale on this format.
// The primary aim is to ensure byte compatibility with Pyelliptic. Also, refer
// to section 5.8.1 of ANSI X9.63 for rationale on this format.
func Encrypt(pubkey *PublicKey, in []byte) ([]byte, error) {
ephemeral, err := GeneratePrivateKey(S256())
if err != nil {

View File

@ -42,7 +42,7 @@ package secp256k1
// 3) Since we're dealing with 32-bit values, 64-bits of overflow is a
// reasonable choice for #2
// 4) Given the need for 256-bits of precision and the properties stated in #1,
// #2, and #3, the representation which best accomodates this is 10 uint32s
// #2, and #3, the representation which best accommodates this is 10 uint32s
// with base 2^26 (26 bits * 10 = 260 bits, so the final word only needs 22
// bits) which leaves the desired 64 bits (32 * 10 = 320, 320 - 256 = 64) for
// overflow

View File

@ -27,7 +27,7 @@ func isOdd(a *big.Int) bool {
// the solution to use.
func DecompressPoint(curve *KoblitzCurve, x *big.Int, ybit bool) (*big.Int, error) {
// TODO(oga) This will probably only work for secp256k1 due to
// optimisations.
// optimizations.
// Y = +-sqrt(x^3 + B)
x3 := new(big.Int).Mul(x, x)

View File

@ -70,7 +70,7 @@ func nonceRFC6979(privkey []byte, hash []byte, extra []byte,
}
// generateNoncePair deterministically generate a nonce pair for use in
// partial signing of a message. Returns a public key (nonce to disseminate)
// partial signing of a message. Returns a public key (nonce to disseminate)
// and a private nonce to keep as a secret for the signer.
func generateNoncePair(curve *secp256k1.KoblitzCurve, msg []byte, priv []byte,
nonceFunction func([]byte, []byte, []byte, []byte) []byte, extra []byte,

View File

@ -244,7 +244,7 @@ type GetNetTotalsResult struct {
TimeMillis int64 `json:"timemillis"`
}
// ScriptSig models a signature script. It is defined seperately since it only
// ScriptSig models a signature script. It is defined separately since it only
// applies to non-coinbase. Therefore the field in the Vin structure needs
// to be a pointer.
type ScriptSig struct {
@ -252,7 +252,7 @@ type ScriptSig struct {
Hex string `json:"hex"`
}
// Vin models parts of the tx data. It is defined seperately since
// Vin models parts of the tx data. It is defined separately since
// getrawtransaction, decoderawtransaction, and searchrawtransaction use the
// same structure.
type Vin struct {
@ -375,7 +375,7 @@ func (v *VinPrevOut) MarshalJSON() ([]byte, error) {
return json.Marshal(txStruct)
}
// Vout models parts of the tx data. It is defined seperately since both
// Vout models parts of the tx data. It is defined separately since both
// getrawtransaction and decoderawtransaction use the same structure.
type Vout struct {
Value float64 `json:"value"`

View File

@ -66,7 +66,7 @@ func subStructUsage(structType reflect.Type) string {
}
// Create the name/value entry for the field while considering
// the type of the field. Not all possibile types are covered
// the type of the field. Not all possible types are covered
// here and when one of the types not specifically covered is
// encountered, the field name is simply reused for the value.
fieldName := strings.ToLower(rtf.Name)

View File

@ -12,7 +12,7 @@ import (
"github.com/decred/dcrd/dcrjson"
)
// TestCmdMethod tests the CmdMethod function to ensure it retuns the expected
// TestCmdMethod tests the CmdMethod function to ensure it returns the expected
// methods and errors.
func TestCmdMethod(t *testing.T) {
t.Parallel()

View File

@ -254,7 +254,7 @@ func TestRegisteredCmdMethods(t *testing.T) {
t.Fatal("RegisteredCmdMethods: no methods")
}
// Ensure the returnd methods are sorted.
// Ensure the returned methods are sorted.
sortedMethods := make([]string, len(methods))
copy(sortedMethods, methods)
sort.Sort(sort.StringSlice(sortedMethods))

View File

@ -50,7 +50,7 @@ var (
)
// torLookupIP uses Tor to resolve DNS via the SOCKS extension they provide for
// resolution over the Tor network. Tor itself doesnt support ipv6 so this
// resolution over the Tor network. Tor itself doesn't support ipv6 so this
// doesn't either.
func torLookupIP(host, proxy string) ([]net.IP, error) {
conn, err := net.Dial("tcp", proxy)

View File

@ -125,7 +125,7 @@ func (s *dynamicBanScore) int(t time.Time) uint32 {
// increase increases the persistent, the decaying or both scores by the values
// passed as parameters. The resulting score is calculated as if the action was
// carried out at the point time represented by the third paramter. The
// carried out at the point time represented by the third parameter. The
// resulting score is returned.
//
// This function is not safe for concurrent access.

4
log.go
View File

@ -30,7 +30,7 @@ const (
maxRejectReasonLen = 250
)
// Loggers per subsytem. Note that backendLog is a seelog logger that all of
// Loggers per subsystem. Note that backendLog is a seelog logger that all of
// the subsystem loggers route their messages to. When adding new subsystems,
// add a reference here, to the subsystemLoggers map, and the useLogger
// function.
@ -146,7 +146,7 @@ func useLogger(subsystemID string, logger btclog.Logger) {
}
// initSeelogLogger initializes a new seelog logger that is used as the backend
// for all logging subsytems.
// for all logging subsystems.
func initSeelogLogger(logFile string) {
config := `
<seelog type="adaptive" mininterval="2000000" maxinterval="100000000"

View File

@ -1021,7 +1021,7 @@ func (mp *txMemPool) FetchTransaction(txHash *chainhash.Hash) (*dcrutil.Tx,
// FilterTransactionsByAddress returns all transactions currently in the
// mempool that either create an output to the passed address or spend a
// previously created ouput to the address.
// previously created output to the address.
func (mp *txMemPool) FilterTransactionsByAddress(
addr dcrutil.Address) ([]*dcrutil.Tx, error) {
// Protect concurrent access.
@ -1544,7 +1544,7 @@ func (mp *txMemPool) processOrphans(hash *chainhash.Hash) []*dcrutil.Tx {
return acceptedTxns
}
// PruneStakeTx is the function which is called everytime a new block is
// PruneStakeTx is the function which is called every time a new block is
// processed. The idea is any outstanding SStx that hasn't been mined in a
// certain period of time (CoinbaseMaturity) and the submitted SStx's
// stake difficulty is below the current required stake difficulty should be

View File

@ -123,7 +123,7 @@ func errToRejectErr(err error) (wire.RejectCode, string) {
// Return a generic rejected string if there is no error. This really
// should not happen unless the code elsewhere is not setting an error
// as it should be, but it's best to be safe and simply return a generic
// string rather than allowing the following code that derferences the
// string rather than allowing the following code that dereferences the
// err to panic.
if err == nil {
return wire.RejectInvalid, "rejected"

View File

@ -988,7 +988,7 @@ func handleTooFewVoters(nextHeight int64,
// handleCreatedBlockTemplate stores a successfully created block template to
// the appropriate cache if needed, then returns the template to the miner to
// work on. The stored template is a copy of the template, to prevent races
// from occuring in case the template is mined on by the CPUminer.
// from occurring in case the template is mined on by the CPUminer.
func handleCreatedBlockTemplate(blockTemplate *BlockTemplate,
bm *blockManager) (*BlockTemplate, error) {
curTemplate := bm.GetCurrentTemplate()

View File

@ -22,7 +22,7 @@ A quick overview of the major features peer provides are as follows:
- Full duplex reading and writing of decred protocol messages
- Automatic handling of the initial handshake process including protocol
version negotiation
- Asynchronous message queueing of outbound messages with optional channel for
- Asynchronous message queuing of outbound messages with optional channel for
notification when the message is actually sent
- Flexible peer configuration
- Caller is responsible for creating outgoing connections and listening for
@ -145,7 +145,7 @@ raw message bytes using a format similar to hexdump -C.
Improvement Proposals
This package supports all improvement proposals supported by the wire packge.
This package supports all improvement proposals supported by the wire package.
(https://godoc.org/github.com/decred/dcrd/wire#hdr-Bitcoin_Improvement_Proposals)
*/
package peer

View File

@ -382,7 +382,7 @@ type HostToNetAddrFunc func(host string, port uint16,
// communications via the peer-to-peer protocol. It provides full duplex
// reading and writing, automatic handling of the initial handshake process,
// querying of usage statistics and other information about the remote peer such
// as its address, user agent, and protocol version, output message queueing,
// as its address, user agent, and protocol version, output message queuing,
// inventory trickling, and the ability to dynamically register and unregister
// callbacks for handling decred protocol messages.
//
@ -1279,9 +1279,9 @@ out:
case sccReceiveMessage:
// Remove received messages from the expected
// reponse map. Since certain commands expect
// one of a group of responses, remove everyting
// in the expected group accordingly.
// response map. Since certain commands expect
// one of a group of responses, remove
// everything in the expected group accordingly.
switch msgCmd := msg.message.Command(); msgCmd {
case wire.CmdBlock:
fallthrough
@ -1623,10 +1623,10 @@ out:
log.Tracef("Peer input handler done for %s", p)
}
// queueHandler handles the queueing of outgoing data for the peer. This runs
// as a muxer for various sources of input so we can ensure that server and
// peer handlers will not block on us sending a message.
// We then pass the data on to outHandler to be actually written.
// queueHandler handles the queuing of outgoing data for the peer. This runs as
// a muxer for various sources of input so we can ensure that server and peer
// handlers will not block on us sending a message. That data is then passed on
// to outHandler to be actually written.
func (p *Peer) queueHandler() {
pendingMsgs := list.New()
invSendQueue := list.New()

View File

@ -212,7 +212,7 @@ func checkInputsStandard(tx *dcrutil.Tx, txType stake.TxType,
return nil
}
// checkPkScriptStandard performs a series of checks on a transaction ouput
// checkPkScriptStandard performs a series of checks on a transaction output
// script (public key script) to ensure it is a "standard" public key script.
// A standard public key script is one that is a recognized form, and for
// multi-signature scripts, only contains from 1 to maxStandardMultiSigKeys

View File

@ -217,7 +217,7 @@ var rpcHandlersBeforeInit = map[string]commandHandler{
"verifymessage": handleVerifyMessage,
}
// list of commands that we recognise, but for which dcrd has no support because
// list of commands that we recognize, but for which dcrd has no support because
// it lacks support for wallet functionality. For these commands the user
// should ask a connected instance of dcrwallet.
var rpcAskWallet = map[string]struct{}{
@ -1371,7 +1371,7 @@ func stringInSlice(a string, list []string) bool {
func createVinListPrevOut(s *rpcServer, mtx *wire.MsgTx, chainParams *chaincfg.Params,
vinExtra int, filterAddrMap map[string]struct{}) []dcrjson.VinPrevOut {
// Use a dynamically sized list to accomodate the address filter.
// Use a dynamically sized list to accommodate the address filter.
vinList := make([]dcrjson.VinPrevOut, 0, len(mtx.TxIn))
// Coinbase transactions only have a single txin by definition.
@ -1723,7 +1723,7 @@ func handleDecodeScript(s *rpcServer, cmd interface{}, closeChan <-chan struct{}
}
// handleEstimateFee implenents the estimatefee command.
// TODO this is a very basic implimentation. It should be
// TODO this is a very basic implementation. It should be
// modified to match the bitcoin-core one.
func handleEstimateFee(s *rpcServer, cmd interface{},
closeChan <-chan struct{}) (interface{}, error) {
@ -2102,7 +2102,7 @@ func handleGetAddedNodeInfo(s *rpcServer, cmd interface{}, closeChan <-chan stru
c := cmd.(*dcrjson.GetAddedNodeInfoCmd)
// Retrieve a list of persistent (added) peers from the decred server
// and filter the list of peer per the specified address (if any).
// and filter the list of peers per the specified address (if any).
peers := s.server.AddedNodeInfo()
if c.Node != nil {
node := *c.Node
@ -2218,9 +2218,9 @@ func handleGetBestBlockHash(s *rpcServer, cmd interface{}, closeChan <-chan stru
// minimum difficulty using the passed bits field from the header of a block.
func getDifficultyRatio(bits uint32) float64 {
// The minimum difficulty is the max possible proof-of-work limit bits
// converted back to a number. Note this is not the same as the the
// proof of work limit directly because the block difficulty is encoded
// in a block with the compact form which loses precision.
// converted back to a number. Note this is not the same as the proof of
// work limit directly because the block difficulty is encoded in a block
// with the compact form which loses precision.
max := blockchain.CompactToBig(activeNetParams.PowLimitBits)
target := blockchain.CompactToBig(bits)
@ -2616,10 +2616,10 @@ func (state *gbtWorkState) templateUpdateChan(prevHash *chainhash.Hash, lastGene
// updateBlockTemplate creates or updates a block template for the work state.
// A new block template will be generated when the current best block has
// changed or the transactions in the memory pool have been updated and it has
// been some time has passed since the last template was generated. Otherwise,
// the timestamp for the existing block template is updated (and possibly the
// been long enough since the last template was generated. Otherwise, the
// timestamp for the existing block template is updated (and possibly the
// difficulty on testnet per the consesus rules). Finally, if the
// useCoinbaseValue flag is flase and the existing block template does not
// useCoinbaseValue flag is false and the existing block template does not
// already contain a valid payment address, the block template will be updated
// with a randomly selected payment address from the list of configured
// addresses.
@ -2829,7 +2829,7 @@ func (state *gbtWorkState) blockTemplateResult(bm *blockManager,
// depends on. This is necessary since the created block must
// ensure proper ordering of the dependencies. A map is used
// before creating the final array to prevent duplicate entries
// when mutiple inputs reference the same transaction.
// when multiple inputs reference the same transaction.
dependsMap := make(map[int64]struct{})
for _, txIn := range tx.TxIn {
if idx, ok := txIndex[txIn.PreviousOutPoint.Hash]; ok {
@ -3095,7 +3095,7 @@ func (state *gbtWorkState) blockTemplateResult(bm *blockManager,
return &reply, nil
}
// handleGetBlockTemplateLongPoll a helper for handleGetBlockTemplateRequest
// handleGetBlockTemplateLongPoll is a helper for handleGetBlockTemplateRequest
// which deals with handling long polling for block templates. When a caller
// sends a request with a long poll ID that was previously returned, a response
// is not sent until the caller should stop working on the previous block
@ -3117,8 +3117,8 @@ func handleGetBlockTemplateLongPoll(s *rpcServer, longPollID string, useCoinbase
return nil, err
}
// Just return the current block template if the the long poll ID
// provided by the caller is invalid.
// Just return the current block template if the long poll ID provided by
// the caller is invalid.
prevHash, lastGenerated, err := decodeTemplateID(longPollID)
if err != nil {
result, err := state.blockTemplateResult(s.server.blockManager,
@ -3156,8 +3156,8 @@ func handleGetBlockTemplateLongPoll(s *rpcServer, longPollID string, useCoinbase
// Register the previous hash and last generated time for notifications
// Get a channel that will be notified when the template associated with
// the provided ID is is stale and a new block template should be
// returned to the caller.
// the provided ID is stale and a new block template should be returned to
// the caller.
longPollChan := state.templateUpdateChan(prevHash, lastGenerated)
state.Unlock()
@ -4398,7 +4398,7 @@ func handleGetWorkRequest(s *rpcServer) (interface{}, error) {
data = data[:getworkDataLen]
copy(data[wire.MaxBlockHeaderPayload:], blake256Pad)
// The final result reverses the each of the fields to little endian.
// The final result reverses each of the fields to little endian.
// In particular, the data, hash1, and midstate fields are treated as
// arrays of uint32s (per the internal sha256 hashing state) which are
// in big endian, and thus each 4 bytes is byte swapped. The target is

View File

@ -37,7 +37,7 @@ const (
// websocketSendBufferSize is the number of elements the send channel
// can queue before blocking. Note that this only applies to requests
// handled directly in the websocket client input handler or the async
// handler since notifications have their own queueing mechanism
// handler since notifications have their own queuing mechanism
// independent of the send channel buffer.
websocketSendBufferSize = 50
)
@ -207,7 +207,7 @@ func (m *wsNotificationManager) queueHandler() {
func (m *wsNotificationManager) NotifyBlockConnected(block *dcrutil.Block) {
// As NotifyBlockConnected will be called by the block manager
// and the RPC server may no longer be running, use a select
// statement to unblock enqueueing the notification once the RPC
// statement to unblock enqueuing the notification once the RPC
// server has begun shutting down.
select {
case m.queueNotification <- (*notificationBlockConnected)(block):
@ -220,7 +220,7 @@ func (m *wsNotificationManager) NotifyBlockConnected(block *dcrutil.Block) {
func (m *wsNotificationManager) NotifyBlockDisconnected(block *dcrutil.Block) {
// As NotifyBlockDisconnected will be called by the block manager
// and the RPC server may no longer be running, use a select
// statement to unblock enqueueing the notification once the RPC
// statement to unblock enqueuing the notification once the RPC
// server has begun shutting down.
select {
case m.queueNotification <- (*notificationBlockDisconnected)(block):
@ -233,7 +233,7 @@ func (m *wsNotificationManager) NotifyBlockDisconnected(block *dcrutil.Block) {
func (m *wsNotificationManager) NotifyReorganization(rd *blockchain.ReorganizationNtfnsData) {
// As NotifyReorganization will be called by the block manager
// and the RPC server may no longer be running, use a select
// statement to unblock enqueueing the notification once the RPC
// statement to unblock enqueuing the notification once the RPC
// server has begun shutting down.
select {
case m.queueNotification <- (*notificationReorganization)(rd):
@ -247,7 +247,7 @@ func (m *wsNotificationManager) NotifyWinningTickets(
wtnd *WinningTicketsNtfnData) {
// As NotifyWinningTickets will be called by the block manager
// and the RPC server may no longer be running, use a select
// statement to unblock enqueueing the notification once the RPC
// statement to unblock enqueuing the notification once the RPC
// server has begun shutting down.
select {
case m.queueNotification <- (*notificationWinningTickets)(wtnd):
@ -262,7 +262,7 @@ func (m *wsNotificationManager) NotifySpentAndMissedTickets(
tnd *blockchain.TicketNotificationsData) {
// As NotifySpentAndMissedTickets will be called by the block manager
// and the RPC server may no longer be running, use a select
// statement to unblock enqueueing the notification once the RPC
// statement to unblock enqueuing the notification once the RPC
// server has begun shutting down.
select {
case m.queueNotification <- (*notificationSpentAndMissedTickets)(tnd):
@ -276,7 +276,7 @@ func (m *wsNotificationManager) NotifyNewTickets(
tnd *blockchain.TicketNotificationsData) {
// As NotifyNewTickets will be called by the block manager
// and the RPC server may no longer be running, use a select
// statement to unblock enqueueing the notification once the RPC
// statement to unblock enqueuing the notification once the RPC
// server has begun shutting down.
select {
case m.queueNotification <- (*notificationNewTickets)(tnd):
@ -290,7 +290,7 @@ func (m *wsNotificationManager) NotifyStakeDifficulty(
stnd *StakeDifficultyNtfnData) {
// As NotifyNewTickets will be called by the block manager
// and the RPC server may no longer be running, use a select
// statement to unblock enqueueing the notification once the RPC
// statement to unblock enqueuing the notification once the RPC
// server has begun shutting down.
select {
case m.queueNotification <- (*notificationStakeDifficulty)(stnd):
@ -310,7 +310,7 @@ func (m *wsNotificationManager) NotifyMempoolTx(tx *dcrutil.Tx, isNew bool) {
// As NotifyMempoolTx will be called by mempool and the RPC server
// may no longer be running, use a select statement to unblock
// enqueueing the notification once the RPC server has begun
// enqueuing the notification once the RPC server has begun
// shutting down.
select {
case m.queueNotification <- n:
@ -1488,11 +1488,11 @@ out:
rpcsLog.Tracef("Websocket client input handler done for %s", c.addr)
}
// notificationQueueHandler handles the queueing of outgoing notifications for
// notificationQueueHandler handles the queuing of outgoing notifications for
// the websocket client. This runs as a muxer for various sources of input to
// ensure that queueing up notifications to be sent will not block. Otherwise,
// ensure that queuing up notifications to be sent will not block. Otherwise,
// slow clients could bog down the other systems (such as the mempool or block
// manager) which are queueing the data. The data is passed on to outHandler to
// manager) which are queuing the data. The data is passed on to outHandler to
// actually be written. It must be run as a goroutine.
func (c *wsClient) notificationQueueHandler() {
ntfnSentChan := make(chan bool, 1) // nonblocking sync
@ -1779,7 +1779,7 @@ func (c *wsClient) WaitForShutdown() {
// manager, websocket connection, remote address, and whether or not the client
// has already been authenticated (via HTTP Basic access authentication). The
// returned client is ready to start. Once started, the client will process
// incoming and outgoing messages in separate goroutines complete with queueing
// incoming and outgoing messages in separate goroutines complete with queuing
// and asynchronous handling for long-running operations.
func newWebsocketClient(server *rpcServer, conn *websocket.Conn,
remoteAddr string, authenticated bool, isAdmin bool) (*wsClient, error) {
@ -2289,9 +2289,8 @@ func recoverFromReorg(db database.Db, minBlock, maxBlock int64,
return hashList, nil
}
// descendantBlock returns the appropiate JSON-RPC error if a current block
// 'cur' fetched during a reorganize is not a direct child of the parent block
// 'prev'.
// descendantBlock returns the appropriate JSON-RPC error if a current block
// fetched during a reorganize is not a direct child of the parent block hash.
func descendantBlock(prev, cur *dcrutil.Block) error {
if prev == nil || cur == nil {
return fmt.Errorf("descendantBlock passed nil block pointer")

View File

@ -574,7 +574,7 @@ func (sp *serverPeer) OnTx(p *peer.Peer, msg *wire.MsgTx) {
// Queue the transaction up to be handled by the block manager and
// intentionally block further receives until the transaction is fully
// processed and known good or bad. This helps prevent a malicious peer
// from queueing up a bunch of bad transactions before disconnecting (or
// from queuing up a bunch of bad transactions before disconnecting (or
// being disconnected) and wasting memory.
sp.server.blockManager.QueueTx(tx, sp)
<-sp.txProcessed
@ -594,7 +594,7 @@ func (sp *serverPeer) OnBlock(p *peer.Peer, msg *wire.MsgBlock, buf []byte) {
// Queue the block up to be handled by the block manager and
// intentionally block further receives until the network block is fully
// processed and known good or bad. This helps prevent a malicious peer
// from queueing up a bunch of bad blocks before disconnecting (or being
// from queuing up a bunch of bad blocks before disconnecting (or being
// disconnected) and wasting memory. Additionally, this behavior is
// depended on by at least the block acceptance test tool as the
// reference implementation processes blocks in the same thread and
@ -639,7 +639,7 @@ func (sp *serverPeer) OnGetData(p *peer.Peer, msg *wire.MsgGetData) {
// This incremental score decays each minute to half of its value.
sp.addBanScore(0, uint32(length)*99/wire.MaxInvPerMsg, "getdata")
// We wait on this wait channel periodically to prevent queueing
// We wait on this wait channel periodically to prevent queuing
// far more data than we can send in a reasonable time, wasting memory.
// The waiting occurs after the database fetch for the next one to
// provide a little pipelining.

View File

@ -18,7 +18,7 @@ import (
type ScriptFlags uint32
const (
// ScriptBip16 defines whether the bip16 threshhold has passed and thus
// ScriptBip16 defines whether the bip16 threshold has passed and thus
// pay-to-script hash transactions will be fully validated.
ScriptBip16 ScriptFlags = 1 << iota

View File

@ -73,7 +73,7 @@ var (
ErrStackElementTooBig = errors.New("element in script too large")
// ErrStackUnknownAddress is returned when ScriptToAddrHash does not
// recognise the pattern of the script and thus can not find the address
// recognize the pattern of the script and thus can not find the address
// for payment.
ErrStackUnknownAddress = errors.New("non-recognised address")

View File

@ -448,7 +448,7 @@ func getSigOpCount(pops []parsedOpcode, precise bool) int {
fallthrough
case OP_CHECKMULTISIGVERIFY:
// If we are being precise then look for familiar
// patterns for multisig, for now all we recognise is
// patterns for multisig, for now all we recognize is
// OP_1 - OP_16 to signify the number of pubkeys.
// Otherwise, we use the max of 20.
if precise && i > 0 &&

View File

@ -238,7 +238,7 @@ func (b *ScriptBuilder) Reset() *ScriptBuilder {
return b
}
// Script returns the currently built script. When any errors occured while
// Script returns the currently built script. When any errors occurred while
// building the script, the script will be returned up the point of the first
// error along with the error.
func (b *ScriptBuilder) Script() ([]byte, error) {

View File

@ -402,7 +402,7 @@ func mergeScripts(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int,
return mergeMultiSig(tx, idx, addresses, nRequired, pkScript,
sigScript, prevScript)
// It doesn't actualy make sense to merge anything other than multiig
// It doesn't actually make sense to merge anything other than multisig
// and scripthash (because it could contain multisig). Everything else
// has either zero signature, can't be spent, or has a single signature
// which is either present or not. The other two cases are handled
@ -572,7 +572,7 @@ func (sc ScriptClosure) GetScript(address dcrutil.Address) ([]byte, error) {
// looked up by calling getKey() with the string of the given address.
// Any pay-to-script-hash signatures will be similarly looked up by calling
// getScript. If previousScript is provided then the results in previousScript
// will be merged in a type-dependant manner with the newly generated.
// will be merged in a type-dependent manner with the newly generated
// signature script.
func SignTxOutput(chainParams *chaincfg.Params, tx *wire.MsgTx, idx int,
pkScript []byte, hashType SigHashType, kdb KeyDB, sdb ScriptDB,

View File

@ -32,7 +32,7 @@ func (w *fixedWriter) Write(p []byte) (n int, err error) {
return
}
// Bytes returns the bytes alreayd written to the fixed writer.
// Bytes returns the bytes already written to the fixed writer.
func (w *fixedWriter) Bytes() []byte {
return w.b
}