multi: Correct typos.

Correct typos found by reading code and creative grepping.
Aaron Campbell 2019-08-16 18:37:58 -04:00 committed by Dave Collins
parent b69302960f
commit 03678bb754
122 changed files with 257 additions and 254 deletions

View File

@ -140,7 +140,7 @@ const (
newBucketsPerAddress = 8
// numMissingDays is the number of days before which we assume an
// address has vanished if we have not seen it announced in that long.
numMissingDays = 30
// numRetries is the number of tried without a single success before
@ -382,7 +382,7 @@ func (a *AddrManager) savePeers() {
return
}
// First we make a serialisable datastructure so we can encode it to JSON.
// First we make a serialisable data structure so we can encode it to JSON.
sam := new(serializedAddrManager)
sam.Version = serialisationVersion
copy(sam.Key[:], a.key[:])
@ -753,7 +753,7 @@ func (a *AddrManager) HostToNetAddress(host string, port uint16, services wire.S
// the relevant .onion address.
func ipString(na *wire.NetAddress) string {
if isOnionCatTor(na) {
// We know now that na.IP is long enogh.
// We know now that na.IP is long enough.
base32 := base32.StdEncoding.EncodeToString(na.IP[6:])
return strings.ToLower(base32) + ".onion"
}
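The OnionCat mapping that ipString relies on can be demonstrated in isolation. Below is a minimal standalone sketch mirroring the base32 conversion above; the fd87:d87e:eb43::/48 prefix is the one documented later in this commit, while the remaining host bytes in the example address are made up:

package main

import (
	"encoding/base32"
	"fmt"
	"net"
	"strings"
)

func main() {
	// An OnionCat-mapped address: the fd87:d87e:eb43::/48 prefix
	// followed by a 10-byte onion identifier (made-up bytes here).
	ip := net.ParseIP("fd87:d87e:eb43:0102:0304:0506:0708:090a")

	// Bytes 6 through 15 hold the onion identifier; base32 encode them
	// and lowercase the result, just as ipString does.
	onion := strings.ToLower(base32.StdEncoding.EncodeToString(ip[6:])) + ".onion"
	fmt.Println(onion) // aebagbafaydqqcik.onion
}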
@ -902,7 +902,7 @@ func (a *AddrManager) Good(addr *wire.NetAddress) {
ka.lastattempt = now
ka.attempts = 0
// move to tried set, optionally evicting other addresses if neeed.
// move to tried set, optionally evicting other addresses if needed.
if ka.tried {
return
}
@ -974,7 +974,7 @@ func (a *AddrManager) Good(addr *wire.NetAddress) {
a.addrNew[newBucket][rmkey] = rmka
}
// SetServices sets the services for the giiven address to the provided value.
// SetServices sets the services for the given address to the provided value.
func (a *AddrManager) SetServices(addr *wire.NetAddress, services wire.ServiceFlag) {
a.mtx.Lock()
defer a.mtx.Unlock()
@ -1070,7 +1070,7 @@ const (
// Ipv6Strong represents a connection state between two IPV6 addresses.
Ipv6Strong
// Private reprsents a connection state connect between two Tor addresses.
// Private represents a connection state connect between two Tor addresses.
Private
)
@ -1181,7 +1181,7 @@ func (a *AddrManager) GetBestLocalAddress(remoteAddr *wire.NetAddress) *wire.Net
return bestAddress
}
// IsPeerNaValid asserts if the the provided local address is routable
// IsPeerNaValid asserts if the provided local address is routable
// and reachable from the peer that suggested it.
func (a *AddrManager) IsPeerNaValid(localAddr, remoteAddr *wire.NetAddress) bool {
net := getNetwork(localAddr)

View File

@ -21,7 +21,7 @@ var (
ipNet("192.168.0.0", 16, 32),
}
// rfc2544Net specifies the the IPv4 block as defined by RFC2544
// rfc2544Net specifies the IPv4 block as defined by RFC2544
// (198.18.0.0/15)
rfc2544Net = ipNet("198.18.0.0", 15, 32)
@ -78,7 +78,7 @@ var (
// byte number. It then stores the first 6 bytes of the address as
// 0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43.
//
// This is the same range used by OnionCat, which is part part of the
// This is the same range used by OnionCat, which is part of the
// RFC4193 unique local IPv6 range.
//
// In summary the format is:

View File

@ -163,7 +163,7 @@ func DecodeNoLimit(bech string) (string, []byte, error) {
return "", nil, ErrInvalidLength(len(bech))
}
// Only ASCII characters between 33 and 126 are allowed.
var hasLower, hasUpper bool
for i := 0; i < len(bech); i++ {
if bech[i] < 33 || bech[i] > 126 {

View File

@ -115,7 +115,7 @@ func TestCanDecodeUnlimtedBech32(t *testing.T) {
}
// BenchmarkEncodeDecodeCycle performs a benchmark for a full encode/decode
// cycle of a bech32 string. It also reports the allocation count, which we
// expect to be 2 for a fully optimized cycle.
func BenchmarkEncodeDecodeCycle(b *testing.B) {

View File

@ -584,7 +584,7 @@ func (b *BlockChain) fetchBlockByNode(node *blockNode) (*dcrutil.Block, error) {
// pruneStakeNodes removes references to old stake nodes which should no
// longer be held in memory so as to keep the maximum memory usage down.
// It proceeds from the bestNode back to the determined minimum height node,
// finds all the relevant children, and then drops the the stake nodes from
// finds all the relevant children, and then drops the stake nodes from
// them by assigning nil and allowing the memory to be recovered by GC.
//
// This function MUST be called with the chain state lock held (for writes).
@ -914,7 +914,7 @@ func (b *BlockChain) disconnectBlock(node *blockNode, block, parent *dcrutil.Blo
}
// Update the transaction spend journal by removing the record
// that contains all txos spent by the block .
// that contains all txos spent by the block.
err = dbRemoveSpendJournalEntry(dbTx, block.Hash())
if err != nil {
return err
@ -1118,7 +1118,7 @@ func (b *BlockChain) reorganizeChainInternal(targetTip *blockNode) error {
tip = n.parent
}
// Load the fork block if there are blocks to attach and its not already
// Load the fork block if there are blocks to attach and it's not already
// loaded which will be the case if no nodes were detached. The fork block
// is used as the parent to the first node to be attached below.
forkBlock := nextBlockToDetach
@ -1437,7 +1437,7 @@ func (b *BlockChain) connectBestChain(node *blockNode, block, parent *dcrutil.Bl
// In the fast add case the code to check the block connection
// was skipped, so the utxo view needs to load the referenced
// utxos, spend them, and add the new utxos being created by
// this block. Also, in the case the the block votes against
// this block. Also, in the case the block votes against
// the parent, its regular transaction tree must be
// disconnected.
if fastAdd {

View File

@ -1152,7 +1152,7 @@ func (hp *hash256prng) Hash256Rand() uint32 {
}
// Roll over the entire PRNG by re-hashing the seed when the hash
// iterator index overlows a uint32.
// iterator index overflows a uint32.
if hp.idx > math.MaxUint32 {
hp.seed = chainhash.HashH(hp.seed[:])
hp.cachedHash = hp.seed
@ -1568,7 +1568,7 @@ func (g *Generator) ReplaceVoteBitsN(voteNum int, voteBits uint16) func(*wire.Ms
stx := b.STransactions[voteNum]
if !isVoteTx(stx) {
panic(fmt.Sprintf("attempt to replace non-vote "+
"transaction #%d for for block %s", voteNum,
"transaction #%d for block %s", voteNum,
b.BlockHash()))
}
@ -2458,7 +2458,7 @@ func (g *Generator) AssertTipBlockSigOpsCount(expected int) {
}
}
// AssertTipBlockSize panics if the if the current tip block associated with the
// AssertTipBlockSize panics if the current tip block associated with the
// generator does not have the specified size when serialized.
func (g *Generator) AssertTipBlockSize(expected int) {
serializeSize := g.tip.SerializeSize()

View File

@ -489,7 +489,7 @@ func dbMaybeStoreBlock(dbTx database.Tx, block *dcrutil.Block) error {
// NOTE: The transaction version and flags are only encoded when the spent
// txout was the final unspent output of the containing transaction.
// Otherwise, the header code will be 0 and the version is not serialized at
// all. This is done because that information is only needed when the utxo
// set no longer has it.
//
// Example:
@ -511,7 +511,7 @@ type spentTxOut struct {
amount int64 // The amount of the output.
txType stake.TxType // The stake type of the transaction.
height uint32 // Height of the the block containing the tx.
height uint32 // Height of the block containing the tx.
index uint32 // Index in the block of the transaction.
scriptVersion uint16 // The version of the scripting language.
txVersion uint16 // The version of creating tx.

View File

@ -931,7 +931,7 @@ func TestSpendJournalErrors(t *testing.T) {
}
// TestUtxoSerialization ensures serializing and deserializing unspent
// trasaction output entries works as expected.
// transaction output entries works as expected.
func TestUtxoSerialization(t *testing.T) {
t.Parallel()

View File

@ -375,7 +375,7 @@ testLoop:
// TestChainViewNil ensures that creating and accessing a nil chain view behaves
// as expected.
func TestChainViewNil(t *testing.T) {
// Ensure two unininitialized views are considered equal.
// Ensure two uninitialized views are considered equal.
view := newChainView(nil)
if !view.Equals(newChainView(nil)) {
t.Fatal("uninitialized nil views unequal")

View File

@ -116,7 +116,7 @@ func chainSetup(dbName string, params *chaincfg.Params) (*BlockChain, func(), er
return chain, teardown, nil
}
// newFakeChain returns a chain that is usable for syntetic tests. It is
// newFakeChain returns a chain that is usable for synthetic tests. It is
// important to note that this chain has no database associated with it, so
// it is not usable with all functions and the tests must take care when making
// use of it.
@ -651,7 +651,7 @@ func (g *chaingenHarness) AdvanceToStakeValidationHeight() {
func (g *chaingenHarness) AdvanceFromSVHToActiveAgenda(voteID string) {
g.t.Helper()
// Find the correct deployment for the provided ID along with the the yes
// Find the correct deployment for the provided ID along with the yes
// vote choice within it.
params := g.Params()
deploymentVer, deployment, err := findDeployment(params, voteID)

View File

@ -723,7 +723,7 @@ const (
// from the flags byte.
txTypeBitmask = 0x0c
// txTypeShift is the number of bits to shift falgs to the right to yield the
// txTypeShift is the number of bits to shift flags to the right to yield the
// correct integer value after applying the bitmask with AND.
txTypeShift = 2
)

View File

@ -640,7 +640,7 @@ func calcNextStakeDiffV2(params *chaincfg.Params, nextHeight, curDiff, prevPoolS
// nextDiff = -----------------------------------
// prevPoolSizeAll * targetPoolSizeAll
//
// Further, the Sub parameter must calculate the denomitor first using
// Further, the Sub parameter must calculate the denominator first using
// integer math.
targetPoolSizeAll := votesPerBlock * (ticketPoolSize + ticketMaturity)
curPoolSizeAllBig := big.NewInt(curPoolSizeAll)

View File

@ -110,8 +110,8 @@ const (
// ErrUnexpectedDifficulty indicates specified bits do not align with
// the expected value either because it doesn't match the calculated
// valued based on difficulty regarted rules or it is out of the valid
// range.
// value based on difficulty regarding the rules or it is out of the
// valid range.
ErrUnexpectedDifficulty
// ErrHighHash indicates the block does not hash to a value which is
@ -390,7 +390,7 @@ const (
ErrRegTxCreateStakeOut
// ErrInvalidFinalState indicates that the final state of the PRNG included
// in the the block differed from the calculated final state.
// in the block differed from the calculated final state.
ErrInvalidFinalState
// ErrPoolSize indicates an error in the ticket pool size for this block.

View File

@ -18,7 +18,7 @@ import (
)
// This example demonstrates how to create a new chain instance and use
// ProcessBlock to attempt to attempt add a block to the chain. As the package
// ProcessBlock to attempt to add a block to the chain. As the package
// overview documentation describes, this includes all of the Decred consensus
// rules. This example intentionally attempts to insert a duplicate genesis
// block to illustrate how an invalid block is handled.

View File

@ -272,7 +272,7 @@ func replaceStakeSigScript(sigScript []byte) func(*wire.MsgBlock) {
}
// additionalPoWTx returns a function that itself takes a block and modifies it
// by adding the the provided transaction to the regular transaction tree.
// by adding the provided transaction to the regular transaction tree.
func additionalPoWTx(tx *wire.MsgTx) func(*wire.MsgBlock) {
return func(b *wire.MsgBlock) {
b.AddTransaction(tx)
@ -307,8 +307,8 @@ func encodeNonCanonicalBlock(b *wire.MsgBlock) []byte {
return buf.Bytes()
}
// assertTipsNonCanonicalBlockSize panics if the if the current tip block
// associated with the generator does not have the specified non-canonical size
// assertTipsNonCanonicalBlockSize panics if the current tip block associated
// with the generator does not have the specified non-canonical size
// when serialized.
func assertTipNonCanonicalBlockSize(g *chaingen.Generator, expected int) {
tip := g.Tip()
@ -726,7 +726,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// ---------------------------------------------------------------------
// The comments below identify the structure of the chain being built.
//
// The values in parenthesis repesent which outputs are being spent.
// The values in parenthesis represent which outputs are being spent.
//
// For example, b1(0) indicates the first collected spendable output
// which, due to the code above to create the correct number of blocks,
@ -1879,8 +1879,8 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// Create block with duplicate transactions in the regular transaction
// tree.
//
// This test relies on the shape of the shape of the merkle tree to test
// the intended condition. That is the reason for the assertion.
// This test relies on the shape of the merkle tree to test the
// intended condition. That is the reason for the assertion.
//
// ... -> brs3(14)
// \-> bmf14(15)

View File

@ -42,7 +42,8 @@ const (
// consumes. It consists of the address key + 1 byte for the level.
levelKeySize = addrKeySize + 1
// levelOffset is the offset in the level key which identifes the level.
// levelOffset is the offset in the level key which identifies the
// level.
levelOffset = levelKeySize - 1
// addrKeyTypePubKeyHash is the address type in an address key which
@ -159,7 +160,7 @@ func serializeAddrIndexEntry(blockID uint32, txLoc wire.TxLoc, blockIndex uint32
// deserializeAddrIndexEntry decodes the passed serialized byte slice into the
// provided region struct according to the format described in detail above and
// uses the passed block hash fetching function in order to conver the block ID
// uses the passed block hash fetching function in order to convert the block ID
// to the associated block hash.
func deserializeAddrIndexEntry(serialized []byte, entry *TxIndexEntry, fetchBlockHash fetchBlockHashFunc) error {
// Ensure there are enough bytes to decode.
@ -361,7 +362,7 @@ func maxEntriesForLevel(level uint8) int {
return numEntries
}
// dbRemoveAddrIndexEntries removes the specified number of entries from from
// dbRemoveAddrIndexEntries removes the specified number of entries from
// the address index for the provided key. An assertion error will be returned
// if the count exceeds the total number of entries in the index.
func dbRemoveAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, count int) error {
@ -503,7 +504,7 @@ func dbRemoveAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte,
// be half full. When that is the case, move it up a level to
// simplify the code below which backfills all lower levels that
// are still empty. This also means the current level will be
// empty, so the loop will perform another another iteration to
// empty, so the loop will perform another iteration to
// potentially backfill this level with data from the next one.
curLevelMaxEntries := maxEntriesForLevel(level)
if len(levelData)/txEntrySize != curLevelMaxEntries {

View File

@ -19,7 +19,7 @@ const (
maxAllowedOffsetSecs = 70 * 60 // 1 hour 10 minutes
// similarTimeSecs is the number of seconds in either direction from the
// local clock that is used to determine that it is likley wrong and
// local clock that is used to determine that it is likely wrong and
// hence to show a warning.
similarTimeSecs = 5 * 60 // 5 minutes
)

View File

@ -32,7 +32,7 @@ const (
// of work was expended to create a block which satisifies the proof of
// work requirement.
//
// Finally, this notification is only sent if the the chain is believed
// Finally, this notification is only sent if the chain is believed
// to be current and the chain lock is NOT released, so consumers must
// take care to avoid calling blockchain functions to avoid potential
// deadlock.

View File

@ -98,7 +98,7 @@ func (b *BlockChain) processOrphans(hash *chainhash.Hash, flags BehaviorFlags) e
// the block chain along with best chain selection and reorganization.
//
// When no errors occurred during processing, the first return value indicates
// the length of the fork the block extended. In the case it either exteneded
// the length of the fork the block extended. In the case it either extended
// the best chain or is now the tip of the best chain due to causing a
// reorganize, the fork length will be 0. The second return value indicates
// whether or not the block is an orphan, in which case the fork length will

View File

@ -223,7 +223,7 @@ func TestCalcSequenceLock(t *testing.T) {
{
// A transaction with a single input. The input's
// sequence number encodes a relative locktime in blocks
// (3 blocks). The sequence lock should have a value
// of -1 for seconds, but a height of 2 meaning it can
// be included at height 3.
name: "3 blocks",
@ -381,7 +381,7 @@ func TestCalcSequenceLock(t *testing.T) {
// Ensure both the returned sequence lock seconds and block
// height match the expected values.
if seqLock.MinTime != test.want.MinTime {
t.Errorf("%s: mistmached seconds - got %v, want %v",
t.Errorf("%s: mismatched seconds - got %v, want %v",
test.name, seqLock.MinTime, test.want.MinTime)
continue
}

View File

@ -38,7 +38,7 @@ const (
// OP_RETURNs were missing or contained invalid addresses.
ErrSStxInvalidOutputs
// ErrSStxInOutProportions indicates the the number of inputs in an SStx
// ErrSStxInOutProportions indicates the number of inputs in an SStx
// was not equal to the number of output minus one.
ErrSStxInOutProportions

View File

@ -59,7 +59,7 @@ const (
// v: height
//
// 4. BlockUndo
// Block removal data, for reverting the the first 3 database buckets to
// Block removal data, for reverting the first 3 database buckets to
// a previous state.
//
// k: height

View File

@ -15,7 +15,7 @@ const numTicketKeys = 42500
var (
// generatedTicketKeys is used to store ticket keys generated for use
// in the benchmarks so that they only need to be generatd once for all
// in the benchmarks so that they only need to be generated once for all
// benchmarks that use them.
genTicketKeysLock sync.Mutex
generatedTicketKeys []Key

View File

@ -57,7 +57,7 @@ type Immutable struct {
root *treapNode
count int
// totalSize is the best estimate of the total size of of all data in
// totalSize is the best estimate of the total size of all data in
// the treap including the keys, values, and node sizes.
totalSize uint64
}

View File

@ -463,7 +463,7 @@ func TestImmutableDuplicatePut(t *testing.T) {
testTreap = testTreap.Put(key, value)
expectedSize += nodeFieldsSize + uint64(len(key)) + nodeValueSize
// Put a duplicate key with the the expected final value.
// Put a duplicate key with the expected final value.
testTreap = testTreap.Put(key, expectedVal)
// Ensure the key still exists and is the new value.

View File

@ -65,12 +65,12 @@ const (
// hash of the block in which voting was missed.
MaxOutputsPerSSRtx = MaxInputsPerSStx
// SStxPKHMinOutSize is the minimum size of of an OP_RETURN commitment output
// SStxPKHMinOutSize is the minimum size of an OP_RETURN commitment output
// for an SStx tx.
// 20 bytes P2SH/P2PKH + 8 byte amount + 4 byte fee range limits
SStxPKHMinOutSize = 32
// SStxPKHMaxOutSize is the maximum size of of an OP_RETURN commitment output
// SStxPKHMaxOutSize is the maximum size of an OP_RETURN commitment output
// for an SStx tx.
SStxPKHMaxOutSize = 77

View File

@ -190,7 +190,7 @@ func (c *SubsidyCache) CalcBlockSubsidy(height int64) int64 {
// subsidy for the requested interval.
if reqInterval > lastCachedInterval {
// Return zero for all intervals after the subsidy reaches zero. This
// enforces an upper bound on the the number of entries in the cache.
// enforces an upper bound on the number of entries in the cache.
if lastCachedSubsidy == 0 {
return 0
}

View File

@ -188,7 +188,7 @@ func (c *thresholdStateCache) Update(hash chainhash.Hash, state ThresholdStateTu
c.entries[hash] = state
}
// MarkFlushed marks all of the current udpates as flushed to the database.
// MarkFlushed marks all of the current updates as flushed to the database.
// This is useful so the caller can ensure the needed database updates are not
// lost until they have successfully been written to the database.
func (c *thresholdStateCache) MarkFlushed() {
@ -666,9 +666,9 @@ func (b *BlockChain) isFixSeqLocksAgendaActive(prevNode *blockNode) (bool, error
return state.State == ThresholdActive, nil
}
// IsFixSeqLocksAgendaActive returns whether or not whether or not the fix
// sequence locks agenda vote, as defined in DCP0004 has passed and is now
// active for the block AFTER the current best chain block.
// IsFixSeqLocksAgendaActive returns whether or not the fix sequence locks
// agenda vote, as defined in DCP0004 has passed and is now active for the
// block AFTER the current best chain block.
//
// This function is safe for concurrent access.
func (b *BlockChain) IsFixSeqLocksAgendaActive() (bool, error) {

View File

@ -21,7 +21,7 @@ func (s timeSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Less returns whether the timstamp with index i should sort before the
// Less returns whether the timestamp with index i should sort before the
// timestamp with index j. It is part of the sort.Interface implementation.
func (s timeSorter) Less(i, j int) bool {
return s[i] < s[j]

View File

@ -242,7 +242,7 @@ func upgradeToVersion2(db database.DB, chainParams *chaincfg.Params, dbInfo *dat
}
// migrateBlockIndex migrates all block entries from the v1 block index bucket
// manged by ffldb to the v2 bucket managed by this package. The v1 bucket
// managed by ffldb to the v2 bucket managed by this package. The v1 bucket
// stored all block entries keyed by block hash, whereas the v2 bucket stores
// them keyed by block height + hash. Also, the old block index only stored the
// header, while the new one stores all info needed to recreate block nodes.

View File

@ -725,7 +725,7 @@ func checkBlockSanity(block *dcrutil.Block, timeSource MedianTimeSource, flags B
return ruleError(ErrTooManyRevocations, errStr)
}
// A block must only contain stake transactions of the the allowed
// A block must only contain stake transactions of the allowed
// types.
//
// NOTE: This is not possible to hit at the time this comment was
@ -752,7 +752,7 @@ func checkBlockSanity(block *dcrutil.Block, timeSource MedianTimeSource, flags B
return ruleError(ErrFreshStakeMismatch, errStr)
}
// A block header must commit to the the actual number of votes that are
// A block header must commit to the actual number of votes that are
// in the block.
if int64(header.Voters) != totalVotes {
errStr := fmt.Sprintf("block header commitment to %d votes "+
@ -1027,7 +1027,7 @@ func (b *BlockChain) checkBlockHeaderPositional(header *wire.BlockHeader, prevNo
//
// The flags modify the behavior of this function as follows:
// - BFFastAdd: The transactions are not checked to see if they are expired and
// the coinbae height check is not performed.
// the coinbase height check is not performed.
//
// The flags are also passed to checkBlockHeaderPositional. See its
// documentation for how the flags modify its behavior.
@ -1794,7 +1794,7 @@ func checkTicketRedeemerCommitments(ticketHash *chainhash.Hash, ticketOuts []*st
// revocations).
//
// It should be noted that, due to the scaling, the sum of the generated
// amounts for mult-participant votes might be a few atoms less than
// amounts for multi-participant votes might be a few atoms less than
// the full amount and the difference is treated as a standard
// transaction fee.
commitmentAmt := extractTicketCommitAmount(commitmentScript)
@ -1803,7 +1803,7 @@ func checkTicketRedeemerCommitments(ticketHash *chainhash.Hash, ticketOuts []*st
// Ensure the amount paid adheres to the commitment while taking into
// account any fee limits that might be imposed. The output amount must
// exactly match the calculated amount when when not encumbered with a
// exactly match the calculated amount when not encumbered with a
// fee limit. On the other hand, when it is encumbered, it must be
// between the minimum amount imposed by the fee limit and the
// calculated amount.
@ -2096,7 +2096,7 @@ func CheckTransactionInputs(subsidyCache *standalone.SubsidyCache, tx *dcrutil.T
}
}
// Perform additional checks on vote transactions such as verying that the
// Perform additional checks on vote transactions such as verifying that the
// referenced ticket exists, the stakebase input commits to correct subsidy,
// the output amounts adhere to the commitments of the referenced ticket,
// and the ticket maturity requirements are met.

View File

@ -215,7 +215,7 @@ func TestSequenceLocksActive(t *testing.T) {
got := SequenceLockActive(&seqLock, test.blockHeight,
time.Unix(test.medianTime, 0))
if got != test.want {
t.Errorf("%s: mismatched seqence lock status - got %v, "+
t.Errorf("%s: mismatched sequence lock status - got %v, "+
"want %v", test.name, got, test.want)
continue
}

View File

@ -301,7 +301,7 @@ type PeerNotifier interface {
// the passed transactions.
AnnounceNewTransactions(txns []*dcrutil.Tx)
// UpdatePeerHeights updates the heights of all peers who have have
// UpdatePeerHeights updates the heights of all peers who have
// announced the latest connected main chain block, or a recognized orphan.
UpdatePeerHeights(latestBlkHash *chainhash.Hash, latestHeight int64, updateSource *serverPeer)
@ -1322,7 +1322,7 @@ func (b *blockManager) haveInventory(invVect *wire.InvVect) (bool, error) {
return entry != nil && !entry.IsFullySpent(), nil
}
// The requested inventory is is an unsupported type, so just claim
// The requested inventory is an unsupported type, so just claim
// it is known to avoid requesting it.
return true, nil
}
@ -1754,7 +1754,7 @@ func (b *blockManager) handleBlockchainNotification(notification *blockchain.Not
// which could result in a deadlock.
block, ok := notification.Data.(*dcrutil.Block)
if !ok {
bmgrLog.Warnf("New tip block checkedd notification is not a block.")
bmgrLog.Warnf("New tip block checked notification is not a block.")
break
}
@ -1806,7 +1806,7 @@ func (b *blockManager) handleBlockchainNotification(notification *blockchain.Not
// other words, it is extending the shorter side chain. The reorg depth
// would be 106 - (103 - 3) = 6. This should intuitively make sense,
// because if the side chain were to be extended enough to become the
// best chain, it would result in a a reorg that would remove 6 blocks,
// best chain, it would result in a reorg that would remove 6 blocks,
// namely blocks 101, 102, 103, 104, 105, and 106.
blockHash := block.Hash()
bestHeight := band.BestHeight

View File

@ -95,7 +95,7 @@ type Choice struct {
// (abstain) and exist only once in the Vote.Choices array.
IsAbstain bool
// This coince indicates a hard No Vote. By convention this must exist
// This choice indicates a hard No Vote. By convention this must exist
// only once in the Vote.Choices array.
IsNo bool
}
@ -114,7 +114,7 @@ func (v *Vote) VoteIndex(vote uint16) int {
}
const (
// VoteIDMaxBlockSize is the vote ID for the the maximum block size
// VoteIDMaxBlockSize is the vote ID for the maximum block size
// increase agenda used for the hard fork demo.
VoteIDMaxBlockSize = "maxblocksize"
@ -364,7 +364,7 @@ type Params struct {
// SLIP-0044 registered coin type used for BIP44, used in the hierarchical
// deterministic path for address generation.
// All SLIP-0044 registered coin types are are defined here:
// All SLIP-0044 registered coin types are defined here:
// https://github.com/satoshilabs/slips/blob/master/slip-0044.md
SLIP0044CoinType uint32

View File

@ -120,7 +120,7 @@ func main() {
cmd, err := dcrjson.NewCmd(method, params...)
if err != nil {
// Show the error along with its error code when it's a
// dcrjson.Error as it reallistcally will always be since the
// dcrjson.Error as it realistically will always be since the
// NewCmd function is only supposed to return errors of that
// type.
if jerr, ok := err.(dcrjson.Error); ok {

View File

@ -149,7 +149,7 @@ type config struct {
MaxOrphanTxs int `long:"maxorphantx" description:"Max number of orphan transactions to keep in memory"`
Generate bool `long:"generate" description:"Generate (mine) coins using the CPU"`
MiningAddrs []string `long:"miningaddr" description:"Add the specified payment address to the list of addresses to use for generated blocks -- At least one address is required if the generate option is set"`
BlockMinSize uint32 `long:"blockminsize" description:"Mininum block size in bytes to be used when creating a block"`
BlockMinSize uint32 `long:"blockminsize" description:"Minimum block size in bytes to be used when creating a block"`
BlockMaxSize uint32 `long:"blockmaxsize" description:"Maximum block size in bytes to be used when creating a block"`
BlockPrioritySize uint32 `long:"blockprioritysize" description:"Size in bytes for high-priority/low-fee transactions when creating a block"`
SigCacheMaxSize uint `long:"sigcachemaxsize" description:"The maximum number of entries in the signature verification cache"`
@ -267,7 +267,7 @@ func supportedSubsystems() []string {
// the levels accordingly. An appropriate error is returned if anything is
// invalid.
func parseAndSetDebugLevels(debugLevel string) error {
// When the specified string doesn't have any delimters, treat it as
// When the specified string doesn't have any delimiters, treat it as
// the log level for all subsystems.
if !strings.Contains(debugLevel, ",") && !strings.Contains(debugLevel, "=") {
// Validate debug log level.
@ -298,7 +298,7 @@ func parseAndSetDebugLevels(debugLevel string) error {
// Validate subsystem.
if _, exists := subsystemLoggers[subsysID]; !exists {
str := "the specified subsystem [%v] is invalid -- " +
"supported subsytems %v"
"supported subsystems %v"
return fmt.Errorf(str, subsysID, supportedSubsystems())
}
@ -899,7 +899,7 @@ func loadConfig() (*config, []string, error) {
return nil, nil, err
}
// Validate the the minrelaytxfee.
// Validate the minrelaytxfee.
cfg.minRelayTxFee, err = dcrutil.NewAmount(cfg.MinRelayTxFee)
if err != nil {
str := "%s: invalid minrelaytxfee: %v"
@ -924,7 +924,7 @@ func loadConfig() (*config, []string, error) {
return nil, nil, err
}
// Limit the max orphan count to a sane vlue.
// Limit the max orphan count to a sane value.
if cfg.MaxOrphanTxs < 0 {
str := "%s: the maxorphantx option may not be less than 0 " +
"-- parsed [%d]"
@ -981,7 +981,7 @@ func loadConfig() (*config, []string, error) {
// !--nocfilters and --dropcfindex do not mix.
if !cfg.NoCFilters && cfg.DropCFIndex {
err := errors.New("dropcfindex cannot be actived without nocfilters")
err := errors.New("dropcfindex cannot be activated without nocfilters")
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err

View File

@ -459,7 +459,7 @@ func TestNetworkFailure(t *testing.T) {
// TestStopFailed tests that failed connections are ignored after connmgr is
// stopped.
//
// We have a dailer which sets the stop flag on the conn manager and returns an
// We have a dialer which sets the stop flag on the conn manager and returns an
// err so that the handler assumes that the conn manager is stopped and ignores
// the failure.
func TestStopFailed(t *testing.T) {

View File

@ -91,7 +91,7 @@ type cpuminerConfig struct {
// block chain is current. This is used by the automatic persistent
// mining routine to determine whether or it should attempt mining.
// This is useful because there is no point in mining if the chain is
// not current since any solved blocks would be on a side chain and and
// not current since any solved blocks would be on a side chain and
// up orphaned anyways.
IsCurrent func() bool
}

View File

@ -36,7 +36,7 @@ type Driver struct {
var drivers = make(map[string]*Driver)
// RegisterDriver adds a backend database driver to available interfaces.
// ErrDbTypeRegistered will be retruned if the database type for the driver has
// ErrDbTypeRegistered will be returned if the database type for the driver has
// already been registered.
func RegisterDriver(driver Driver) error {
if _, exists := drivers[driver.DbType]; exists {
@ -63,7 +63,7 @@ func SupportedDrivers() []string {
// arguments are specific to the database type driver. See the documentation
// for the database driver for further details.
//
// ErrDbUnknownType will be returned if the the database type is not registered.
// ErrDbUnknownType will be returned if the database type is not registered.
func Create(dbType string, args ...interface{}) (DB, error) {
drv, exists := drivers[dbType]
if !exists {
@ -78,7 +78,7 @@ func Create(dbType string, args ...interface{}) (DB, error) {
// specific to the database type driver. See the documentation for the database
// driver for further details.
//
// ErrDbUnknownType will be returned if the the database type is not registered.
// ErrDbUnknownType will be returned if the database type is not registered.
func Open(dbType string, args ...interface{}) (DB, error) {
drv, exists := drivers[dbType]
if !exists {
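The registration pattern above is what Create and Open build on. A minimal usage sketch, assuming the "ffldb" backend name and the (path, network) argument order used elsewhere in this repository; the import paths are abbreviated guesses:

package main

import (
	"fmt"
	"os"

	"github.com/decred/dcrd/database"
	// Importing the driver package registers the "ffldb" backend via its
	// init function, which calls database.RegisterDriver.
	_ "github.com/decred/dcrd/database/ffldb"
	"github.com/decred/dcrd/wire"
)

func main() {
	// Without the ffldb import above, Create would fail with
	// ErrDbUnknownType since no driver would be registered.
	db, err := database.Create("ffldb", "/tmp/example-db", wire.MainNet)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer db.Close()
}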

View File

@ -22,7 +22,7 @@ var (
)
// checkDbError ensures the passed error is a database.Error with an error code
// that matches the passed error code.
func checkDbError(t *testing.T, testName string, gotErr error, wantErrCode database.ErrorCode) bool {
dbErr, ok := gotErr.(database.Error)
if !ok {

View File

@ -82,14 +82,14 @@ const (
// ErrKeyRequired indicates at attempt to insert a zero-length key.
ErrKeyRequired
// ErrKeyTooLarge indicates an attmempt to insert a key that is larger
// ErrKeyTooLarge indicates an attempt to insert a key that is larger
// than the max allowed key size. The max key size depends on the
// specific backend driver being used. As a general rule, key sizes
// should be relatively, so this should rarely be an issue.
ErrKeyTooLarge
// ErrValueTooLarge indicates an attmpt to insert a value that is larger
// than max allowed value size. The max key size depends on the
// ErrValueTooLarge indicates an attempt to insert a value that is
// larger than max allowed value size. The max key size depends on the
// specific backend driver being used.
ErrValueTooLarge

View File

@ -4,7 +4,7 @@
// license that can be found in the LICENSE file.
/*
This test file is part of the database package rather than than the
This test file is part of the database package rather than the
database_test package so it can bridge access to the internals to properly test
cases which are either not possible or can't reliably be tested via the public
interface. The functions, constants, and variables are only exported while the

View File

@ -134,10 +134,10 @@ type blockStore struct {
// lruMutex protects concurrent access to the least recently used list
// and lookup map.
//
// openBlocksLRU tracks how the open files are refenced by pushing the
// openBlocksLRU tracks how the open files are referenced by pushing the
// most recently used files to the front of the list thereby trickling
// the least recently used files to end of the list. When a file needs
// to be closed due to exceeding the the max number of allowed open
// to be closed due to exceeding the max number of allowed open
// files, the one at the end of the list is closed.
//
// fileNumToLRUElem is a mapping between a specific block file number
@ -744,7 +744,7 @@ func scanBlockFiles(dbPath string) (int, uint32) {
// and offset set and all fields initialized.
func newBlockStore(basePath string, network wire.CurrencyNet) *blockStore {
// Look for the end of the latest block to file to determine what the
// write cursor position is from the viewpoing of the block files on
// write cursor position is from the viewpoint of the block files on
// disk.
fileNum, fileOff := scanBlockFiles(basePath)
if fileNum == -1 {

View File

@ -132,7 +132,7 @@ func makeDbErr(c database.ErrorCode, desc string, err error) database.Error {
}
// convertErr converts the passed leveldb error into a database error with an
// equivalent error code and the passed description. It also sets the passed
// error as the underlying error.
func convertErr(desc string, ldbErr error) database.Error {
// Use the driver-specific error code by default. The code below will
@ -1015,7 +1015,7 @@ func (tx *transaction) notifyActiveIters() {
tx.activeIterLock.RUnlock()
}
// checkClosed returns an error if the the database or transaction is closed.
// checkClosed returns an error if the database or transaction is closed.
func (tx *transaction) checkClosed() error {
// The transaction is no longer valid if it has been closed.
if tx.closed {
@ -1090,7 +1090,7 @@ func (tx *transaction) deleteKey(key []byte, notifyIterators bool) {
// transaction commit if needed.
tx.pendingKeys.Delete(key)
// Add the key to the list to be deleted on transaction commit.
tx.pendingRemove.Put(key, nil)
// Notify the active iterators about the change if the flag is set.

View File

@ -468,9 +468,9 @@ func (c *dbCache) commitTreaps(pendingKeys, pendingRemove TreapForEacher) error
})
}
// flush flushes the database cache to persistent storage. This involes syncing
// the block store and replaying all transactions that have been applied to the
// cache to the underlying database.
// flush flushes the database cache to persistent storage. This involves
// syncing the block store and replaying all transactions that have been
// applied to the cache to the underlying database.
//
// This function MUST be called with the database write lock held.
func (c *dbCache) flush() error {

View File

@ -79,7 +79,7 @@ func init() {
UseLogger: useLogger,
}
if err := database.RegisterDriver(driver); err != nil {
panic(fmt.Sprintf("Failed to regiser database driver '%s': %v",
panic(fmt.Sprintf("Failed to register database driver '%s': %v",
dbType, err))
}
}

View File

@ -4,7 +4,7 @@
// license that can be found in the LICENSE file.
/*
This test file is part of the ffldb package rather than than the ffldb_test
This test file is part of the ffldb package rather than the ffldb_test
package so it can bridge access to the internals to properly test cases which
are either not possible or can't reliably be tested via the public interface.
The functions are only exported while the tests are being run.

View File

@ -89,7 +89,7 @@ func loadBlocks(t *testing.T, dataFile string, network wire.CurrencyNet) ([]*dcr
}
// checkDbError ensures the passed error is a database.Error with an error code
// that matches the passed error code.
func checkDbError(t *testing.T, testName string, gotErr error, wantErrCode database.ErrorCode) bool {
dbErr, ok := gotErr.(database.Error)
if !ok {
@ -230,7 +230,7 @@ func testDeleteValues(tc *testContext, bucket database.Bucket, values []keyPair)
return true
}
// testCursorInterface ensures the cursor itnerface is working properly by
// testCursorInterface ensures the cursor interface is working properly by
// exercising all of its functions on the passed bucket.
func testCursorInterface(tc *testContext, bucket database.Bucket) bool {
// Ensure a cursor can be obtained for the bucket.
@ -615,7 +615,7 @@ func rollbackOnPanic(t *testing.T, tx database.Tx) {
func testMetadataManualTxInterface(tc *testContext) bool {
// populateValues tests that populating values works as expected.
//
// When the writable flag is false, a read-only tranasction is created,
// When the writable flag is false, a read-only transaction is created,
// standard bucket tests for read-only transactions are performed, and
// the Commit function is checked to ensure it fails as expected.
//
@ -1189,7 +1189,7 @@ func testFetchBlockIOMissing(tc *testContext, tx database.Tx) bool {
// testFetchBlockIO ensures all of the block retrieval API functions work as
// expected for the provide set of blocks. The blocks must already be stored in
// the database, or at least stored into the the passed transaction. It also
// the database, or at least stored into the passed transaction. It also
// tests several error conditions such as ensuring the expected errors are
// returned when fetching blocks, headers, and regions that don't exist.
func testFetchBlockIO(tc *testContext, tx database.Tx) bool {

View File

@ -84,7 +84,7 @@ func loadBlocks(t *testing.T, dataFile string, network wire.CurrencyNet) ([]*dcr
}
// checkDbError ensures the passed error is a database.Error with an error code
// that matches the passed error code.
func checkDbError(t *testing.T, testName string, gotErr error, wantErrCode database.ErrorCode) bool {
dbErr, ok := gotErr.(database.Error)
if !ok {
@ -142,7 +142,7 @@ func TestConvertErr(t *testing.T) {
func TestCornerCases(t *testing.T) {
t.Parallel()
// Create a file at the datapase path to force the open below to fail.
// Create a file at the database path to force the open below to fail.
dbPath := filepath.Join(os.TempDir(), "ffldb-errors-v2")
_ = os.RemoveAll(dbPath)
fi, err := os.Create(dbPath)

View File

@ -449,7 +449,7 @@ type DB interface {
//
// NOTE: The transaction must be closed by calling Rollback or Commit on
// it when it is no longer needed. Failure to do so can result in
// unclaimed memory and/or inablity to close the database due to locks
// unclaimed memory and/or inability to close the database due to locks
// depending on the specific database implementation.
Begin(writable bool) (Tx, error)

View File

@ -8,7 +8,7 @@ treap
Package treap implements a treap data structure that is used to hold ordered
key/value pairs using a combination of binary search tree and heap semantics.
It is a self-organizing and randomized data structure that doesn't require
complex operations to to maintain balance. Search, insert, and delete
complex operations to maintain balance. Search, insert, and delete
operations are all O(log n). Both mutable and immutable variants are provided.
The mutable variant is typically faster since it is able to simply update the

View File

@ -7,7 +7,7 @@
Package treap implements a treap data structure that is used to hold ordered
key/value pairs using a combination of binary search tree and heap semantics.
It is a self-organizing and randomized data structure that doesn't require
complex operations to to maintain balance. Search, insert, and delete
complex operations to maintain balance. Search, insert, and delete
operations are all O(log n). Both mutable and immutable variants are provided.
The mutable variant is typically faster since it is able to simply update the

View File

@ -41,7 +41,7 @@ type Immutable struct {
root *treapNode
count int
// totalSize is the best estimate of the total size of of all data in
// totalSize is the best estimate of the total size of all data in
// the treap including the keys, values, and node sizes.
totalSize uint64
}
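Several hunks in this commit touch the treap package, so a brief usage sketch may help. It is written as a fragment as if inside the package (the import path is internal) and assumes the NewImmutable/Put/Get/Len API exercised by the package's tests:

// Each Put on an Immutable treap returns a new version; earlier
// versions remain valid and unchanged.
t0 := NewImmutable()
t1 := t0.Put([]byte("k"), []byte("v"))

// t0.Len() == 0 and t1.Len() == 1: the original version is untouched.
// t1.Get([]byte("k")) returns []byte("v"), while t0.Get returns nil.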

View File

@ -345,7 +345,7 @@ func TestImmutableDuplicatePut(t *testing.T) {
testTreap = testTreap.Put(key, key)
expectedSize += nodeFieldsSize + uint64(len(key)+len(key))
// Put a duplicate key with the the expected final value.
// Put a duplicate key with the expected final value.
testTreap = testTreap.Put(key, expectedVal)
// Ensure the key still exists and is the new value.

View File

@ -19,7 +19,7 @@ type Mutable struct {
root *treapNode
count int
// totalSize is the best estimate of the total size of of all data in
// totalSize is the best estimate of the total size of all data in
// the treap including the keys, values, and node sizes.
totalSize uint64
}

View File

@ -19,7 +19,7 @@ import (
// negative or not. Remember, in affine EC space, the negative
// is P - positiveX. The rest of the 255 bits then represent
// the Y-value in little endian.
// 2) For high effiency, 40 byte field elements (10x int32s) are
// 2) For high efficiency, 40 byte field elements (10x int32s) are
// often used to represent integers.
// 3) For further increases in efficiency, the affine (cartesian)
// coordinates are converted into projective (extended or non-

View File

@ -185,7 +185,7 @@ func TestConversion(t *testing.T) {
t.Fatalf("expected %v, got %v", true, cmp)
}
// Asert our results.
// Assert our results.
encodedNumStr := encodedNumToStrSet[encodedNumToStrIdx]
cmp = encodedNumStr == big.String()
if !cmp {

View File

@ -23,7 +23,7 @@ const (
)
// PrivateKey wraps an ecdsa.PrivateKey as a convenience mainly for signing
// things with the the private key without having to directly import the ecdsa
// things with the private key without having to directly import the ecdsa
// package.
type PrivateKey struct {
ecPk *ecdsa.PrivateKey

View File

@ -29,7 +29,7 @@ package secp256k1
// arithmetic between each array element which would lead to expensive carry
// propagation.
//
// Given the above, this implementation represents the the field elements as
// Given the above, this implementation represents the field elements as
// 10 uint32s with each word (array entry) treated as base 2^26. This was
// chosen for the following reasons:
// 1) Most systems at the current time are 64-bit (or at least have 64-bit
@ -37,7 +37,7 @@ package secp256k1
// intermediate results can typically be done using a native register (and
// using uint64s to avoid the need for additional half-word arithmetic)
// 2) In order to allow addition of the internal words without having to
// propagate the the carry, the max normalized value for each register must
// propagate the carry, the max normalized value for each register must
// be less than the number of bits available in the register
// 3) Since we're dealing with 32-bit values, 64-bits of overflow is a
// reasonable choice for #2
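The base-2^26 packing described in the list above can be illustrated with a short standalone sketch; this is not the package's actual representation code, just the word-splitting idea:

package main

import (
	"fmt"
	"math/big"
)

// split packs a 256-bit integer into ten 26-bit little-endian words,
// mirroring the 10-uint32 layout described above; the final word only
// needs the remaining 22 bits.
func split(v *big.Int) [10]uint32 {
	var words [10]uint32
	mask := big.NewInt(1<<26 - 1)
	t := new(big.Int).Set(v)
	for i := range words {
		words[i] = uint32(new(big.Int).And(t, mask).Uint64())
		t.Rsh(t, 26)
	}
	return words
}

func main() {
	v, _ := new(big.Int).SetString("ffffffffffffffff", 16)
	fmt.Printf("%x\n", split(v)) // [3ffffff 3ffffff fff 0 0 0 0 0 0 0]
}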
@ -478,8 +478,8 @@ func (f *fieldVal) NegateVal(val *fieldVal, magnitude uint32) *fieldVal {
// already larger than the modulus and congruent to 7 (mod 12). When a
// value is already in the desired range, its magnitude is 1. Since 19
// is an additional "step", its magnitude (mod 12) is 2. Since any
// multiple of the modulus is conguent to zero (mod m), the answer can
// be shortcut by simply mulplying the magnitude by the modulus and
// multiple of the modulus is congruent to zero (mod m), the answer can
// be shortcut by simply multiplying the magnitude by the modulus and
// subtracting. Keeping with the example, this would be (2*12)-19 = 5.
f.n[0] = (magnitude+1)*fieldPrimeWordZero - val.n[0]
f.n[1] = (magnitude+1)*fieldPrimeWordOne - val.n[1]
@ -891,7 +891,7 @@ func (f *fieldVal) Square() *fieldVal {
// SquareVal squares the passed value and stores the result in f. Note that
// this function can overflow if multiplying any of the individual words
// exceeds a max uint32. In practice, this means the magnitude of the field
// being squred must be a max of 8 to prevent overflow.
// being squared must be a max of 8 to prevent overflow.
//
// The field value is returned to support chaining. This enables syntax like:
// f3.SquareVal(f).Mul(f) so that f3 = f^2 * f = f^3.
@ -1117,14 +1117,14 @@ func (f *fieldVal) SquareVal(val *fieldVal) *fieldVal {
// f.Inverse().Mul(f2) so that f = f^-1 * f2.
func (f *fieldVal) Inverse() *fieldVal {
// Fermat's little theorem states that for a nonzero number a and prime
// prime p, a^(p-1) = 1 (mod p). Since the multipliciative inverse is
// prime p, a^(p-1) = 1 (mod p). Since the multiplicative inverse is
// a*b = 1 (mod p), it follows that b = a*a^(p-2) = a^(p-1) = 1 (mod p).
// Thus, a^(p-2) is the multiplicative inverse.
//
// In order to efficiently compute a^(p-2), p-2 needs to be split into
// a sequence of squares and multipications that minimizes the number of
// multiplications needed (since they are more costly than squarings).
// Intermediate results are saved and reused as well.
// a sequence of squares and multiplications that minimizes the number
// of multiplications needed (since they are more costly than
// squarings). Intermediate results are saved and reused as well.
//
// The secp256k1 prime - 2 is 2^256 - 4294968275.
//

View File

@ -13,7 +13,7 @@ import (
)
// PrivateKey wraps an ecdsa.PrivateKey as a convenience mainly for signing
// things with the the private key without having to directly import the ecdsa
// things with the private key without having to directly import the ecdsa
// package.
type PrivateKey ecdsa.PrivateKey

View File

@ -55,7 +55,7 @@ const (
// unusable.
ErrBadNonce
// ErrZeroSigS indates a zero signature S value, which is invalid.
// ErrZeroSigS indicates a zero signature S value, which is invalid.
ErrZeroSigS
// ErrNonmatchingR indicates that all signatures to be combined in a

View File

@ -347,12 +347,12 @@ func recoverKeyFromSignature(sig *Signature, msg []byte,
}
// SignCompact produces a compact signature of the data in hash with the given
// private key on the given koblitz curve. The isCompressed parameter should
// be used to detail if the given signature should reference a compressed
// public key or not. If successful the bytes of the compact signature will be
// returned in the format:
// <(byte of 27+public key solution)+4 if compressed >< padded bytes for signature R><padded bytes for signature S>
// where the R and S parameters are padde up to the bitlengh of the curve.
// where the R and S parameters are padded up to the bitlengh of the curve.
func SignCompact(key *PrivateKey,
hash []byte, isCompressedKey bool) ([]byte, error) {
sig, err := key.Sign(hash)

View File

@ -489,7 +489,7 @@ func assignField(paramNum int, fieldName string, dest reflect.Value, src reflect
// by this package are already registered by default.
//
// The arguments are most efficient when they are the exact same type as the
// underlying field in the command struct associated with the the method,
// underlying field in the command struct associated with the method,
// however this function also will perform a variety of conversions to make it
// more flexible. This allows, for example, command line args which are strings
// to be passed unaltered. In particular, the following conversions are

View File

@ -9,7 +9,7 @@ Package dcrjson provides infrastructure for working with Decred JSON-RPC APIs.
Overview
When communicating via the JSON-RPC protocol, all requests and responses must be
marshalled to and from the the wire in the appropriate format. This package
marshalled to and from the wire in the appropriate format. This package
provides infrastructure and primitives to ease this process.
JSON-RPC Protocol Overview

View File

@ -31,7 +31,7 @@ const (
// embedded type which is not not supported.
ErrEmbeddedType
// ErrUnexportedField indiciates the provided command struct contains an
// ErrUnexportedField indicates the provided command struct contains an
// unexported field which is not supported.
ErrUnexportedField
@ -59,7 +59,7 @@ const (
// help is missing.
ErrMissingDescription
// ErrNumParams inidcates the number of params supplied do not
// ErrNumParams indicates the number of params supplied do not
// match the requirements of the associated command.
ErrNumParams

View File

@ -713,8 +713,8 @@ func TestGenerateHelpErrors(t *testing.T) {
}
// TestGenerateHelp performs a very basic test to ensure GenerateHelp is working
// as expected. The internal are testd much more thoroughly in other tests, so
// there is no need to add more tests here.
// as expected. The internals are tested much more thoroughly in other tests,
// so there is no need to add more tests here.
func TestGenerateHelp(t *testing.T) {
t.Parallel()

View File

@ -25,7 +25,7 @@ const (
// UFWebsocketOnly indicates that the command can only be used when
// communicating with an RPC server over websockets. This typically
// applies to notifications and notification registration functions
// since neiher makes since when using a single-shot HTTP-POST request.
// since neither makes sense when using a single-shot HTTP-POST request.
UFWebsocketOnly
// UFNotification indicates that the command is actually a notification.
@ -245,7 +245,7 @@ func Register(method interface{}, params interface{}, flags UsageFlag) error {
}
}
// Ensure the default value can be unsmarshalled into the type
// Ensure the default value can be unmarshalled into the type
// and that defaults are only specified for optional fields.
if tag := rtf.Tag.Get("jsonrpcdefault"); tag != "" {
if !isOptional {

View File

@ -36,7 +36,7 @@ func (e ErrWrongWIFNetwork) Error() string {
// WIF contains the individual components described by the Wallet Import Format
// (WIF). A WIF string is typically used to represent a private key and its
// associated address in a way that may be easily copied and imported into or
// exported from wallet software. WIF strings may be decoded into this
// structure by calling DecodeWIF or created with a user-provided private key
// by calling NewWIF.

doc.go
View File

@ -108,7 +108,7 @@ Application Options:
addresses to use for generated blocks -- At least
one address is required if the generate option is
set
--blockminsize= Mininum block size in bytes to be used when creating
--blockminsize= Minimum block size in bytes to be used when creating
a block
--blockmaxsize= Maximum block size in bytes to be used when creating
a block (375000)

View File

@ -259,7 +259,7 @@ Here is how the right prefix for a commit is chosen.
Here are some of the reasons why wrapping your commit messages to 72 columns is
a good thing.
- git log doesnt do any special special wrapping of the commit messages. With
- git log doesnt do any special wrapping of the commit messages. With
the default pager of less -S, this means your paragraphs flow far off the edge
of the screen, making them difficult to read. On an 80 column terminal, if we
subtract 4 columns for the indent on the left and 4 more for symmetry on the

View File

@ -641,7 +641,7 @@ func (stats *Estimator) removeFromMemPool(blocksInMemPool int32, rate feeRate) {
// all higher fee buckets have >= successPct transactions confirmed in at most
// `targetConfs` confirmations.
// Note that sometimes the requested combination of targetConfs and successPct is
// not achieveable (hypothetical example: 99% of txs confirmed within 1 block)
// not achievable (hypothetical example: 99% of txs confirmed within 1 block)
// or there are not enough recorded statistics to derive a successful estimate
// (eg: confirmation tracking has only started or there was a period of very few
// transactions). In those situations, the appropriate error is returned.

ipc.go
View File

@ -29,7 +29,7 @@ var outgoingPipeMessages = make(chan pipeMessage)
// serviceControlPipeRx reads from the file descriptor fd of a read end pipe.
// This is intended to be used as a simple control mechanism for parent
// processes to communicate with and and manage the lifetime of a dcrd child
// processes to communicate with and manage the lifetime of a dcrd child
// process using a unidirectional pipe (on Windows, this is an anonymous pipe,
// not a named pipe).
//

log.go
View File

@ -36,7 +36,7 @@ func (logWriter) Write(p []byte) (n int, err error) {
return len(p), nil
}
// Loggers per subsystem. A single backend logger is created and all subsytem
// Loggers per subsystem. A single backend logger is created and all subsystem
// loggers created from it will write to the backend. When adding new
// subsystems, add the subsystem logger variable here and to the
// subsystemLoggers map.

View File

@ -12,15 +12,15 @@ Package lru implements a generic least-recently-used cache with near O(1) perf.
A least-recently-used (LRU) cache is a cache that holds a limited number of
items with an eviction policy such that when the capacity of the cache is
exceeded, the least-recently-used item is automatically removed when inserting a
new item. The meaining of used in this implementation is either accessing the
new item. The meaning of used in this implementation is either accessing the
item via a lookup or adding the item into the cache, including when the item
already exists.
## External Use
This package has intentionally been designed so it can be used as a standalone
package for any projects needing to make use of a well-test and conccurrent safe
least-recently-used cache with near O(1) performance characteristics for
package for any projects needing to make use of a well-tested and concurrent
safe least-recently-used cache with near O(1) performance characteristics for
lookups, inserts, and deletions.
## Installation and Updating


@ -58,7 +58,7 @@ func (m *Cache) Add(item interface{}) {
return
}
// Evict the least recently used entry (back of the list) if the the new
// Evict the least recently used entry (back of the list) if the new
// entry would exceed the size limit for the cache. Also reuse the list
// node so a new one doesn't have to be allocated.
if uint(len(m.cache))+1 > m.limit {
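	// What follows is a hedged sketch of the elided body (field names such
	// as m.list are assumptions): recycle the evicted back node for the
	// new item instead of allocating a fresh one.
	node := m.list.Back()       // least recently used entry
	delete(m.cache, node.Value) // drop it from the lookup map
	node.Value = item           // reuse the list node for the new entry
	m.list.MoveToFront(node)
	m.cache[item] = node
	return
}
m.cache[item] = m.list.PushFront(item)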


@ -34,7 +34,7 @@ func TestCache(t *testing.T) {
testLoop:
for i, test := range tests {
// Create a new lru cache limited by the specified test limit and add
// all of the test vectors. This will cause evicition since there are
// all of the test vectors. This will cause eviction since there are
// more test items than the limits.
cache := NewCache(uint(test.limit))
for j := 0; j < numNonces; j++ {


@ -10,7 +10,7 @@ LRU Cache
A least-recently-used (LRU) cache is a cache that holds a limited number of
items with an eviction policy such that when the capacity of the cache is
exceeded, the least-recently-used item is automatically removed when inserting a
new item. The meaining of used in this implementation is either accessing the
new item. The meaning of used in this implementation is either accessing the
item via a lookup or adding the item into the cache, including when the item
already exists.
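As a rough sketch of those semantics (illustrative only, not this package's actual API), a map paired with the standard library's container/list captures both senses of "used":

package lrusketch

import "container/list"

// lruSketch holds up to limit items; the list front is most recently used.
type lruSketch struct {
	limit uint
	order *list.List
	items map[interface{}]*list.Element
}

// contains reports whether item is cached. Per the semantics above, a
// successful lookup counts as a use and refreshes the item's position.
func (c *lruSketch) contains(item interface{}) bool {
	node, ok := c.items[item]
	if ok {
		c.order.MoveToFront(node)
	}
	return ok
}

// add inserts item, counting as a use even when it already exists, and
// evicts the least-recently-used entry when capacity would be exceeded.
func (c *lruSketch) add(item interface{}) {
	if node, ok := c.items[item]; ok {
		c.order.MoveToFront(node)
		return
	}
	if uint(len(c.items))+1 > c.limit {
		back := c.order.Back()
		delete(c.items, back.Value)
		c.order.Remove(back)
	}
	c.items[item] = c.order.PushFront(item)
}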


@ -1209,7 +1209,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *dcrutil.Tx, isNew, rateLimit, allow
// calculated below on its own would encourage several small
// transactions to avoid fees rather than one single larger transaction
// which is more desirable. Therefore, as long as the size of the
// transaction does not exceeed 1000 less than the reserved space for
// transaction does not exceed 1000 less than the reserved space for
// high-priority transactions, don't require a fee for it.
// This applies to non-stake transactions only.
serializedSize := int64(msgTx.SerializeSize())
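// What follows is a hedged sketch of that allowance (the constant and
// helper names here are assumptions for illustration, not the pool's
// exact code):
minFee := calcMinRequiredTxRelayFee(serializedSize, mp.cfg.Policy.MinRelayTxFee)
if !isStakeTx && serializedSize < blockPrioritySize-1000 {
	// Small enough to leave 1000 bytes of headroom in the space reserved
	// for high-priority transactions, so no fee is required.
	minFee = 0
}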


@ -263,7 +263,7 @@ func (s *fakeChain) SetStandardVerifyFlags(flags txscript.ScriptFlags) {
}
// FakeUxtoMedianTime returns the median time associated with the requested utxo
// from the cake chain instance.
// from the fake chain instance.
func (s *fakeChain) FakeUxtoMedianTime(prevOut *wire.OutPoint) int64 {
s.RLock()
medianTime := s.utxoTimes[*prevOut]
@ -1273,7 +1273,7 @@ func TestExpirationPruning(t *testing.T) {
}
// TestBasicOrphanRemoval ensure that orphan removal works as expected when an
// orphan that doesn't exist is removed both when there is another orphan that
// orphan that doesn't exist is removed both when there is another orphan that
// redeems it and when there is not.
func TestBasicOrphanRemoval(t *testing.T) {
t.Parallel()


@ -2139,7 +2139,7 @@ type BgBlkTmplGenerator struct {
prng *rand.Rand
// These fields are provided by the caller when the generator is created and
// are either indepedently safe for concurrent access or do not change after
// are either independently safe for concurrent access or do not change after
// initialization.
//
// chain is the blockchain instance that is used to build the block and
@ -2472,9 +2472,9 @@ type regenHandlerState struct {
// been received in order to give the minimum number of required votes
// needed to build a block template on it an opportunity to propagate before
// attempting to find any other variants that extend the same parent as the
// current tip with enough votes to force a reorganation. This ensures the
// current tip with enough votes to force a reorganization. This ensures the
// first block that is seen is chosen to build templates on so long as it
// receives the mininum required votes in order to prevent PoW miners from
// receives the minimum required votes in order to prevent PoW miners from
// being able to gain an advantage through vote withholding. It is disabled
// if the minimum number of votes is received prior to the timeout.
awaitingSideChainMinVotes map[chainhash.Hash]struct{}
@ -2699,7 +2699,7 @@ func (g *BgBlkTmplGenerator) handleBlockConnected(ctx context.Context, state *re
// them an opportunity to propagate before attempting to find any other
// variants that extend the same parent with enough votes to force a
// reorganization. This ensures the first block that is seen is chosen to
// build templates on so long as it receives the mininum required votes in
// build templates on so long as it receives the minimum required votes in
// order to prevent PoW miners from being able to gain an advantage through
// vote withholding.
//
@ -2776,7 +2776,7 @@ func (g *BgBlkTmplGenerator) handleBlockAccepted(ctx context.Context, state *reg
// Ignore side chain blocks when the current tip already has enough votes
// for a template to be built on it. This ensures the first block that is
// seen is chosen to build templates on so long as it receives the mininum
// seen is chosen to build templates on so long as it receives the minimum
// required votes in order to prevent PoW miners from being able to gain an
// advantage through vote withholding.
if state.awaitingMinVotesHash == nil {
@ -2840,7 +2840,7 @@ func (g *BgBlkTmplGenerator) handleVote(ctx context.Context, state *regenHandler
voteTx.Hash(), minVotesHash, numVotes)
if numVotes >= g.minVotesRequired {
// Ensure the next template generated builds on the tip and clear
// all vote tracking to lock the current current tip in now that it
// all vote tracking to lock the current tip in now that it
// has the minimum required votes.
state.stopRegenTimer()
state.failedGenRetryTimeout = nil
@ -3078,7 +3078,7 @@ func (g *BgBlkTmplGenerator) tipSiblingsSortedByVotes(state *regenHandlerState)
}
// handleTrackSideChainsTimeout handles potentially reorganizing the chain to a
// side chain block with the most votes in the case the the minimum number of
// side chain block with the most votes in the case the minimum number of
// votes needed to build a block template on the current tip have not been
// received within a certain timeout.
//
@ -3185,7 +3185,7 @@ func (g *BgBlkTmplGenerator) regenHandler(ctx context.Context) {
// find any other variants that extend the same parent as the current
// tip with enough votes to force a reorganization. This ensures the
// first block that is seen is chosen to build templates on so long as
// it receives the mininum required votes in order to prevent PoW miners
// it receives the minimum required votes in order to prevent PoW miners
// from being able to gain an advantage through vote withholding. It is
// disabled if the minimum number of votes is received prior to the
// timeout.
@ -3270,7 +3270,7 @@ func (g *BgBlkTmplGenerator) BlockDisconnected(block *dcrutil.Block) {
}
// VoteReceived informs the background block template generator that a new vote
// has been received. It is the caller's reponsibility to ensure this is only
// has been received. It is the caller's responsibility to ensure this is only
// invoked with valid votes.
//
// This function is safe for concurrent access.
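Distilled to predicates, the vote gating these comments describe behaves roughly as below; this is a sketch with assumed names, not the generator's actual fields or logic.

// canBuildOnTip reports whether the tip has enough votes for a template.
func canBuildOnTip(tipVotes, minVotesRequired int) bool {
	// The first block seen is locked in once it reaches the threshold.
	return tipVotes >= minVotesRequired
}

// mayConsiderSiblings reports whether side-chain variants may be evaluated.
func mayConsiderSiblings(tipVotes, minVotesRequired int, trackingTimedOut bool) bool {
	// Siblings are only considered after the tip has failed to reach the
	// threshold before the tracking timeout, which blunts any advantage
	// from withholding votes.
	return tipVotes < minVotesRequired && trackingTimedOut
}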


@ -31,7 +31,7 @@ type TxDesc struct {
// Added is the time when the entry was added to the source pool.
Added time.Time
// Height is the block height when the entry was added to the the source
// Height is the block height when the entry was added to the source
// pool.
Height int64


@ -46,7 +46,7 @@ func checkPowLimitsAreConsistent(t *testing.T, params *chaincfg.Params) {
// Header bits of each block define target difficulty of the subsequent block.
//
// The first few solved blocks of the network will inherit the genesis block
// bits value before the difficulty reajustment takes place.
// bits value before the difficulty readjustment takes place.
//
// Solved block shouldn't be rejected due to the PoW limit check.
//


@ -703,7 +703,7 @@ func (p *Peer) LastRecv() time.Time {
// LocalAddr returns the local address of the connection.
//
// This function is safe fo concurrent access.
// This function is safe for concurrent access.
func (p *Peer) LocalAddr() net.Addr {
var localAddr net.Addr
if p.Connected() {


@ -1252,7 +1252,7 @@ func TestChainSvrCmds(t *testing.T) {
}
// TestChainSvrCmdErrors ensures any errors that occur in the command during
// custom mashal and unmarshal are as expected.
// custom marshal and unmarshal are as expected.
func TestChainSvrCmdErrors(t *testing.T) {
t.Parallel()


@ -11,7 +11,7 @@ import (
)
// TestChainSvrCustomResults ensures any results that have custom marshalling
// work as inteded.
// work as intended.
// and unmarshal code of results are as expected.
func TestChainSvrCustomResults(t *testing.T) {
t.Parallel()
@ -79,7 +79,7 @@ func TestChainSvrCustomResults(t *testing.T) {
continue
}
if string(marshalled) != test.expected {
t.Errorf("Test #%d (%s) unexpected marhsalled data - "+
t.Errorf("Test #%d (%s) unexpected marshalled data - "+
"got %s, want %s", i, test.name, marshalled,
test.expected)
continue


@ -9,7 +9,7 @@ JSON-RPC commands, return values, and notifications.
Overview
When communicating via the JSON-RPC protocol, all requests and responses must be
marshalled to and from the the wire in the appropriate format. This package
marshalled to and from the wire in the appropriate format. This package
provides data structures and primitives that are registered with dcrjson to ease
this process. An overview specific to this package is provided here, however it
is also instructive to read the documentation for the dcrjson package
@ -18,7 +18,7 @@ is also instructive to read the documentation for the dcrjson package
Marshalling and Unmarshalling
The types in this package map to the required parts of the protocol as discussed
in the dcrjson documention
in the dcrjson documentation
- Request Objects (type Request)
- Commands (type <Foo>Cmd)

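For orientation, such a command marshals to a standard JSON-RPC request; the sketch below uses plain encoding/json for illustration rather than this package's own helpers, whose exact signatures are not shown in this diff.

package main

import (
	"encoding/json"
	"fmt"
)

// request mirrors the Request object shape described above.
type request struct {
	Jsonrpc string        `json:"jsonrpc"`
	Method  string        `json:"method"`
	Params  []interface{} `json:"params"`
	ID      interface{}   `json:"id"`
}

func main() {
	b, err := json.Marshal(request{Jsonrpc: "1.0", Method: "getbestblockhash", Params: []interface{}{}, ID: 1})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"jsonrpc":"1.0","method":"getbestblockhash","params":[],"id":1}
}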

@ -63,14 +63,14 @@ type FutureDebugLevelResult chan *response
// Receive waits for the response promised by the future and returns the result
// of setting the debug logging level to the passed level specification or the
// list of of the available subsystems for the special keyword 'show'.
// list of the available subsystems for the special keyword 'show'.
func (r FutureDebugLevelResult) Receive() (string, error) {
res, err := receiveFuture(r)
if err != nil {
return "", err
}
// Unmashal the result as a string.
// Unmarshal the result as a string.
var result string
err = json.Unmarshal(res, &result)
if err != nil {


@ -122,7 +122,8 @@ type jsonRequest struct {
type Client struct {
id uint64 // atomic, so must stay 64-bit aligned
// config holds the connection configuration assoiated with this client.
// config holds the connection configuration associated with this
// client.
config *ConnConfig
// wsConn is the underlying websocket connection when not in HTTP POST
@ -328,7 +329,7 @@ func futureError(err error) chan *response {
// result checks whether the unmarshaled response contains a non-nil error,
// returning an unmarshaled dcrjson.RPCError (or an unmarshaling error) if so.
// If the response is not an error, the raw bytes of the request are
// returned for further unmashaling into specific result types.
// returned for further unmarshalling into specific result types.
func (r rawResponse) result() (result []byte, err error) {
if r.Error != nil {
return nil, r.Error
@ -408,7 +409,7 @@ func (c *Client) handleMessage(msg []byte) {
// to have come from reading from the websocket connection in wsInHandler,
// should be logged.
func (c *Client) shouldLogReadError(err error) bool {
// No logging when the connetion is being forcibly disconnected.
// No logging when the connection is being forcibly disconnected.
select {
case <-c.shutdown:
return false


@ -215,7 +215,7 @@ func (c *Client) Ping() error {
// GetPeerInfoAsync RPC invocation (or an applicable error).
type FutureGetPeerInfoResult chan *response
// Receive waits for the response promised by the future and returns data about
// Receive waits for the response promised by the future and returns data about
// each connected network peer.
func (r FutureGetPeerInfoResult) Receive() ([]chainjson.GetPeerInfoResult, error) {
res, err := receiveFuture(r)


@ -1200,7 +1200,7 @@ func (r FutureNotifyWinningTicketsResult) Receive() error {
}
// NotifyWinningTicketsAsync returns an instance of a type that can be used
// to get the result of the RPC at some future time by invoking the Receive
// to get the result of the RPC at some future time by invoking the Receive
// function on the returned instance.
//
// See NotifyWinningTickets for the blocking version and more details.
@ -1250,7 +1250,7 @@ func (r FutureNotifySpentAndMissedTicketsResult) Receive() error {
}
// NotifySpentAndMissedTicketsAsync returns an instance of a type that can be used
// to get the result of the RPC at some future time by invoking the Receive
// to get the result of the RPC at some future time by invoking the Receive
// function on the returned instance.
//
// See NotifySpentAndMissedTickets for the blocking version and more details.


@ -383,7 +383,7 @@ func (c *Client) CreateRawSSRtxAsync(inputs []chainjson.TransactionInput, fee dc
return c.sendCmd(cmd)
}
// CreateRawSSRtx returns a new SSR transactionm (revoking an sstx).
// CreateRawSSRtx returns a new SSR transaction (revoking an sstx).
func (c *Client) CreateRawSSRtx(inputs []chainjson.TransactionInput, fee dcrutil.Amount) (*wire.MsgTx, error) {
return c.CreateRawSSRtxAsync(inputs, fee).Receive()
}


@ -224,7 +224,7 @@ func (c *Client) ListUnspent() ([]walletjson.ListUnspentResult, error) {
// ListUnspentMin returns all unspent transaction outputs known to a wallet,
// using the specified number of minimum conformations and default number of
// maximum confiramtions (9999999) as a filter.
// maximum confirmations (9999999) as a filter.
func (c *Client) ListUnspentMin(minConf int) ([]walletjson.ListUnspentResult, error) {
return c.ListUnspentMinAsync(minConf).Receive()
}
@ -617,7 +617,7 @@ func (r FutureSendManyResult) Receive() (*chainhash.Hash, error) {
return nil, err
}
// Unmashal result as a string.
// Unmarshal result as a string.
var txHash string
err = json.Unmarshal(res, &txHash)
if err != nil {
@ -739,7 +739,7 @@ func (r FuturePurchaseTicketResult) Receive() ([]*chainhash.Hash, error) {
return nil, err
}
// Unmashal result as a string slice.
// Unmarshal result as a string slice.
var txHashesStr []string
err = json.Unmarshal(res, &txHashesStr)
if err != nil {
@ -814,7 +814,7 @@ func (c *Client) PurchaseTicketAsync(fromAccount string,
}
// PurchaseTicket takes an account and a spending limit and calls the async
// puchasetickets command.
// purchasetickets command.
func (c *Client) PurchaseTicket(fromAccount string,
spendLimit dcrutil.Amount, minConf *int, ticketAddress dcrutil.Address,
numTickets *int, poolAddress dcrutil.Address, poolFees *dcrutil.Amount,
@ -1138,7 +1138,7 @@ func (r FutureGetAddressesByAccountResult) Receive(net dcrutil.AddressParams) ([
return nil, err
}
// Unmashal result as an array of string.
// Unmarshal result as an array of string.
var addrStrings []string
err = json.Unmarshal(res, &addrStrings)
if err != nil {
@ -1313,7 +1313,7 @@ func (c *Client) KeyPoolRefillSize(newSize uint) error {
// applicable error).
type FutureListAccountsResult chan *response
// Receive waits for the response promised by the future and returns returns a
// Receive waits for the response promised by the future and returns a
// map of account names and their associated balances.
func (r FutureListAccountsResult) Receive() (map[string]dcrutil.Amount, error) {
res, err := receiveFuture(r)
@ -1553,7 +1553,7 @@ func (c *Client) GetReceivedByAccountMinConf(account string, minConfirms int) (d
// of a GetUnconfirmedBalanceAsync RPC invocation (or an applicable error).
type FutureGetUnconfirmedBalanceResult chan *response
// Receive waits for the response promised by the future and returns returns the
// Receive waits for the response promised by the future and returns the
// unconfirmed balance from the server for the specified account.
func (r FutureGetUnconfirmedBalanceResult) Receive() (dcrutil.Amount, error) {
res, err := receiveFuture(r)


@ -391,7 +391,7 @@ func rpcRuleError(fmtStr string, args ...interface{}) *dcrjson.RPCError {
}
// rpcDuplicateTxError is a convenience function to convert a
// rejected duplicate tx error to an RPC error with the appropriate code set.
// rejected duplicate tx error to an RPC error with the appropriate code set.
func rpcDuplicateTxError(fmtStr string, args ...interface{}) *dcrjson.RPCError {
return dcrjson.NewRPCError(dcrjson.ErrRPCDuplicateTx,
fmt.Sprintf(fmtStr, args...))
@ -1314,7 +1314,7 @@ func handleDecodeScript(s *rpcServer, cmd interface{}, closeChan <-chan struct{}
// Get information about the script.
// Ignore the error here since an error means the script couldn't parse
// and there is no additinal information about it anyways.
// and there is no additional information about it anyways.
scriptClass, addrs, reqSigs, _ := txscript.ExtractPkScriptAddrs(
scriptVersion, script, s.server.chainParams)
addresses := make([]string, len(addrs))
@ -1342,7 +1342,7 @@ func handleDecodeScript(s *rpcServer, cmd interface{}, closeChan <-chan struct{}
return reply, nil
}
// handleEstimateFee implenents the estimatefee command.
// handleEstimateFee implements the estimatefee command.
// TODO this is a very basic implementation. It should be
// modified to match the bitcoin-core one.
func handleEstimateFee(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
@ -1935,7 +1935,7 @@ func handleGetBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i
func handleGetBlockchainInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
best := s.chain.BestSnapshot()
// Fetch the current chain work using the the best block hash.
// Fetch the current chain work using the best block hash.
chainWork, err := s.chain.ChainWork(&best.Hash)
if err != nil {
return nil, rpcInternalError(err.Error(), "Could not fetch chain work.")
@ -2522,7 +2522,7 @@ func (state *gbtWorkState) blockTemplateResult(bm *blockManager, useCoinbaseValu
// depends on. This is necessary since the created block must
// ensure proper ordering of the dependencies. A map is used
// before creating the final array to prevent duplicate entries
// when mutiple inputs reference the same transaction.
// when multiple inputs reference the same transaction.
dependsMap := make(map[int64]struct{})
for _, txIn := range stx.TxIn {
if idx, ok := stxIndex[txIn.PreviousOutPoint.Hash]; ok {
@ -2572,7 +2572,7 @@ func (state *gbtWorkState) blockTemplateResult(bm *blockManager, useCoinbaseValu
return nil, rpcInternalError(err.Error(), context)
}
// Choose the correct maximum block size as defined by the network
// Choose the correct maximum block size as defined by the network
// parameters and the current status of any hard fork votes to change
// it when serialized.
maxBlockSize, err := bm.chain.MaxBlockSize()
@ -2716,7 +2716,7 @@ func handleGetBlockTemplateLongPoll(s *rpcServer, longPollID string, useCoinbase
// Fallthrough
}
// Get the lastest block template
// Get the latest block template
state.Lock()
defer state.Unlock()
@ -3150,7 +3150,7 @@ func handleGetHeaders(s *rpcServer, cmd interface{}, closeChan <-chan struct{})
// Until wire.MsgGetHeaders uses []Hash instead of the []*Hash, this
// conversion is necessary. The wire protocol getheaders is (probably)
// called much more often than this RPC, so chain.LocateHeaders is
// optimized for that and this is given the performance penality.
// optimized for that and this is given the performance penalty.
locators := make(blockchain.BlockLocator, len(blockLocators))
for i := range blockLocators {
locators[i] = &blockLocators[i]
@ -4214,8 +4214,8 @@ func handleGetWorkRequest(s *rpcServer) (interface{}, error) {
// also in big endian, but it is treated as a uint256 and byte swapped
// to little endian accordingly.
//
// The fact the fields are reversed in this way is rather odd and likey
// an artifact of some legacy internal state in the reference
// The fact the fields are reversed in this way is rather odd and
// likely an artifact of some legacy internal state in the reference
// implementation, but it is required for compatibility.
target := bigToLEUint256(standalone.CompactToBig(msgBlock.Header.Bits))
reply := &types.GetWorkResult{
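bigToLEUint256 itself is not shown in this diff; a helper of that shape would plausibly serialize the big integer as big-endian bytes padded to 32 and then byte-swap the buffer, roughly as in this sketch (the actual implementation may differ):

package sketch

import "math/big"

func bigToLEUint256Sketch(n *big.Int) [32]byte {
	// Right-align the big-endian bytes of n in a 32-byte buffer,
	// truncating any excess high-order bytes.
	nBytes := n.Bytes()
	start := 0
	if len(nBytes) > 32 {
		start = len(nBytes) - 32
	}
	var buf [32]byte
	copy(buf[32-len(nBytes[start:]):], nBytes[start:])

	// Reverse in place so the result reads as a little-endian uint256.
	for i, j := 0, 31; i < j; i, j = i+1, j-1 {
		buf[i], buf[j] = buf[j], buf[i]
	}
	return buf
}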
@ -4547,7 +4547,7 @@ func fetchInputTxos(s *rpcServer, tx *wire.MsgTx) (map[wire.OutPoint]wire.TxOut,
voteTx := stake.IsSSGen(tx)
for txInIndex, txIn := range tx.TxIn {
// vote tx have null input for vin[0],
// skip since it resolvces to an invalid transaction
// skip since it resolves to an invalid transaction
if voteTx && txInIndex == 0 {
continue
}
@ -4908,8 +4908,8 @@ func handleSearchRawTransactions(s *rpcServer, cmd interface{}, closeChan <-chan
// order and the number of results is still under the number requested.
if !reverse && len(addressTxns) < numRequested {
// Transactions in the mempool are not in a block header yet,
// so the block header field in the retieved transaction struct
// is left nil.
// so the block header field in the retrieved transaction
// struct is left nil.
mpTxns, mpSkipped := fetchMempoolTxnsForAddress(s, addr,
uint32(numToSkip)-numSkipped, uint32(numRequested-
len(addressTxns)))
@ -5446,7 +5446,7 @@ func handleTicketFeeInfo(s *rpcServer, cmd interface{}, closeChan <-chan struct{
// We need data on windows from before this. Start from
// the last adjustment and move backwards through window
// lengths, calulating the fees data and appending it
// lengths, calculating the fees data and appending it
// each time.
if windows > 1 {
// Go down to the last height requested, except
@ -6145,7 +6145,7 @@ func (s *rpcServer) jsonRPCRead(w http.ResponseWriter, r *http.Request, isAdmin
// the read deadline for the new connection and having one breaks long
// polling. However, not having a read deadline on the initial
// connection would mean clients can connect and idle forever. Thus,
// hijack the connecton from the HTTP server, clear the read deadline,
// hijack the connection from the HTTP server, clear the read deadline,
// and handle writing the response manually.
hj, ok := w.(http.Hijacker)
if !ok {
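	// (sketch) hijacking unsupported; fall back to normal handling.
}
// Hedged sketch of the remaining steps described above (error handling
// trimmed; not the server's exact code): take over the connection, clear
// its read deadline, and write the response manually.
conn, buf, err := hj.Hijack()
if err != nil {
	return
}
conn.SetReadDeadline(time.Time{}) // a zero time clears the read deadline
buf.WriteString("HTTP/1.1 200 OK\r\n\r\n")
buf.Flush() // conn then remains open for long polling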


@ -81,7 +81,7 @@ func testGetBlockHash(r *rpctest.Harness, t *testing.T) {
info, err := r.Node.GetInfo()
if err != nil {
t.Fatalf("call to getinfo cailed: %v", err)
t.Fatalf("call to getinfo failed: %v", err)
}
blockHash, err := r.Node.GetBlockHash(int64(info.Blocks))


@ -226,7 +226,7 @@ var helpDescsEnUS = map[string]string{
"getbestblock--result0": "Get block height and hash of best block in the main chain.",
// GetBestBlockHashCmd help.
"getbestblockhash--synopsis": "Returns the hash of the of the best (most recent) block in the longest block chain.",
"getbestblockhash--synopsis": "Returns the hash of the best (most recent) block in the longest block chain.",
"getbestblockhash--result0": "The hex-encoded block hash",
// GetBlockCmd help.
@ -526,7 +526,7 @@ var helpDescsEnUS = map[string]string{
"agenda-choices": "All choices in this agenda.",
"choice-id": "Unique identifier of this choice.",
"choice-description": "Description of this choice.",
"choice-bits": "Bits that dentify this choice.",
"choice-bits": "Bits that identify this choice.",
"choice-isabstain": "This choice is to abstain from change.",
"choice-isno": "Hard no choice (1 and only 1 per agenda).",
"choice-count": "How many votes received.",


@ -197,7 +197,7 @@ func (n *node) start() error {
return pid.Close()
}
// stop interrupts the running dcrd process process, and waits until it exits
// stop interrupts the running dcrd process, and waits until it exits
// properly. On windows, interrupt is not supported, so a kill signal is used
// instead
func (n *node) stop() error {


@ -354,7 +354,7 @@ func (h *Harness) CreateTransaction(targetOutputs []*wire.TxOut, feeRate dcrutil
}
// UnlockOutputs unlocks any outputs which were previously marked as
// unspendabe due to being selected to fund a transaction via the
// unspendable due to being selected to fund a transaction via the
// CreateTransaction method.
//
// This function is safe for concurrent access.


@ -31,7 +31,7 @@ const (
// JoinNodes is a synchronization tool used to block until all passed nodes are
// fully synced with respect to an attribute. This function will block for a
// period of time, finally returning once all nodes are synced according to the
// passed JoinType. This function be used to to ensure all active test
// passed JoinType. This function be used to ensure all active test
// harnesses are at a consistent state before proceeding to an assertion or
// check within rpc tests.
func JoinNodes(nodes []*Harness, joinType JoinType) error {


@ -51,7 +51,7 @@ var (
// commitAmountMultiplier is a multiplier for the minimum stake difficulty,
// used to fund inputs used in purchasing tickets. This needs to be high
// enough that (minimumStakeDifficulty*commitAmountMultiplier) -
// minimumStakeDifficulty is grater than the dust limit and will allow the
// minimumStakeDifficulty is greater than the dust limit and will allow the
// ticket to be relayed on the network.
commitAmountMultiplier = int64(4)
)


@ -296,7 +296,7 @@ func (m *wsNotificationManager) NotifyStakeDifficulty(
// NotifyMempoolTx passes a transaction accepted by mempool to the
// notification manager for transaction notification processing. If
// isNew is true, the tx is is a new transaction, rather than one
// isNew is true, the tx is a new transaction, rather than one
// added to the mempool during a reorg.
func (m *wsNotificationManager) NotifyMempoolTx(tx *dcrutil.Tx, isNew bool) {
n := &notificationTxAcceptedByMempool{
@ -1903,7 +1903,7 @@ func (c *wsClient) WaitForShutdown() {
// has already been authenticated (via HTTP Basic access authentication). The
// returned client is ready to start. Once started, the client will process
// incoming and outgoing messages in separate goroutines complete with queuing
// and asynchrous handling for long-running operations.
// and asynchronous handling for long-running operations.
func newWebsocketClient(server *rpcServer, conn *websocket.Conn,
remoteAddr string, authenticated bool, isAdmin bool) (*wsClient, error) {

Some files were not shown because too many files have changed in this diff.