2015-08-26 04:03:18 +00:00
|
|
|
// Copyright (c) 2013-2016 The btcsuite developers
|
2019-07-22 10:25:34 +00:00
|
|
|
// Copyright (c) 2015-2019 The Decred developers
|
2013-07-18 14:49:28 +00:00
|
|
|
// Use of this source code is governed by an ISC
|
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
2015-01-30 20:54:30 +00:00
|
|
|
package blockchain
|
2013-07-18 14:49:28 +00:00
|
|
|
|
|
|
|
|
import (
|
2016-04-29 02:19:12 +00:00
|
|
|
"fmt"
|
2013-07-18 14:49:28 +00:00
|
|
|
"math/big"
|
|
|
|
|
"time"
|
2014-07-02 16:04:59 +00:00
|
|
|
|
2019-08-06 13:42:04 +00:00
|
|
|
"github.com/decred/dcrd/blockchain/standalone"
|
2016-01-20 21:46:42 +00:00
|
|
|
"github.com/decred/dcrd/chaincfg/chainhash"
|
2019-07-22 10:25:34 +00:00
|
|
|
"github.com/decred/dcrd/chaincfg/v2"
|
2016-04-29 02:19:12 +00:00
|
|
|
"github.com/decred/dcrd/wire"
|
2013-07-18 14:49:28 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
|
|
var (
	// bigZero is 0 represented as a big.Int.  It is defined here to avoid
	// the overhead of creating it multiple times.
	bigZero = big.NewInt(0)

	// bigOne is 1 represented as a big.Int.  It is defined here to avoid
	// the overhead of creating it multiple times.
	bigOne = big.NewInt(1)

	// oneLsh256 is 1 shifted left 256 bits.  It is defined here to avoid
	// the overhead of creating it multiple times.
	oneLsh256 = new(big.Int).Lsh(bigOne, 256)
)
|
|
|
|
|
|
|
|
|
|
// calcEasiestDifficulty calculates the easiest possible difficulty that a block
|
|
|
|
|
// can have given starting difficulty bits and a duration. It is mainly used to
|
|
|
|
|
// verify that claimed proof of work by a block is sane as compared to a
|
|
|
|
|
// known good checkpoint.
|
2018-01-27 07:51:31 +00:00
|
|
|
func (b *BlockChain) calcEasiestDifficulty(bits uint32, duration time.Duration) uint32 {
|
2013-07-18 14:49:28 +00:00
|
|
|
// Convert types used in the calculations below.
|
|
|
|
|
durationVal := int64(duration)
|
2016-01-20 21:46:42 +00:00
|
|
|
adjustmentFactor := big.NewInt(b.chainParams.RetargetAdjustmentFactor)
|
|
|
|
|
maxRetargetTimespan := int64(b.chainParams.TargetTimespan) *
|
|
|
|
|
b.chainParams.RetargetAdjustmentFactor
|
2013-07-18 14:49:28 +00:00
|
|
|
|
2016-11-17 01:33:34 +00:00
|
|
|
// The test network rules allow minimum difficulty blocks once too much
|
|
|
|
|
// time has elapsed without mining a block.
|
2016-08-10 21:02:23 +00:00
|
|
|
if b.chainParams.ReduceMinDifficulty {
|
|
|
|
|
if durationVal > int64(b.chainParams.MinDiffReductionTime) {
|
2015-02-06 05:18:27 +00:00
|
|
|
return b.chainParams.PowLimitBits
|
2013-07-24 21:43:39 +00:00
|
|
|
}
|
|
|
|
|
}
|
2013-07-18 14:49:28 +00:00
|
|
|
|
|
|
|
|
// Since easier difficulty equates to higher numbers, the easiest
|
|
|
|
|
// difficulty for a given duration is the largest value possible given
|
|
|
|
|
// the number of retargets for the duration and starting difficulty
|
|
|
|
|
// multiplied by the max adjustment factor.
|
2019-08-06 13:42:04 +00:00
|
|
|
newTarget := standalone.CompactToBig(bits)
|
2015-02-06 05:18:27 +00:00
|
|
|
for durationVal > 0 && newTarget.Cmp(b.chainParams.PowLimit) < 0 {
|
2013-07-18 14:49:28 +00:00
|
|
|
newTarget.Mul(newTarget, adjustmentFactor)
|
|
|
|
|
durationVal -= maxRetargetTimespan
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Limit new value to the proof of work limit.
|
2015-02-06 05:18:27 +00:00
|
|
|
if newTarget.Cmp(b.chainParams.PowLimit) > 0 {
|
|
|
|
|
newTarget.Set(b.chainParams.PowLimit)
|
2013-07-18 14:49:28 +00:00
|
|
|
}
|
|
|
|
|
|
2019-08-06 13:42:04 +00:00
|
|
|
return standalone.BigToCompact(newTarget)
|
2013-07-18 14:49:28 +00:00
|
|
|
}
|
|
|
|
|
|
2013-07-24 21:43:39 +00:00
|
|
|
// findPrevTestNetDifficulty returns the difficulty of the previous block which
|
|
|
|
|
// did not have the special testnet minimum difficulty rule applied.
|
2015-08-26 04:03:18 +00:00
|
|
|
//
|
|
|
|
|
// This function MUST be called with the chain state lock held (for writes).
|
2018-05-27 02:47:45 +00:00
|
|
|
func (b *BlockChain) findPrevTestNetDifficulty(startNode *blockNode) uint32 {
|
2013-07-24 21:43:39 +00:00
|
|
|
// Search backwards through the chain for the last block without
|
|
|
|
|
// the special rule applied.
|
2016-01-20 21:46:42 +00:00
|
|
|
blocksPerRetarget := b.chainParams.WorkDiffWindowSize *
|
2016-03-09 03:16:06 +00:00
|
|
|
b.chainParams.WorkDiffWindows
|
2013-07-24 21:43:39 +00:00
|
|
|
iterNode := startNode
|
2016-01-20 21:46:42 +00:00
|
|
|
for iterNode != nil && iterNode.height%blocksPerRetarget != 0 &&
|
blockchain: Don't store full header in block node.
This modifies the block node structure to include only the specifically
used fields, some of which in a more compact format, as opposed to
copying the entire header and updates all code and tests accordingly.
Not only is this a more efficient approach that helps pave the way for
future optimizations, it is also consistent with the upstream code which
helps minimize the differences to facilitate easier syncs due to less
merge conflicts.
In particular, since the merkle and stake roots, number of revocations,
size, nonce, and extradata fields aren't used currently, they are no
longer copied into the block node. Also, the block node already had a
height field, which is also in the header, so this change also removes
that duplication.
Another change is that the block node now stores the timestamp as an
int64 unix-style timestamp which is only 8 bytes versus the old
timestamp that was in the header which is a time.Time and thus 24 bytes.
It should be noted that future optimizations will very likely end up
adding most of the omitted header fields back to the block node as
individual fields so the headers can be efficiently reconstructed from
memory, however, these changes are still beneficial due to the ability
to decouple the block node storage format from the header struct which
allows more compact representations and reording of the fields for
optimal struct packing.
Ultimately, the need for the parent hash can also be removed, which will
save an additional 32 bytes which would not be possible without this
decoupling.
2018-01-28 07:56:36 +00:00
|
|
|
iterNode.bits == b.chainParams.PowLimitBits {
|
2014-05-26 15:27:50 +00:00
|
|
|
|
2018-05-27 02:47:45 +00:00
|
|
|
iterNode = iterNode.parent
|
2013-07-24 21:43:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Return the found difficulty or the minimum difficulty if no
|
|
|
|
|
// appropriate block was found.
|
2015-02-06 05:18:27 +00:00
|
|
|
lastBits := b.chainParams.PowLimitBits
|
2013-07-24 21:43:39 +00:00
|
|
|
if iterNode != nil {
|
blockchain: Don't store full header in block node.
This modifies the block node structure to include only the specifically
used fields, some of which in a more compact format, as opposed to
copying the entire header and updates all code and tests accordingly.
Not only is this a more efficient approach that helps pave the way for
future optimizations, it is also consistent with the upstream code which
helps minimize the differences to facilitate easier syncs due to less
merge conflicts.
In particular, since the merkle and stake roots, number of revocations,
size, nonce, and extradata fields aren't used currently, they are no
longer copied into the block node. Also, the block node already had a
height field, which is also in the header, so this change also removes
that duplication.
Another change is that the block node now stores the timestamp as an
int64 unix-style timestamp which is only 8 bytes versus the old
timestamp that was in the header which is a time.Time and thus 24 bytes.
It should be noted that future optimizations will very likely end up
adding most of the omitted header fields back to the block node as
individual fields so the headers can be efficiently reconstructed from
memory, however, these changes are still beneficial due to the ability
to decouple the block node storage format from the header struct which
allows more compact representations and reording of the fields for
optimal struct packing.
Ultimately, the need for the parent hash can also be removed, which will
save an additional 32 bytes which would not be possible without this
decoupling.
2018-01-28 07:56:36 +00:00
|
|
|
lastBits = iterNode.bits
|
2013-07-24 21:43:39 +00:00
|
|
|
}
|
2018-05-27 02:47:45 +00:00
|
|
|
return lastBits
|
2013-07-24 21:43:39 +00:00
|
|
|
}
|
|
|
|
|
|
2013-07-18 14:49:28 +00:00
|
|
|
// calcNextRequiredDifficulty calculates the required difficulty for the block
// after the passed previous block node based on the difficulty retarget rules.
// This function differs from the exported CalcNextRequiredDifficulty in that
// the exported version uses the current best chain as the previous block node
// while this function accepts any block node.
//
// The retarget computes an exponentially-weighted average of the observed
// block production rate over WorkDiffWindows window periods of
// WorkDiffWindowSize blocks each, using big.Ints as 64.32 bit fixed point to
// retain fractional precision, then scales the previous difficulty by that
// average while clamping the change to the retarget adjustment factor.
func (b *BlockChain) calcNextRequiredDifficulty(curNode *blockNode, newBlockTime time.Time) (uint32, error) {
	// Get the old difficulty; if we aren't at a block height where it changes,
	// just return this.  The compact and big.Int forms are both kept since
	// the compact form is returned directly on the no-retarget path while
	// the big.Int form feeds the fixed-point math below.
	oldDiff := curNode.bits
	oldDiffBig := standalone.CompactToBig(curNode.bits)

	// We're not at a retarget point, return the oldDiff.
	if (curNode.height+1)%b.chainParams.WorkDiffWindowSize != 0 {
		// For networks that support it, allow special reduction of the
		// required difficulty once too much time has elapsed without
		// mining a block.
		if b.chainParams.ReduceMinDifficulty {
			// Return minimum difficulty when more than the desired
			// amount of time has elapsed without mining a block.
			// Note curNode.timestamp is a unix-seconds int64, so the
			// reduction time is converted to seconds to match.
			reductionTime := int64(b.chainParams.MinDiffReductionTime /
				time.Second)
			allowMinTime := curNode.timestamp + reductionTime
			if newBlockTime.Unix() > allowMinTime {
				return b.chainParams.PowLimitBits, nil
			}

			// The block was mined within the desired timeframe, so
			// return the difficulty for the last block which did
			// not have the special minimum difficulty rule applied.
			return b.findPrevTestNetDifficulty(curNode), nil
		}

		return oldDiff, nil
	}

	// Declare some useful variables.  The min and max bound the final
	// result to within one retarget adjustment factor of the old target
	// in either direction.
	RAFBig := big.NewInt(b.chainParams.RetargetAdjustmentFactor)
	nextDiffBigMin := standalone.CompactToBig(curNode.bits)
	nextDiffBigMin.Div(nextDiffBigMin, RAFBig)
	nextDiffBigMax := standalone.CompactToBig(curNode.bits)
	nextDiffBigMax.Mul(nextDiffBigMax, RAFBig)

	// alpha controls how steeply older windows are discounted in the
	// exponential weighting below.
	alpha := b.chainParams.WorkDiffAlpha

	// Number of nodes to traverse while calculating difficulty.
	nodesToTraverse := (b.chainParams.WorkDiffWindowSize *
		b.chainParams.WorkDiffWindows)

	// Initialize bigInt slice for the percentage changes for each window period
	// above or below the target.
	windowChanges := make([]*big.Int, b.chainParams.WorkDiffWindows)

	// Regress through all of the previous blocks and store the percent changes
	// per window period; use bigInts to emulate 64.32 bit fixed point.
	var olderTime, windowPeriod int64
	var weights uint64
	oldNode := curNode
	recentTime := curNode.timestamp

	for i := int64(0); ; i++ {
		// Store and reset after reaching the end of every window period.
		if i%b.chainParams.WorkDiffWindowSize == 0 && i != 0 {
			olderTime = oldNode.timestamp
			timeDifference := recentTime - olderTime

			// Just assume we're at the target (no change) if we've
			// gone all the way back to the genesis block.
			if oldNode.height == 0 {
				timeDifference = int64(b.chainParams.TargetTimespan /
					time.Second)
			}

			timeDifBig := big.NewInt(timeDifference)
			timeDifBig.Lsh(timeDifBig, 32) // Add padding
			targetTemp := big.NewInt(int64(b.chainParams.TargetTimespan /
				time.Second))

			// Fixed-point ratio of actual timespan to target
			// timespan for this window (>1 means blocks came too
			// slowly, so difficulty should fall).
			windowAdjusted := targetTemp.Div(timeDifBig, targetTemp)

			// Weight it exponentially. Be aware that this could at some point
			// overflow if alpha or the number of blocks used is really large.
			windowAdjusted = windowAdjusted.Lsh(windowAdjusted,
				uint((b.chainParams.WorkDiffWindows-windowPeriod)*alpha))

			// Sum up all the different weights incrementally.
			weights += 1 << uint64((b.chainParams.WorkDiffWindows-windowPeriod)*
				alpha)

			// Store it in the slice.
			windowChanges[windowPeriod] = windowAdjusted

			windowPeriod++

			recentTime = olderTime
		}

		if i == nodesToTraverse {
			break // Exit for loop when we hit the end.
		}

		// Get the previous node while staying at the genesis block as
		// needed.
		if oldNode.parent != nil {
			oldNode = oldNode.parent
		}
	}

	// Sum up the weighted window periods.
	weightedSum := big.NewInt(0)
	for i := int64(0); i < b.chainParams.WorkDiffWindows; i++ {
		weightedSum.Add(weightedSum, windowChanges[i])
	}

	// Divide by the sum of all weights.
	weightsBig := big.NewInt(int64(weights))
	weightedSumDiv := weightedSum.Div(weightedSum, weightsBig)

	// Multiply by the old diff.
	nextDiffBig := weightedSumDiv.Mul(weightedSumDiv, oldDiffBig)

	// Right shift to restore the original padding (restore non-fixed point).
	nextDiffBig = nextDiffBig.Rsh(nextDiffBig, 32)

	// Check to see if we're over the limits for the maximum allowable retarget;
	// if we are, return the maximum or minimum except in the case that oldDiff
	// is zero.
	if oldDiffBig.Cmp(bigZero) == 0 { // This should never really happen,
		nextDiffBig.Set(nextDiffBig) // but in case it does... (deliberate no-op)
	} else if nextDiffBig.Cmp(bigZero) == 0 {
		nextDiffBig.Set(b.chainParams.PowLimit)
	} else if nextDiffBig.Cmp(nextDiffBigMax) == 1 {
		nextDiffBig.Set(nextDiffBigMax)
	} else if nextDiffBig.Cmp(nextDiffBigMin) == -1 {
		nextDiffBig.Set(nextDiffBigMin)
	}

	// Limit new value to the proof of work limit.
	if nextDiffBig.Cmp(b.chainParams.PowLimit) > 0 {
		nextDiffBig.Set(b.chainParams.PowLimit)
	}

	// Log new target difficulty and return it.  The new target logging is
	// intentionally converting the bits back to a number instead of using
	// newTarget since conversion to the compact representation loses
	// precision.
	nextDiffBits := standalone.BigToCompact(nextDiffBig)
	log.Debugf("Difficulty retarget at block height %d", curNode.height+1)
	log.Debugf("Old target %08x (%064x)", curNode.bits, oldDiffBig)
	log.Debugf("New target %08x (%064x)", nextDiffBits, standalone.CompactToBig(
		nextDiffBits))

	return nextDiffBits, nil
}
|
|
|
|
|
|
|
|
|
|
// CalcNextRequiredDiffFromNode calculates the required difficulty for the block
|
|
|
|
|
// given with the passed hash along with the given timestamp.
|
|
|
|
|
//
|
|
|
|
|
// This function is NOT safe for concurrent access.
|
2018-01-27 07:51:31 +00:00
|
|
|
func (b *BlockChain) CalcNextRequiredDiffFromNode(hash *chainhash.Hash, timestamp time.Time) (uint32, error) {
|
2018-05-27 02:47:45 +00:00
|
|
|
node := b.index.LookupNode(hash)
|
|
|
|
|
if node == nil {
|
|
|
|
|
return 0, fmt.Errorf("block %s is not known", hash)
|
2016-01-20 21:46:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return b.calcNextRequiredDifficulty(node, timestamp)
|
2013-07-18 14:49:28 +00:00
|
|
|
}
|
2014-03-02 18:17:36 +00:00
|
|
|
|
|
|
|
|
// CalcNextRequiredDifficulty calculates the required difficulty for the block
|
|
|
|
|
// after the end of the current best chain based on the difficulty retarget
|
|
|
|
|
// rules.
|
|
|
|
|
//
|
2015-08-26 04:03:18 +00:00
|
|
|
// This function is safe for concurrent access.
|
2018-01-27 07:51:31 +00:00
|
|
|
func (b *BlockChain) CalcNextRequiredDifficulty(timestamp time.Time) (uint32, error) {
|
2015-08-26 04:03:18 +00:00
|
|
|
b.chainLock.Lock()
|
2018-07-09 20:00:15 +00:00
|
|
|
difficulty, err := b.calcNextRequiredDifficulty(b.bestChain.Tip(), timestamp)
|
2015-08-26 04:03:18 +00:00
|
|
|
b.chainLock.Unlock()
|
|
|
|
|
return difficulty, err
|
2014-03-02 18:17:36 +00:00
|
|
|
}
|
2016-01-20 21:46:42 +00:00
|
|
|
|
|
|
|
|
// mergeDifficulty takes an original stake difficulty and two new, scaled
// stake difficulties, merges the new difficulties, and outputs a new
// merged stake difficulty.
func mergeDifficulty(oldDiff int64, newDiff1 int64, newDiff2 int64) int64 {
	// All of the arithmetic below is performed in 64.32 bit fixed point by
	// left shifting dividends 32 bits so the quotients keep fractional
	// precision.
	oldBig := big.NewInt(oldDiff)
	oldShifted := new(big.Int).Lsh(oldBig, 32)

	// ratio1 = oldDiff / newDiff1 in fixed point.
	ratio1 := new(big.Int).Div(oldShifted, big.NewInt(newDiff1))

	// ratio2 = newDiff2 / oldDiff in fixed point.
	ratio2 := new(big.Int).Lsh(big.NewInt(newDiff2), 32)
	ratio2.Div(ratio2, oldBig)

	// Combine the two changes in difficulty:
	// merged = ((ratio2 / ratio1) * oldDiff), truncated back to an
	// integer by dropping the 32 fractional bits.
	merged := new(big.Int).Lsh(ratio2, 32)
	merged.Div(merged, ratio1)
	merged.Mul(merged, oldBig)
	merged.Rsh(merged, 32)

	return merged.Int64()
}
|
|
|
|
|
|
2017-04-24 08:41:40 +00:00
|
|
|
// calcNextRequiredStakeDifficultyV1 calculates the required stake difficulty
// for the block after the passed previous block node based on exponentially
// weighted averages.
//
// The algorithm computes two candidate difficulties -- one driven by the
// ticket pool size relative to its target and one driven by the rate of fresh
// ticket purchases relative to its target -- clamps each to the maximum
// retarget factor, and merges them into the final result.  All of the ratio
// math emulates 64.32 fixed point by left shifting big integers 32 bits
// before dividing and right shifting 32 bits at the end.
//
// NOTE: This is the original stake difficulty algorithm that was used at Decred
// launch.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) calcNextRequiredStakeDifficultyV1(curNode *blockNode) (int64, error) {
	alpha := b.chainParams.StakeDiffAlpha
	stakeDiffStartHeight := int64(b.chainParams.CoinbaseMaturity) +
		1
	maxRetarget := b.chainParams.RetargetAdjustmentFactor
	TicketPoolWeight := int64(b.chainParams.TicketPoolSizeWeight)

	// Number of nodes to traverse while calculating difficulty.
	nodesToTraverse := (b.chainParams.StakeDiffWindowSize *
		b.chainParams.StakeDiffWindows)

	// Genesis block. Block at height 1 has these parameters.
	// Additionally, if we're before the time when people generally begin
	// purchasing tickets, just use the MinimumStakeDiff.
	// This is sort of sloppy and coded with the hopes that generally by
	// stakeDiffStartHeight people will be submitting lots of SStx over the
	// past nodesToTraverse many nodes. It should be okay with the default
	// Decred parameters, but might do weird things if you use custom
	// parameters.
	if curNode == nil ||
		curNode.height < stakeDiffStartHeight {
		return b.chainParams.MinimumStakeDiff, nil
	}

	// Get the old difficulty; if we aren't at a block height where it changes,
	// just return this.
	oldDiff := curNode.sbits
	if (curNode.height+1)%b.chainParams.StakeDiffWindowSize != 0 {
		return oldDiff, nil
	}

	// The target size of the ticketPool in live tickets. Recast these as int64
	// to avoid possible overflows for large sizes of either variable in
	// params.
	targetForTicketPool := int64(b.chainParams.TicketsPerBlock) *
		int64(b.chainParams.TicketPoolSize)

	// Initialize bigInt slice for the percentage changes for each window period
	// above or below the target.
	windowChanges := make([]*big.Int, b.chainParams.StakeDiffWindows)

	// Regress through all of the previous blocks and store the percent changes
	// per window period; use bigInts to emulate 64.32 bit fixed point.
	oldNode := curNode
	windowPeriod := int64(0)
	weights := uint64(0)

	// The loop runs exactly nodesToTraverse iterations, so windowPeriod
	// advances once per StakeDiffWindowSize iterations and reaches exactly
	// StakeDiffWindows entries by the time the loop exits.
	for i := int64(0); ; i++ {
		// Store and reset after reaching the end of every window period.
		if (i+1)%b.chainParams.StakeDiffWindowSize == 0 {
			// First adjust based on ticketPoolSize. Skew the difference
			// in ticketPoolSize by max adjustment factor to help
			// weight ticket pool size versus tickets per block.
			poolSizeSkew := (int64(oldNode.poolSize)-
				targetForTicketPool)*TicketPoolWeight + targetForTicketPool

			// Don't let this be negative or zero.
			if poolSizeSkew <= 0 {
				poolSizeSkew = 1
			}

			curPoolSizeTemp := big.NewInt(poolSizeSkew)
			curPoolSizeTemp.Lsh(curPoolSizeTemp, 32) // Add padding
			targetTemp := big.NewInt(targetForTicketPool)

			windowAdjusted := curPoolSizeTemp.Div(curPoolSizeTemp, targetTemp)

			// Weight it exponentially. Be aware that this could at some point
			// overflow if alpha or the number of blocks used is really large.
			windowAdjusted = windowAdjusted.Lsh(windowAdjusted,
				uint((b.chainParams.StakeDiffWindows-windowPeriod)*alpha))

			// Sum up all the different weights incrementally.
			weights += 1 << uint64((b.chainParams.StakeDiffWindows-windowPeriod)*
				alpha)

			// Store it in the slice.
			windowChanges[windowPeriod] = windowAdjusted

			// windowFreshStake = 0
			windowPeriod++
		}

		if (i + 1) == nodesToTraverse {
			break // Exit for loop when we hit the end.
		}

		// Get the previous node while staying at the genesis block as
		// needed.
		if oldNode.parent != nil {
			oldNode = oldNode.parent
		}
	}

	// Sum up the weighted window periods.
	weightedSum := big.NewInt(0)
	for i := int64(0); i < b.chainParams.StakeDiffWindows; i++ {
		weightedSum.Add(weightedSum, windowChanges[i])
	}

	// Divide by the sum of all weights.
	weightsBig := big.NewInt(int64(weights))
	weightedSumDiv := weightedSum.Div(weightedSum, weightsBig)

	// Multiply by the old stake diff.
	oldDiffBig := big.NewInt(oldDiff)
	nextDiffBig := weightedSumDiv.Mul(weightedSumDiv, oldDiffBig)

	// Right shift to restore the original padding (restore non-fixed point).
	nextDiffBig = nextDiffBig.Rsh(nextDiffBig, 32)
	nextDiffTicketPool := nextDiffBig.Int64()

	// Check to see if we're over the limits for the maximum allowable retarget;
	// if we are, return the maximum or minimum except in the case that oldDiff
	// is zero.
	if oldDiff == 0 { // This should never really happen, but in case it does...
		return nextDiffTicketPool, nil
	} else if nextDiffTicketPool == 0 {
		nextDiffTicketPool = oldDiff / maxRetarget
	} else if (nextDiffTicketPool / oldDiff) > (maxRetarget - 1) {
		nextDiffTicketPool = oldDiff * maxRetarget
	} else if (oldDiff / nextDiffTicketPool) > (maxRetarget - 1) {
		nextDiffTicketPool = oldDiff / maxRetarget
	}

	// The target number of new SStx per block for any given window period.
	targetForWindow := b.chainParams.StakeDiffWindowSize *
		int64(b.chainParams.TicketsPerBlock)

	// Regress through all of the previous blocks and store the percent changes
	// per window period; use bigInts to emulate 64.32 bit fixed point.
	// This second pass reuses windowChanges since the first pass's entries
	// have already been consumed by the weighted sum above.
	oldNode = curNode
	windowFreshStake := int64(0)
	windowPeriod = int64(0)
	weights = uint64(0)

	for i := int64(0); ; i++ {
		// Add the fresh stake into the store for this window period.
		windowFreshStake += int64(oldNode.freshStake)

		// Store and reset after reaching the end of every window period.
		if (i+1)%b.chainParams.StakeDiffWindowSize == 0 {
			// Don't let fresh stake be zero.
			if windowFreshStake <= 0 {
				windowFreshStake = 1
			}

			freshTemp := big.NewInt(windowFreshStake)
			freshTemp.Lsh(freshTemp, 32) // Add padding
			targetTemp := big.NewInt(targetForWindow)

			// Get the percentage change.
			windowAdjusted := freshTemp.Div(freshTemp, targetTemp)

			// Weight it exponentially. Be aware that this could at some point
			// overflow if alpha or the number of blocks used is really large.
			windowAdjusted = windowAdjusted.Lsh(windowAdjusted,
				uint((b.chainParams.StakeDiffWindows-windowPeriod)*alpha))

			// Sum up all the different weights incrementally.
			weights += 1 <<
				uint64((b.chainParams.StakeDiffWindows-windowPeriod)*alpha)

			// Store it in the slice.
			windowChanges[windowPeriod] = windowAdjusted

			windowFreshStake = 0
			windowPeriod++
		}

		if (i + 1) == nodesToTraverse {
			break // Exit for loop when we hit the end.
		}

		// Get the previous node while staying at the genesis block as
		// needed.
		if oldNode.parent != nil {
			oldNode = oldNode.parent
		}
	}

	// Sum up the weighted window periods.
	weightedSum = big.NewInt(0)
	for i := int64(0); i < b.chainParams.StakeDiffWindows; i++ {
		weightedSum.Add(weightedSum, windowChanges[i])
	}

	// Divide by the sum of all weights.
	weightsBig = big.NewInt(int64(weights))
	weightedSumDiv = weightedSum.Div(weightedSum, weightsBig)

	// Multiply by the old stake diff.
	oldDiffBig = big.NewInt(oldDiff)
	nextDiffBig = weightedSumDiv.Mul(weightedSumDiv, oldDiffBig)

	// Right shift to restore the original padding (restore non-fixed point).
	nextDiffBig = nextDiffBig.Rsh(nextDiffBig, 32)
	nextDiffFreshStake := nextDiffBig.Int64()

	// Check to see if we're over the limits for the maximum allowable retarget;
	// if we are, return the maximum or minimum except in the case that oldDiff
	// is zero.
	if oldDiff == 0 { // This should never really happen, but in case it does...
		return nextDiffFreshStake, nil
	} else if nextDiffFreshStake == 0 {
		nextDiffFreshStake = oldDiff / maxRetarget
	} else if (nextDiffFreshStake / oldDiff) > (maxRetarget - 1) {
		nextDiffFreshStake = oldDiff * maxRetarget
	} else if (oldDiff / nextDiffFreshStake) > (maxRetarget - 1) {
		nextDiffFreshStake = oldDiff / maxRetarget
	}

	// Average the two differences using scaled multiplication.
	nextDiff := mergeDifficulty(oldDiff, nextDiffTicketPool, nextDiffFreshStake)

	// Check to see if we're over the limits for the maximum allowable retarget;
	// if we are, return the maximum or minimum except in the case that oldDiff
	// is zero.
	if oldDiff == 0 { // This should never really happen, but in case it does...
		return oldDiff, nil
	} else if nextDiff == 0 {
		nextDiff = oldDiff / maxRetarget
	} else if (nextDiff / oldDiff) > (maxRetarget - 1) {
		nextDiff = oldDiff * maxRetarget
	} else if (oldDiff / nextDiff) > (maxRetarget - 1) {
		nextDiff = oldDiff / maxRetarget
	}

	// If the next diff is below the network minimum, set the required stake
	// difficulty to the minimum.
	if nextDiff < b.chainParams.MinimumStakeDiff {
		return b.chainParams.MinimumStakeDiff, nil
	}

	return nextDiff, nil
}
|
|
|
|
|
|
2017-04-24 08:41:40 +00:00
|
|
|
// estimateSupply returns an estimate of the coin supply for the provided block
|
|
|
|
|
// height. This is primarily used in the stake difficulty algorithm and relies
|
|
|
|
|
// on an estimate to simplify the necessary calculations. The actual total
|
|
|
|
|
// coin supply as of a given block height depends on many factors such as the
|
|
|
|
|
// number of votes included in every prior block (not including all votes
|
|
|
|
|
// reduces the subsidy) and whether or not any of the prior blocks have been
|
|
|
|
|
// invalidated by stakeholders thereby removing the PoW subsidy for them.
|
|
|
|
|
//
|
|
|
|
|
// This function is safe for concurrent access.
|
|
|
|
|
func estimateSupply(params *chaincfg.Params, height int64) int64 {
|
|
|
|
|
if height <= 0 {
|
|
|
|
|
return 0
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Estimate the supply by calculating the full block subsidy for each
|
|
|
|
|
// reduction interval and multiplying it the number of blocks in the
|
|
|
|
|
// interval then adding the subsidy produced by number of blocks in the
|
|
|
|
|
// current interval.
|
|
|
|
|
supply := params.BlockOneSubsidy()
|
2017-10-10 21:20:40 +00:00
|
|
|
reductions := height / params.SubsidyReductionInterval
|
2017-04-24 08:41:40 +00:00
|
|
|
subsidy := params.BaseSubsidy
|
|
|
|
|
for i := int64(0); i < reductions; i++ {
|
|
|
|
|
supply += params.SubsidyReductionInterval * subsidy
|
|
|
|
|
|
|
|
|
|
subsidy *= params.MulSubsidy
|
|
|
|
|
subsidy /= params.DivSubsidy
|
|
|
|
|
}
|
2017-10-10 21:20:40 +00:00
|
|
|
supply += (1 + height%params.SubsidyReductionInterval) * subsidy
|
2017-04-24 08:41:40 +00:00
|
|
|
|
|
|
|
|
// Blocks 0 and 1 have special subsidy amounts that have already been
|
|
|
|
|
// added above, so remove what their subsidies would have normally been
|
|
|
|
|
// which were also added above.
|
|
|
|
|
supply -= params.BaseSubsidy * 2
|
|
|
|
|
|
|
|
|
|
return supply
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// sumPurchasedTickets returns the sum of the number of tickets purchased in the
|
|
|
|
|
// most recent specified number of blocks from the point of view of the passed
|
|
|
|
|
// node.
|
2018-05-27 02:47:45 +00:00
|
|
|
func (b *BlockChain) sumPurchasedTickets(startNode *blockNode, numToSum int64) int64 {
|
2017-04-24 08:41:40 +00:00
|
|
|
var numPurchased int64
|
|
|
|
|
for node, numTraversed := startNode, int64(0); node != nil &&
|
|
|
|
|
numTraversed < numToSum; numTraversed++ {
|
|
|
|
|
|
blockchain: Don't store full header in block node.
This modifies the block node structure to include only the specifically
used fields, some of which in a more compact format, as opposed to
copying the entire header and updates all code and tests accordingly.
Not only is this a more efficient approach that helps pave the way for
future optimizations, it is also consistent with the upstream code which
helps minimize the differences to facilitate easier syncs due to less
merge conflicts.
In particular, since the merkle and stake roots, number of revocations,
size, nonce, and extradata fields aren't used currently, they are no
longer copied into the block node. Also, the block node already had a
height field, which is also in the header, so this change also removes
that duplication.
Another change is that the block node now stores the timestamp as an
int64 unix-style timestamp which is only 8 bytes versus the old
timestamp that was in the header which is a time.Time and thus 24 bytes.
It should be noted that future optimizations will very likely end up
adding most of the omitted header fields back to the block node as
individual fields so the headers can be efficiently reconstructed from
memory, however, these changes are still beneficial due to the ability
to decouple the block node storage format from the header struct which
allows more compact representations and reording of the fields for
optimal struct packing.
Ultimately, the need for the parent hash can also be removed, which will
save an additional 32 bytes which would not be possible without this
decoupling.
2018-01-28 07:56:36 +00:00
|
|
|
numPurchased += int64(node.freshStake)
|
2018-05-27 02:47:45 +00:00
|
|
|
node = node.parent
|
2017-04-24 08:41:40 +00:00
|
|
|
}
|
|
|
|
|
|
2018-05-27 02:47:45 +00:00
|
|
|
return numPurchased
|
2017-04-24 08:41:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// calcNextStakeDiffV2 calculates the next stake difficulty for the given set
|
|
|
|
|
// of parameters using the algorithm defined in DCP0001.
|
|
|
|
|
//
|
|
|
|
|
// This function contains the heart of the algorithm and thus is separated for
|
|
|
|
|
// use in both the actual stake difficulty calculation as well as estimation.
|
|
|
|
|
//
|
|
|
|
|
// The caller must perform all of the necessary chain traversal in order to
|
|
|
|
|
// get the current difficulty, previous retarget interval's pool size plus
|
|
|
|
|
// its immature tickets, as well as the current pool size plus immature tickets.
|
|
|
|
|
//
|
|
|
|
|
// This function is safe for concurrent access.
|
|
|
|
|
func calcNextStakeDiffV2(params *chaincfg.Params, nextHeight, curDiff, prevPoolSizeAll, curPoolSizeAll int64) int64 {
|
|
|
|
|
// Shorter version of various parameter for convenience.
|
|
|
|
|
votesPerBlock := int64(params.TicketsPerBlock)
|
|
|
|
|
ticketPoolSize := int64(params.TicketPoolSize)
|
|
|
|
|
ticketMaturity := int64(params.TicketMaturity)
|
|
|
|
|
|
|
|
|
|
// Calculate the difficulty by multiplying the old stake difficulty
|
|
|
|
|
// with two ratios that represent a force to counteract the relative
|
|
|
|
|
// change in the pool size (Fc) and a restorative force to push the pool
|
|
|
|
|
// size towards the target value (Fr).
|
|
|
|
|
//
|
|
|
|
|
// Per DCP0001, the generalized equation is:
|
|
|
|
|
//
|
|
|
|
|
// nextDiff = min(max(curDiff * Fc * Fr, Slb), Sub)
|
|
|
|
|
//
|
|
|
|
|
// The detailed form expands to:
|
|
|
|
|
//
|
|
|
|
|
// curPoolSizeAll curPoolSizeAll
|
|
|
|
|
// nextDiff = curDiff * --------------- * -----------------
|
|
|
|
|
// prevPoolSizeAll targetPoolSizeAll
|
|
|
|
|
//
|
|
|
|
|
// Slb = b.chainParams.MinimumStakeDiff
|
|
|
|
|
//
|
|
|
|
|
// estimatedTotalSupply
|
|
|
|
|
// Sub = -------------------------------
|
|
|
|
|
// targetPoolSize / votesPerBlock
|
|
|
|
|
//
|
|
|
|
|
// In order to avoid the need to perform floating point math which could
|
2018-02-18 18:07:06 +00:00
|
|
|
// be problematic across languages due to uncertainty in floating point
|
2017-04-24 08:41:40 +00:00
|
|
|
// math libs, this is further simplified to integer math as follows:
|
|
|
|
|
//
|
|
|
|
|
// curDiff * curPoolSizeAll^2
|
|
|
|
|
// nextDiff = -----------------------------------
|
|
|
|
|
// prevPoolSizeAll * targetPoolSizeAll
|
|
|
|
|
//
|
2019-08-16 22:37:58 +00:00
|
|
|
// Further, the Sub parameter must calculate the denominator first using
|
2017-04-24 08:41:40 +00:00
|
|
|
// integer math.
|
|
|
|
|
targetPoolSizeAll := votesPerBlock * (ticketPoolSize + ticketMaturity)
|
|
|
|
|
curPoolSizeAllBig := big.NewInt(curPoolSizeAll)
|
|
|
|
|
nextDiffBig := big.NewInt(curDiff)
|
|
|
|
|
nextDiffBig.Mul(nextDiffBig, curPoolSizeAllBig)
|
|
|
|
|
nextDiffBig.Mul(nextDiffBig, curPoolSizeAllBig)
|
|
|
|
|
nextDiffBig.Div(nextDiffBig, big.NewInt(prevPoolSizeAll))
|
|
|
|
|
nextDiffBig.Div(nextDiffBig, big.NewInt(targetPoolSizeAll))
|
|
|
|
|
|
|
|
|
|
// Limit the new stake difficulty between the minimum allowed stake
|
|
|
|
|
// difficulty and a maximum value that is relative to the total supply.
|
|
|
|
|
//
|
|
|
|
|
// NOTE: This is intentionally using integer math to prevent any
|
|
|
|
|
// potential issues due to uncertainty in floating point math libs. The
|
|
|
|
|
// ticketPoolSize parameter already contains the result of
|
|
|
|
|
// (targetPoolSize / votesPerBlock).
|
|
|
|
|
nextDiff := nextDiffBig.Int64()
|
|
|
|
|
estimatedSupply := estimateSupply(params, nextHeight)
|
|
|
|
|
maximumStakeDiff := estimatedSupply / ticketPoolSize
|
|
|
|
|
if nextDiff > maximumStakeDiff {
|
|
|
|
|
nextDiff = maximumStakeDiff
|
|
|
|
|
}
|
|
|
|
|
if nextDiff < params.MinimumStakeDiff {
|
|
|
|
|
nextDiff = params.MinimumStakeDiff
|
|
|
|
|
}
|
|
|
|
|
return nextDiff
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// calcNextRequiredStakeDifficultyV2 calculates the required stake difficulty
|
|
|
|
|
// for the block after the passed previous block node based on the algorithm
|
|
|
|
|
// defined in DCP0001.
|
|
|
|
|
//
|
|
|
|
|
// This function MUST be called with the chain state lock held (for writes).
|
|
|
|
|
func (b *BlockChain) calcNextRequiredStakeDifficultyV2(curNode *blockNode) (int64, error) {
|
|
|
|
|
// Stake difficulty before any tickets could possibly be purchased is
|
|
|
|
|
// the minimum value.
|
|
|
|
|
nextHeight := int64(0)
|
|
|
|
|
if curNode != nil {
|
|
|
|
|
nextHeight = curNode.height + 1
|
|
|
|
|
}
|
|
|
|
|
stakeDiffStartHeight := int64(b.chainParams.CoinbaseMaturity) + 1
|
|
|
|
|
if nextHeight < stakeDiffStartHeight {
|
|
|
|
|
return b.chainParams.MinimumStakeDiff, nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Return the previous block's difficulty requirements if the next block
|
|
|
|
|
// is not at a difficulty retarget interval.
|
|
|
|
|
intervalSize := b.chainParams.StakeDiffWindowSize
|
blockchain: Don't store full header in block node.
This modifies the block node structure to include only the specifically
used fields, some of which in a more compact format, as opposed to
copying the entire header and updates all code and tests accordingly.
Not only is this a more efficient approach that helps pave the way for
future optimizations, it is also consistent with the upstream code which
helps minimize the differences to facilitate easier syncs due to less
merge conflicts.
In particular, since the merkle and stake roots, number of revocations,
size, nonce, and extradata fields aren't used currently, they are no
longer copied into the block node. Also, the block node already had a
height field, which is also in the header, so this change also removes
that duplication.
Another change is that the block node now stores the timestamp as an
int64 unix-style timestamp which is only 8 bytes versus the old
timestamp that was in the header which is a time.Time and thus 24 bytes.
It should be noted that future optimizations will very likely end up
adding most of the omitted header fields back to the block node as
individual fields so the headers can be efficiently reconstructed from
memory, however, these changes are still beneficial due to the ability
to decouple the block node storage format from the header struct which
allows more compact representations and reording of the fields for
optimal struct packing.
Ultimately, the need for the parent hash can also be removed, which will
save an additional 32 bytes which would not be possible without this
decoupling.
2018-01-28 07:56:36 +00:00
|
|
|
curDiff := curNode.sbits
|
2017-04-24 08:41:40 +00:00
|
|
|
if nextHeight%intervalSize != 0 {
|
|
|
|
|
return curDiff, nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Get the pool size and number of tickets that were immature at the
|
|
|
|
|
// previous retarget interval.
|
|
|
|
|
//
|
|
|
|
|
// NOTE: Since the stake difficulty must be calculated based on existing
|
|
|
|
|
// blocks, it is always calculated for the block after a given block, so
|
|
|
|
|
// the information for the previous retarget interval must be retrieved
|
|
|
|
|
// relative to the block just before it to coincide with how it was
|
|
|
|
|
// originally calculated.
|
|
|
|
|
var prevPoolSize int64
|
|
|
|
|
prevRetargetHeight := nextHeight - intervalSize - 1
|
2018-05-27 02:47:45 +00:00
|
|
|
prevRetargetNode := curNode.Ancestor(prevRetargetHeight)
|
2017-04-24 08:41:40 +00:00
|
|
|
if prevRetargetNode != nil {
|
blockchain: Don't store full header in block node.
This modifies the block node structure to include only the specifically
used fields, some of which in a more compact format, as opposed to
copying the entire header and updates all code and tests accordingly.
Not only is this a more efficient approach that helps pave the way for
future optimizations, it is also consistent with the upstream code which
helps minimize the differences to facilitate easier syncs due to less
merge conflicts.
In particular, since the merkle and stake roots, number of revocations,
size, nonce, and extradata fields aren't used currently, they are no
longer copied into the block node. Also, the block node already had a
height field, which is also in the header, so this change also removes
that duplication.
Another change is that the block node now stores the timestamp as an
int64 unix-style timestamp which is only 8 bytes versus the old
timestamp that was in the header which is a time.Time and thus 24 bytes.
It should be noted that future optimizations will very likely end up
adding most of the omitted header fields back to the block node as
individual fields so the headers can be efficiently reconstructed from
memory, however, these changes are still beneficial due to the ability
to decouple the block node storage format from the header struct which
allows more compact representations and reording of the fields for
optimal struct packing.
Ultimately, the need for the parent hash can also be removed, which will
save an additional 32 bytes which would not be possible without this
decoupling.
2018-01-28 07:56:36 +00:00
|
|
|
prevPoolSize = int64(prevRetargetNode.poolSize)
|
2017-04-24 08:41:40 +00:00
|
|
|
}
|
|
|
|
|
ticketMaturity := int64(b.chainParams.TicketMaturity)
|
2018-05-27 02:47:45 +00:00
|
|
|
prevImmatureTickets := b.sumPurchasedTickets(prevRetargetNode,
|
2017-04-24 08:41:40 +00:00
|
|
|
ticketMaturity)
|
|
|
|
|
|
|
|
|
|
// Return the existing ticket price for the first few intervals to avoid
|
|
|
|
|
// division by zero and encourage initial pool population.
|
|
|
|
|
prevPoolSizeAll := prevPoolSize + prevImmatureTickets
|
|
|
|
|
if prevPoolSizeAll == 0 {
|
|
|
|
|
return curDiff, nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Count the number of currently immature tickets.
|
2018-05-27 02:47:45 +00:00
|
|
|
immatureTickets := b.sumPurchasedTickets(curNode, ticketMaturity)
|
2017-04-24 08:41:40 +00:00
|
|
|
|
|
|
|
|
// Calculate and return the final next required difficulty.
|
blockchain: Don't store full header in block node.
This modifies the block node structure to include only the specifically
used fields, some of which in a more compact format, as opposed to
copying the entire header and updates all code and tests accordingly.
Not only is this a more efficient approach that helps pave the way for
future optimizations, it is also consistent with the upstream code which
helps minimize the differences to facilitate easier syncs due to less
merge conflicts.
In particular, since the merkle and stake roots, number of revocations,
size, nonce, and extradata fields aren't used currently, they are no
longer copied into the block node. Also, the block node already had a
height field, which is also in the header, so this change also removes
that duplication.
Another change is that the block node now stores the timestamp as an
int64 unix-style timestamp which is only 8 bytes versus the old
timestamp that was in the header which is a time.Time and thus 24 bytes.
It should be noted that future optimizations will very likely end up
adding most of the omitted header fields back to the block node as
individual fields so the headers can be efficiently reconstructed from
memory, however, these changes are still beneficial due to the ability
to decouple the block node storage format from the header struct which
allows more compact representations and reording of the fields for
optimal struct packing.
Ultimately, the need for the parent hash can also be removed, which will
save an additional 32 bytes which would not be possible without this
decoupling.
2018-01-28 07:56:36 +00:00
|
|
|
curPoolSizeAll := int64(curNode.poolSize) + immatureTickets
|
2017-04-24 08:41:40 +00:00
|
|
|
return calcNextStakeDiffV2(b.chainParams, nextHeight, curDiff,
|
|
|
|
|
prevPoolSizeAll, curPoolSizeAll), nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// calcNextRequiredStakeDifficulty calculates the required stake difficulty for
|
|
|
|
|
// the block after the passed previous block node based on the active stake
|
|
|
|
|
// difficulty retarget rules.
|
|
|
|
|
//
|
|
|
|
|
// This function differs from the exported CalcNextRequiredDifficulty in that
|
|
|
|
|
// the exported version uses the current best chain as the previous block node
|
|
|
|
|
// while this function accepts any block node.
|
|
|
|
|
//
|
|
|
|
|
// This function MUST be called with the chain state lock held (for writes).
|
|
|
|
|
func (b *BlockChain) calcNextRequiredStakeDifficulty(curNode *blockNode) (int64, error) {
|
2019-01-28 07:54:35 +00:00
|
|
|
// Determine the correct deployment version for the new stake difficulty
|
|
|
|
|
// algorithm consensus vote or treat it as active when voting is not enabled
|
|
|
|
|
// for the current network.
|
|
|
|
|
const deploymentID = chaincfg.VoteIDSDiffAlgorithm
|
|
|
|
|
deploymentVer, ok := b.deploymentVers[deploymentID]
|
|
|
|
|
if !ok {
|
2018-08-08 11:23:15 +00:00
|
|
|
return b.calcNextRequiredStakeDifficultyV2(curNode)
|
|
|
|
|
}
|
|
|
|
|
|
2017-04-24 08:41:40 +00:00
|
|
|
// Use the new stake difficulty algorithm if the stake vote for the new
|
|
|
|
|
// algorithm agenda is active.
|
|
|
|
|
//
|
|
|
|
|
// NOTE: The choice field of the return threshold state is not examined
|
|
|
|
|
// here because there is only one possible choice that can be active
|
|
|
|
|
// for the agenda, which is yes, so there is no need to check it.
|
2019-01-28 07:54:35 +00:00
|
|
|
state, err := b.deploymentState(curNode, deploymentVer, deploymentID)
|
2017-04-24 08:41:40 +00:00
|
|
|
if err != nil {
|
|
|
|
|
return 0, err
|
|
|
|
|
}
|
|
|
|
|
if state.State == ThresholdActive {
|
|
|
|
|
return b.calcNextRequiredStakeDifficultyV2(curNode)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Use the old stake difficulty algorithm in any other case.
|
|
|
|
|
return b.calcNextRequiredStakeDifficultyV1(curNode)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// CalcNextRequiredStakeDifficulty calculates the required stake difficulty for
|
|
|
|
|
// the block after the end of the current best chain based on the active stake
|
|
|
|
|
// difficulty retarget rules.
|
|
|
|
|
//
|
|
|
|
|
// This function is safe for concurrent access.
|
2016-01-20 21:46:42 +00:00
|
|
|
func (b *BlockChain) CalcNextRequiredStakeDifficulty() (int64, error) {
|
2017-04-24 08:41:40 +00:00
|
|
|
b.chainLock.Lock()
|
2018-07-09 20:00:15 +00:00
|
|
|
nextDiff, err := b.calcNextRequiredStakeDifficulty(b.bestChain.Tip())
|
2017-04-24 08:41:40 +00:00
|
|
|
b.chainLock.Unlock()
|
|
|
|
|
return nextDiff, err
|
2016-01-20 21:46:42 +00:00
|
|
|
}
|
2016-04-29 02:19:12 +00:00
|
|
|
|
2017-04-24 08:41:40 +00:00
|
|
|
// estimateNextStakeDifficultyV1 estimates the next stake difficulty by
// pretending the provided number of tickets will be purchased in the remainder
// of the interval unless the flag to use max tickets is set in which case it
// will use the max possible number of tickets that can be purchased in the
// remainder of the interval.
//
// NOTE: This uses the original stake difficulty algorithm that was used at
// Decred launch.  It is consensus critical, so the arithmetic below must not
// be altered.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) estimateNextStakeDifficultyV1(curNode *blockNode, ticketsInWindow int64, useMaxTickets bool) (int64, error) {
	alpha := b.chainParams.StakeDiffAlpha
	stakeDiffStartHeight := int64(b.chainParams.CoinbaseMaturity) +
		1
	maxRetarget := b.chainParams.RetargetAdjustmentFactor
	TicketPoolWeight := int64(b.chainParams.TicketPoolSizeWeight)

	// Number of nodes to traverse while calculating difficulty.
	nodesToTraverse := (b.chainParams.StakeDiffWindowSize *
		b.chainParams.StakeDiffWindows)

	// Genesis block.  Block at height 1 has these parameters.
	if curNode == nil ||
		curNode.height < stakeDiffStartHeight {
		return b.chainParams.MinimumStakeDiff, nil
	}

	// Create a fake blockchain on top of the current best node with
	// the number of freshly purchased tickets as indicated by the
	// user.  These bogus nodes fill out the remainder of the current
	// retarget window so the regression passes below see a full window.
	oldDiff := curNode.sbits
	topNode := curNode
	if (curNode.height+1)%b.chainParams.StakeDiffWindowSize != 0 {
		nextAdjHeight := ((curNode.height /
			b.chainParams.StakeDiffWindowSize) + 1) *
			b.chainParams.StakeDiffWindowSize
		maxTickets := (nextAdjHeight - curNode.height) *
			int64(b.chainParams.MaxFreshStakePerBlock)

		// If the user has indicated that the automatically
		// calculated maximum amount of tickets should be
		// used, plug that in here.
		if useMaxTickets {
			ticketsInWindow = maxTickets
		}

		// Double check to make sure there isn't too much.
		if ticketsInWindow > maxTickets {
			return 0, fmt.Errorf("too much fresh stake to be used "+
				"in evaluation requested; max %v, got %v", maxTickets,
				ticketsInWindow)
		}

		// Insert all the tickets into bogus nodes that will be
		// used to calculate the next difficulty below.
		ticketsToInsert := ticketsInWindow
		for i := curNode.height + 1; i < nextAdjHeight; i++ {
			var emptyHeader wire.BlockHeader
			emptyHeader.Height = uint32(i)

			// Use a constant pool size for estimate, since
			// this has much less fluctuation than freshStake.
			// TODO Use a better pool size estimate?
			emptyHeader.PoolSize = curNode.poolSize

			// Insert the fake fresh stake into each block,
			// decrementing the amount we need to use each
			// time until we hit 0.
			freshStake := b.chainParams.MaxFreshStakePerBlock
			if int64(freshStake) > ticketsToInsert {
				freshStake = uint8(ticketsToInsert)
				ticketsToInsert -= ticketsToInsert
			} else {
				ticketsToInsert -= int64(b.chainParams.MaxFreshStakePerBlock)
			}
			emptyHeader.FreshStake = freshStake

			// Connect the header.
			emptyHeader.PrevBlock = topNode.hash

			thisNode := newBlockNode(&emptyHeader, topNode)
			topNode = thisNode
		}
	}

	// The target size of the ticketPool in live tickets.  Recast these as int64
	// to avoid possible overflows for large sizes of either variable in
	// params.
	targetForTicketPool := int64(b.chainParams.TicketsPerBlock) *
		int64(b.chainParams.TicketPoolSize)

	// Initialize bigInt slice for the percentage changes for each window period
	// above or below the target.
	windowChanges := make([]*big.Int, b.chainParams.StakeDiffWindows)

	// First regression pass: regress through all of the previous blocks and
	// store the percent changes of the pool size versus its target per window
	// period; use bigInts to emulate 64.32 bit fixed point.
	oldNode := topNode
	windowPeriod := int64(0)
	weights := uint64(0)

	for i := int64(0); ; i++ {
		// Store and reset after reaching the end of every window period.
		if (i+1)%b.chainParams.StakeDiffWindowSize == 0 {
			// First adjust based on ticketPoolSize.  Skew the difference
			// in ticketPoolSize by max adjustment factor to help
			// weight ticket pool size versus tickets per block.
			poolSizeSkew := (int64(oldNode.poolSize)-
				targetForTicketPool)*TicketPoolWeight + targetForTicketPool

			// Don't let this be negative or zero.
			if poolSizeSkew <= 0 {
				poolSizeSkew = 1
			}

			curPoolSizeTemp := big.NewInt(poolSizeSkew)
			curPoolSizeTemp.Lsh(curPoolSizeTemp, 32) // Add padding
			targetTemp := big.NewInt(targetForTicketPool)

			windowAdjusted := curPoolSizeTemp.Div(curPoolSizeTemp, targetTemp)

			// Weight it exponentially.  Be aware that this could at some point
			// overflow if alpha or the number of blocks used is really large.
			windowAdjusted = windowAdjusted.Lsh(windowAdjusted,
				uint((b.chainParams.StakeDiffWindows-windowPeriod)*alpha))

			// Sum up all the different weights incrementally.
			weights += 1 << uint64((b.chainParams.StakeDiffWindows-windowPeriod)*
				alpha)

			// Store it in the slice.
			windowChanges[windowPeriod] = windowAdjusted

			// windowFreshStake = 0
			windowPeriod++
		}

		if (i + 1) == nodesToTraverse {
			break // Exit for loop when we hit the end.
		}

		// Get the previous node while staying at the genesis block as
		// needed.
		if oldNode.parent != nil {
			oldNode = oldNode.parent
		}
	}

	// Sum up the weighted window periods.
	weightedSum := big.NewInt(0)
	for i := int64(0); i < b.chainParams.StakeDiffWindows; i++ {
		weightedSum.Add(weightedSum, windowChanges[i])
	}

	// Divide by the sum of all weights.
	weightsBig := big.NewInt(int64(weights))
	weightedSumDiv := weightedSum.Div(weightedSum, weightsBig)

	// Multiply by the old stake diff.
	oldDiffBig := big.NewInt(oldDiff)
	nextDiffBig := weightedSumDiv.Mul(weightedSumDiv, oldDiffBig)

	// Right shift to restore the original padding (restore non-fixed point).
	nextDiffBig = nextDiffBig.Rsh(nextDiffBig, 32)
	nextDiffTicketPool := nextDiffBig.Int64()

	// Check to see if we're over the limits for the maximum allowable retarget;
	// if we are, return the maximum or minimum except in the case that oldDiff
	// is zero.
	if oldDiff == 0 { // This should never really happen, but in case it does...
		return nextDiffTicketPool, nil
	} else if nextDiffTicketPool == 0 {
		nextDiffTicketPool = oldDiff / maxRetarget
	} else if (nextDiffTicketPool / oldDiff) > (maxRetarget - 1) {
		nextDiffTicketPool = oldDiff * maxRetarget
	} else if (oldDiff / nextDiffTicketPool) > (maxRetarget - 1) {
		nextDiffTicketPool = oldDiff / maxRetarget
	}

	// The target number of new SStx per block for any given window period.
	targetForWindow := b.chainParams.StakeDiffWindowSize *
		int64(b.chainParams.TicketsPerBlock)

	// Second regression pass: regress through all of the previous blocks and
	// store the percent changes of fresh stake versus its target per window
	// period; use bigInts to emulate 64.32 bit fixed point.
	oldNode = topNode
	windowFreshStake := int64(0)
	windowPeriod = int64(0)
	weights = uint64(0)

	for i := int64(0); ; i++ {
		// Add the fresh stake into the store for this window period.
		windowFreshStake += int64(oldNode.freshStake)

		// Store and reset after reaching the end of every window period.
		if (i+1)%b.chainParams.StakeDiffWindowSize == 0 {
			// Don't let fresh stake be zero.
			if windowFreshStake <= 0 {
				windowFreshStake = 1
			}

			freshTemp := big.NewInt(windowFreshStake)
			freshTemp.Lsh(freshTemp, 32) // Add padding
			targetTemp := big.NewInt(targetForWindow)

			// Get the percentage change.
			windowAdjusted := freshTemp.Div(freshTemp, targetTemp)

			// Weight it exponentially.  Be aware that this could at some point
			// overflow if alpha or the number of blocks used is really large.
			windowAdjusted = windowAdjusted.Lsh(windowAdjusted,
				uint((b.chainParams.StakeDiffWindows-windowPeriod)*alpha))

			// Sum up all the different weights incrementally.
			weights += 1 <<
				uint64((b.chainParams.StakeDiffWindows-windowPeriod)*alpha)

			// Store it in the slice.
			windowChanges[windowPeriod] = windowAdjusted

			windowFreshStake = 0
			windowPeriod++
		}

		if (i + 1) == nodesToTraverse {
			break // Exit for loop when we hit the end.
		}

		// Get the previous node while staying at the genesis block as
		// needed.
		if oldNode.parent != nil {
			oldNode = oldNode.parent
		}
	}

	// Sum up the weighted window periods.
	weightedSum = big.NewInt(0)
	for i := int64(0); i < b.chainParams.StakeDiffWindows; i++ {
		weightedSum.Add(weightedSum, windowChanges[i])
	}

	// Divide by the sum of all weights.
	weightsBig = big.NewInt(int64(weights))
	weightedSumDiv = weightedSum.Div(weightedSum, weightsBig)

	// Multiply by the old stake diff.
	oldDiffBig = big.NewInt(oldDiff)
	nextDiffBig = weightedSumDiv.Mul(weightedSumDiv, oldDiffBig)

	// Right shift to restore the original padding (restore non-fixed point).
	nextDiffBig = nextDiffBig.Rsh(nextDiffBig, 32)
	nextDiffFreshStake := nextDiffBig.Int64()

	// Check to see if we're over the limits for the maximum allowable retarget;
	// if we are, return the maximum or minimum except in the case that oldDiff
	// is zero.
	if oldDiff == 0 { // This should never really happen, but in case it does...
		return nextDiffFreshStake, nil
	} else if nextDiffFreshStake == 0 {
		nextDiffFreshStake = oldDiff / maxRetarget
	} else if (nextDiffFreshStake / oldDiff) > (maxRetarget - 1) {
		nextDiffFreshStake = oldDiff * maxRetarget
	} else if (oldDiff / nextDiffFreshStake) > (maxRetarget - 1) {
		nextDiffFreshStake = oldDiff / maxRetarget
	}

	// Average the two differences using scaled multiplication.
	nextDiff := mergeDifficulty(oldDiff, nextDiffTicketPool, nextDiffFreshStake)

	// Check to see if we're over the limits for the maximum allowable retarget;
	// if we are, return the maximum or minimum except in the case that oldDiff
	// is zero.
	if oldDiff == 0 { // This should never really happen, but in case it does...
		return oldDiff, nil
	} else if nextDiff == 0 {
		nextDiff = oldDiff / maxRetarget
	} else if (nextDiff / oldDiff) > (maxRetarget - 1) {
		nextDiff = oldDiff * maxRetarget
	} else if (oldDiff / nextDiff) > (maxRetarget - 1) {
		nextDiff = oldDiff / maxRetarget
	}

	// If the next diff is below the network minimum, set the required stake
	// difficulty to the minimum.
	if nextDiff < b.chainParams.MinimumStakeDiff {
		return b.chainParams.MinimumStakeDiff, nil
	}

	return nextDiff, nil
}
|
|
|
|
|
|
2017-04-24 08:41:40 +00:00
|
|
|
// estimateNextStakeDifficultyV2 estimates the next stake difficulty using the
|
|
|
|
|
// algorithm defined in DCP0001 by pretending the provided number of tickets
|
|
|
|
|
// will be purchased in the remainder of the interval unless the flag to use max
|
|
|
|
|
// tickets is set in which case it will use the max possible number of tickets
|
|
|
|
|
// that can be purchased in the remainder of the interval.
|
|
|
|
|
//
|
|
|
|
|
// This function MUST be called with the chain state lock held (for writes).
|
|
|
|
|
func (b *BlockChain) estimateNextStakeDifficultyV2(curNode *blockNode, newTickets int64, useMaxTickets bool) (int64, error) {
|
|
|
|
|
// Calculate the next retarget interval height.
|
|
|
|
|
curHeight := int64(0)
|
|
|
|
|
if curNode != nil {
|
|
|
|
|
curHeight = curNode.height
|
|
|
|
|
}
|
2018-03-03 20:54:54 +00:00
|
|
|
ticketMaturity := int64(b.chainParams.TicketMaturity)
|
2017-04-24 08:41:40 +00:00
|
|
|
intervalSize := b.chainParams.StakeDiffWindowSize
|
|
|
|
|
blocksUntilRetarget := intervalSize - curHeight%intervalSize
|
|
|
|
|
nextRetargetHeight := curHeight + blocksUntilRetarget
|
|
|
|
|
|
|
|
|
|
// Calculate the maximum possible number of tickets that could be sold
|
|
|
|
|
// in the remainder of the interval and potentially override the number
|
|
|
|
|
// of new tickets to include in the estimate per the user-specified
|
|
|
|
|
// flag.
|
|
|
|
|
maxTicketsPerBlock := int64(b.chainParams.MaxFreshStakePerBlock)
|
|
|
|
|
maxRemainingTickets := (blocksUntilRetarget - 1) * maxTicketsPerBlock
|
|
|
|
|
if useMaxTickets {
|
|
|
|
|
newTickets = maxRemainingTickets
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Ensure the specified number of tickets is not too high.
|
|
|
|
|
if newTickets > maxRemainingTickets {
|
|
|
|
|
return 0, fmt.Errorf("unable to create an estimated stake "+
|
|
|
|
|
"difficulty with %d tickets since it is more than "+
|
|
|
|
|
"the maximum remaining of %d", newTickets,
|
|
|
|
|
maxRemainingTickets)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Stake difficulty before any tickets could possibly be purchased is
|
|
|
|
|
// the minimum value.
|
|
|
|
|
stakeDiffStartHeight := int64(b.chainParams.CoinbaseMaturity) + 1
|
|
|
|
|
if nextRetargetHeight < stakeDiffStartHeight {
|
|
|
|
|
return b.chainParams.MinimumStakeDiff, nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Get the pool size and number of tickets that were immature at the
|
|
|
|
|
// previous retarget interval
|
|
|
|
|
//
|
|
|
|
|
// NOTE: Since the stake difficulty must be calculated based on existing
|
|
|
|
|
// blocks, it is always calculated for the block after a given block, so
|
|
|
|
|
// the information for the previous retarget interval must be retrieved
|
|
|
|
|
// relative to the block just before it to coincide with how it was
|
|
|
|
|
// originally calculated.
|
|
|
|
|
var prevPoolSize int64
|
|
|
|
|
prevRetargetHeight := nextRetargetHeight - intervalSize - 1
|
2018-05-27 02:47:45 +00:00
|
|
|
prevRetargetNode := curNode.Ancestor(prevRetargetHeight)
|
2017-04-24 08:41:40 +00:00
|
|
|
if prevRetargetNode != nil {
|
blockchain: Don't store full header in block node.
This modifies the block node structure to include only the specifically
used fields, some of which in a more compact format, as opposed to
copying the entire header and updates all code and tests accordingly.
Not only is this a more efficient approach that helps pave the way for
future optimizations, it is also consistent with the upstream code which
helps minimize the differences to facilitate easier syncs due to less
merge conflicts.
In particular, since the merkle and stake roots, number of revocations,
size, nonce, and extradata fields aren't used currently, they are no
longer copied into the block node. Also, the block node already had a
height field, which is also in the header, so this change also removes
that duplication.
Another change is that the block node now stores the timestamp as an
int64 unix-style timestamp which is only 8 bytes versus the old
timestamp that was in the header which is a time.Time and thus 24 bytes.
It should be noted that future optimizations will very likely end up
adding most of the omitted header fields back to the block node as
individual fields so the headers can be efficiently reconstructed from
memory, however, these changes are still beneficial due to the ability
to decouple the block node storage format from the header struct which
allows more compact representations and reording of the fields for
optimal struct packing.
Ultimately, the need for the parent hash can also be removed, which will
save an additional 32 bytes which would not be possible without this
decoupling.
2018-01-28 07:56:36 +00:00
|
|
|
prevPoolSize = int64(prevRetargetNode.poolSize)
|
2017-04-24 08:41:40 +00:00
|
|
|
}
|
2018-05-27 02:47:45 +00:00
|
|
|
prevImmatureTickets := b.sumPurchasedTickets(prevRetargetNode,
|
2017-04-24 08:41:40 +00:00
|
|
|
ticketMaturity)
|
|
|
|
|
|
|
|
|
|
// Return the existing ticket price for the first few intervals to avoid
|
|
|
|
|
// division by zero and encourage initial pool population.
|
blockchain: Don't store full header in block node.
This modifies the block node structure to include only the specifically
used fields, some of which in a more compact format, as opposed to
copying the entire header and updates all code and tests accordingly.
Not only is this a more efficient approach that helps pave the way for
future optimizations, it is also consistent with the upstream code which
helps minimize the differences to facilitate easier syncs due to less
merge conflicts.
In particular, since the merkle and stake roots, number of revocations,
size, nonce, and extradata fields aren't used currently, they are no
longer copied into the block node. Also, the block node already had a
height field, which is also in the header, so this change also removes
that duplication.
Another change is that the block node now stores the timestamp as an
int64 unix-style timestamp which is only 8 bytes versus the old
timestamp that was in the header which is a time.Time and thus 24 bytes.
It should be noted that future optimizations will very likely end up
adding most of the omitted header fields back to the block node as
individual fields so the headers can be efficiently reconstructed from
memory, however, these changes are still beneficial due to the ability
to decouple the block node storage format from the header struct which
allows more compact representations and reording of the fields for
optimal struct packing.
Ultimately, the need for the parent hash can also be removed, which will
save an additional 32 bytes which would not be possible without this
decoupling.
2018-01-28 07:56:36 +00:00
|
|
|
curDiff := curNode.sbits
|
2017-04-24 08:41:40 +00:00
|
|
|
prevPoolSizeAll := prevPoolSize + prevImmatureTickets
|
|
|
|
|
if prevPoolSizeAll == 0 {
|
|
|
|
|
return curDiff, nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Calculate the number of tickets that will still be immature at the
|
2018-03-03 20:54:54 +00:00
|
|
|
// next retarget based on the known (non-estimated) data.
|
|
|
|
|
//
|
|
|
|
|
// Note that when the interval size is larger than the ticket maturity,
|
|
|
|
|
// the current height might be before the maturity floor (the point
|
|
|
|
|
// after which the remaining tickets will remain immature). There are
|
|
|
|
|
// therefore no possible remaining immature tickets from the blocks that
|
|
|
|
|
// are not being estimated in that case.
|
|
|
|
|
var remainingImmatureTickets int64
|
2017-04-24 08:41:40 +00:00
|
|
|
nextMaturityFloor := nextRetargetHeight - ticketMaturity - 1
|
2018-03-03 20:54:54 +00:00
|
|
|
if curHeight > nextMaturityFloor {
|
2018-05-27 02:47:45 +00:00
|
|
|
remainingImmatureTickets = b.sumPurchasedTickets(curNode,
|
2018-03-03 20:54:54 +00:00
|
|
|
curHeight-nextMaturityFloor)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Add the number of tickets that will still be immature at the next
|
|
|
|
|
// retarget based on the estimated data.
|
|
|
|
|
maxImmatureTickets := ticketMaturity * maxTicketsPerBlock
|
|
|
|
|
if newTickets > maxImmatureTickets {
|
|
|
|
|
remainingImmatureTickets += maxImmatureTickets
|
|
|
|
|
} else {
|
|
|
|
|
remainingImmatureTickets += newTickets
|
2017-04-24 08:41:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Calculate the number of tickets that will mature in the remainder of
|
2018-03-03 20:54:54 +00:00
|
|
|
// the interval based on the known (non-estimated) data.
|
2017-04-24 08:41:40 +00:00
|
|
|
//
|
|
|
|
|
// NOTE: The pool size in the block headers does not include the tickets
|
|
|
|
|
// maturing at the height in which they mature since they are not
|
|
|
|
|
// eligible for selection until the next block, so exclude them by
|
|
|
|
|
// starting one block before the next maturity floor.
|
2018-03-03 20:54:54 +00:00
|
|
|
finalMaturingHeight := nextMaturityFloor - 1
|
|
|
|
|
if finalMaturingHeight > curHeight {
|
|
|
|
|
finalMaturingHeight = curHeight
|
|
|
|
|
}
|
2018-05-27 02:47:45 +00:00
|
|
|
finalMaturingNode := curNode.Ancestor(finalMaturingHeight)
|
2018-03-03 20:54:54 +00:00
|
|
|
firstMaturingHeight := curHeight - ticketMaturity
|
2018-05-27 02:47:45 +00:00
|
|
|
maturingTickets := b.sumPurchasedTickets(finalMaturingNode,
|
2018-03-03 20:54:54 +00:00
|
|
|
finalMaturingHeight-firstMaturingHeight+1)
|
2017-04-24 08:41:40 +00:00
|
|
|
|
2018-03-03 20:54:54 +00:00
|
|
|
// Add the number of tickets that will mature based on the estimated data.
|
|
|
|
|
//
|
|
|
|
|
// Note that when the ticket maturity is greater than or equal to the
|
|
|
|
|
// interval size, the current height will always be after the maturity
|
|
|
|
|
// floor. There are therefore no possible maturing estimated tickets
|
|
|
|
|
// in that case.
|
|
|
|
|
if curHeight < nextMaturityFloor {
|
|
|
|
|
maturingEstimateNodes := nextMaturityFloor - curHeight - 1
|
|
|
|
|
maturingEstimatedTickets := maxTicketsPerBlock * maturingEstimateNodes
|
|
|
|
|
if maturingEstimatedTickets > newTickets {
|
|
|
|
|
maturingEstimatedTickets = newTickets
|
|
|
|
|
}
|
|
|
|
|
maturingTickets += maturingEstimatedTickets
|
|
|
|
|
}
|
|
|
|
|
|
2017-04-24 08:41:40 +00:00
|
|
|
// Calculate the number of votes that will occur during the remainder of
|
|
|
|
|
// the interval.
|
2017-10-10 21:20:40 +00:00
|
|
|
stakeValidationHeight := b.chainParams.StakeValidationHeight
|
2017-04-24 08:41:40 +00:00
|
|
|
var pendingVotes int64
|
|
|
|
|
if nextRetargetHeight > stakeValidationHeight {
|
|
|
|
|
votingBlocks := blocksUntilRetarget - 1
|
|
|
|
|
if curHeight < stakeValidationHeight {
|
|
|
|
|
votingBlocks = nextRetargetHeight - stakeValidationHeight
|
|
|
|
|
}
|
|
|
|
|
votesPerBlock := int64(b.chainParams.TicketsPerBlock)
|
|
|
|
|
pendingVotes = votingBlocks * votesPerBlock
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Calculate what the pool size would be as of the next interval.
|
blockchain: Don't store full header in block node.
This modifies the block node structure to include only the specifically
used fields, some of which in a more compact format, as opposed to
copying the entire header and updates all code and tests accordingly.
Not only is this a more efficient approach that helps pave the way for
future optimizations, it is also consistent with the upstream code which
helps minimize the differences to facilitate easier syncs due to less
merge conflicts.
In particular, since the merkle and stake roots, number of revocations,
size, nonce, and extradata fields aren't used currently, they are no
longer copied into the block node. Also, the block node already had a
height field, which is also in the header, so this change also removes
that duplication.
Another change is that the block node now stores the timestamp as an
int64 unix-style timestamp which is only 8 bytes versus the old
timestamp that was in the header which is a time.Time and thus 24 bytes.
It should be noted that future optimizations will very likely end up
adding most of the omitted header fields back to the block node as
individual fields so the headers can be efficiently reconstructed from
memory, however, these changes are still beneficial due to the ability
to decouple the block node storage format from the header struct which
allows more compact representations and reordering of the fields for
optimal struct packing.
Ultimately, the need for the parent hash can also be removed, which will
save an additional 32 bytes which would not be possible without this
decoupling.
2018-01-28 07:56:36 +00:00
|
|
|
curPoolSize := int64(curNode.poolSize)
|
2017-04-24 08:41:40 +00:00
|
|
|
estimatedPoolSize := curPoolSize + maturingTickets - pendingVotes
|
2018-03-03 20:54:54 +00:00
|
|
|
estimatedPoolSizeAll := estimatedPoolSize + remainingImmatureTickets
|
2017-04-24 08:41:40 +00:00
|
|
|
|
|
|
|
|
// Calculate and return the final estimated difficulty.
|
|
|
|
|
return calcNextStakeDiffV2(b.chainParams, nextRetargetHeight, curDiff,
|
|
|
|
|
prevPoolSizeAll, estimatedPoolSizeAll), nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// estimateNextStakeDifficulty estimates the next stake difficulty by pretending
|
|
|
|
|
// the provided number of tickets will be purchased in the remainder of the
|
|
|
|
|
// interval unless the flag to use max tickets is set in which case it will use
|
|
|
|
|
// the max possible number of tickets that can be purchased in the remainder of
|
|
|
|
|
// the interval.
|
|
|
|
|
//
|
|
|
|
|
// The stake difficulty algorithm is selected based on the active rules.
|
|
|
|
|
//
|
|
|
|
|
// This function differs from the exported EstimateNextStakeDifficulty in that
|
|
|
|
|
// the exported version uses the current best chain as the block node while this
|
|
|
|
|
// function accepts any block node.
|
|
|
|
|
//
|
|
|
|
|
// This function MUST be called with the chain state lock held (for writes).
|
|
|
|
|
func (b *BlockChain) estimateNextStakeDifficulty(curNode *blockNode, newTickets int64, useMaxTickets bool) (int64, error) {
|
2019-01-28 07:54:35 +00:00
|
|
|
// Determine the correct deployment version for the new stake difficulty
|
|
|
|
|
// algorithm consensus vote or treat it as active when voting is not enabled
|
|
|
|
|
// for the current network.
|
|
|
|
|
const deploymentID = chaincfg.VoteIDSDiffAlgorithm
|
|
|
|
|
deploymentVer, ok := b.deploymentVers[deploymentID]
|
|
|
|
|
if !ok {
|
2018-08-08 11:23:15 +00:00
|
|
|
return b.calcNextRequiredStakeDifficultyV2(curNode)
|
|
|
|
|
}
|
|
|
|
|
|
2017-04-24 08:41:40 +00:00
|
|
|
// Use the new stake difficulty algorithm if the stake vote for the new
|
|
|
|
|
// algorithm agenda is active.
|
|
|
|
|
//
|
|
|
|
|
// NOTE: The choice field of the return threshold state is not examined
|
|
|
|
|
// here because there is only one possible choice that can be active
|
|
|
|
|
// for the agenda, which is yes, so there is no need to check it.
|
2019-01-28 07:54:35 +00:00
|
|
|
state, err := b.deploymentState(curNode, deploymentVer, deploymentID)
|
2017-04-24 08:41:40 +00:00
|
|
|
if err != nil {
|
|
|
|
|
return 0, err
|
|
|
|
|
}
|
|
|
|
|
if state.State == ThresholdActive {
|
|
|
|
|
return b.estimateNextStakeDifficultyV2(curNode, newTickets,
|
|
|
|
|
useMaxTickets)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Use the old stake difficulty algorithm in any other case.
|
|
|
|
|
return b.estimateNextStakeDifficultyV1(curNode, newTickets,
|
2016-04-29 02:19:12 +00:00
|
|
|
useMaxTickets)
|
|
|
|
|
}
|
2017-04-24 08:41:40 +00:00
|
|
|
|
|
|
|
|
// EstimateNextStakeDifficulty estimates the next stake difficulty by pretending
|
|
|
|
|
// the provided number of tickets will be purchased in the remainder of the
|
|
|
|
|
// interval unless the flag to use max tickets is set in which case it will use
|
|
|
|
|
// the max possible number of tickets that can be purchased in the remainder of
|
|
|
|
|
// the interval.
|
|
|
|
|
//
|
|
|
|
|
// This function is safe for concurrent access.
|
|
|
|
|
func (b *BlockChain) EstimateNextStakeDifficulty(newTickets int64, useMaxTickets bool) (int64, error) {
|
|
|
|
|
b.chainLock.Lock()
|
2018-07-09 20:00:15 +00:00
|
|
|
estimate, err := b.estimateNextStakeDifficulty(b.bestChain.Tip(),
|
|
|
|
|
newTickets, useMaxTickets)
|
2017-04-24 08:41:40 +00:00
|
|
|
b.chainLock.Unlock()
|
|
|
|
|
return estimate, err
|
|
|
|
|
}
|