// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2014 The Bitcoin Core developers
// Copyright (c) 2019-2020 The Hush developers
// Distributed under the GPLv3 software license, see the accompanying
// file COPYING or https://www.gnu.org/licenses/gpl-3.0.en.html
/******************************************************************************
 * Copyright © 2014-2019 The SuperNET Developers.                             *
 *                                                                            *
 * See the AUTHORS, DEVELOPER-AGREEMENT and LICENSE files at                  *
 * the top-level directory of this distribution for the individual copyright  *
 * holder information and the developer policies on copyright and licensing.  *
 *                                                                            *
 * Unless otherwise agreed in a custom licensing agreement, no part of the    *
 * SuperNET software, including this file may be copied, modified, propagated *
 * or distributed except according to the terms contained in the LICENSE file *
 *                                                                            *
 * Removal or modification of this copyright notice is prohibited.            *
 *                                                                            *
 ******************************************************************************/
#include "txmempool.h"
#include "clientversion.h"
#include "consensus/consensus.h"
#include "consensus/validation.h"
#include "main.h"
#include "policy/fees.h"
#include "streams.h"
#include "timedata.h"
#include "util.h"
#include "utilmoneystr.h"
#include "validationinterface.h"
#include "version.h"
#define _COINBASE_MATURITY 100
using namespace std;
CTxMemPoolEntry::CTxMemPoolEntry():
nFee(0), nTxSize(0), nModSize(0), nUsageSize(0), nTime(0), dPriority(0.0),
hadNoDependencies(false), spendsCoinbase(false)
{
nHeight = MEMPOOL_HEIGHT;
}
CTxMemPoolEntry::CTxMemPoolEntry(const CTransaction& _tx, const CAmount& _nFee,
int64_t _nTime, double _dPriority,
unsigned int _nHeight, bool poolHasNoInputsOf,
bool _spendsCoinbase, uint32_t _nBranchId):
tx(_tx), nFee(_nFee), nTime(_nTime), dPriority(_dPriority), nHeight(_nHeight),
hadNoDependencies(poolHasNoInputsOf),
spendsCoinbase(_spendsCoinbase), nBranchId(_nBranchId)
{
nTxSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION);
nModSize = tx.CalculateModifiedSize(nTxSize);
nUsageSize = RecursiveDynamicUsage(tx);
feeRate = CFeeRate(nFee, nTxSize);
}
CTxMemPoolEntry::CTxMemPoolEntry(const CTxMemPoolEntry& other)
{
*this = other;
}
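// Return the entry's priority at currentHeight: the initial priority plus the
// input value (outputs + fee) multiplied by the number of blocks the transaction
// has aged in the pool, divided by the modified transaction size.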
double
CTxMemPoolEntry::GetPriority(unsigned int currentHeight) const
{
CAmount nValueIn = tx.GetValueOut()+nFee;
double deltaPriority = ((double)(currentHeight-nHeight)*nValueIn)/nModSize;
double dResult = dPriority + deltaPriority;
return dResult;
}
CTxMemPool::CTxMemPool(const CFeeRate& _minRelayFee) :
nTransactionsUpdated(0)
{
// Sanity checks off by default for performance, because otherwise
// accepting transactions becomes O(N^2) where N is the number
// of transactions in the pool
nCheckFrequency = 0;
minerPolicyEstimator = new CBlockPolicyEstimator(_minRelayFee);
}
CTxMemPool::~CTxMemPool()
{
delete minerPolicyEstimator;
}
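// Mark as spent, in the supplied CCoins record, every output of hashTx that is
// already spent by a transaction in the mempool.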
void CTxMemPool::pruneSpent(const uint256 &hashTx, CCoins &coins)
{
LOCK(cs);
std::map<COutPoint, CInPoint>::iterator it = mapNextTx.lower_bound(COutPoint(hashTx, 0));
// iterate over all COutPoints in mapNextTx whose hash equals the provided hashTx
while (it != mapNextTx.end() && it->first.hash == hashTx) {
coins.Spend(it->first.n); // and remove those outputs from coins
it++;
}
}
unsigned int CTxMemPool::GetTransactionsUpdated() const
{
LOCK(cs);
return nTransactionsUpdated;
}
void CTxMemPool::AddTransactionsUpdated(unsigned int n)
{
LOCK(cs);
nTransactionsUpdated += n;
}
bool CTxMemPool::addUnchecked(const uint256& hash, const CTxMemPoolEntry &entry, bool fCurrentEstimate)
{
// Add to memory pool without checking anything.
// Used by main.cpp AcceptToMemoryPool(), which DOES do
// all the appropriate checks.
LOCK(cs);
mapTx.insert(entry);
const CTransaction& tx = mapTx.find(hash)->GetTx();
mapRecentlyAddedTx[tx.GetHash()] = &tx;
nRecentlyAddedSequence += 1;
if (!tx.IsCoinImport()) {
for (unsigned int i = 0; i < tx.vin.size(); i++)
{
if (tx.IsPegsImport() && i==0) continue;
mapNextTx[tx.vin[i].prevout] = CInPoint(&tx, i);
}
}
BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) {
BOOST_FOREACH(const uint256 &nf, joinsplit.nullifiers) {
mapSproutNullifiers[nf] = &tx;
}
}
for (const SpendDescription &spendDescription : tx.vShieldedSpend) {
mapSaplingNullifiers[spendDescription.nullifier] = &tx;
}
nTransactionsUpdated++;
totalTxSize += entry.GetTxSize();
cachedInnerUsage += entry.DynamicMemoryUsage();
minerPolicyEstimator->processTransaction(entry, fCurrentEstimate);
return true;
}
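// Build the in-mempool address index for this entry: a negative value delta for
// every input spent and a positive delta for every output created, keyed by the
// destination address. Keys are remembered in mapAddressInserted so they can be
// erased when the transaction leaves the pool.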
void CTxMemPool::addAddressIndex(const CTxMemPoolEntry &entry, const CCoinsViewCache &view)
{
LOCK(cs);
const CTransaction& tx = entry.GetTx();
std::vector<CMempoolAddressDeltaKey> inserted;
uint256 txhash = tx.GetHash();
for (unsigned int j = 0; j < tx.vin.size(); j++) {
if (tx.IsPegsImport() && j==0) continue;
const CTxIn input = tx.vin[j];
const CTxOut &prevout = view.GetOutputFor(input);
vector<vector<unsigned char>> vSols;
txnouttype txType = TX_PUBKEYHASH;
int keyType = 1;
CTxDestination vDest;
if (Solver(prevout.scriptPubKey, txType, vSols) || ExtractDestination(prevout.scriptPubKey, vDest))
{
if (vDest.which())
{
uint160 hashBytes;
if (CBitcoinAddress(vDest).GetIndexKey(hashBytes, keyType, prevout.scriptPubKey.IsPayToCryptoCondition()))
{
vSols.push_back(vector<unsigned char>(hashBytes.begin(), hashBytes.end()));
}
}
if (txType == TX_SCRIPTHASH)
{
keyType = 2;
}
for (auto addr : vSols)
{
CMempoolAddressDeltaKey key(keyType, addr.size() == 20 ? uint160(addr) : Hash160(addr), txhash, j, true);
CMempoolAddressDelta delta(entry.GetTime(), prevout.nValue * -1, input.prevout.hash, input.prevout.n);
mapAddress.insert(make_pair(key, delta));
inserted.push_back(key);
}
}
}
for (unsigned int k = 0; k < tx.vout.size(); k++) {
const CTxOut &out = tx.vout[k];
vector<vector<unsigned char>> vSols;
CTxDestination vDest;
txnouttype txType = TX_PUBKEYHASH;
int keyType = 1;
if ((Solver(out.scriptPubKey, txType, vSols) || ExtractDestination(out.scriptPubKey, vDest)) && txType != TX_MULTISIG)
{
// if we failed to solve, and got a vDest, assume P2PKH or P2PK address returned
if (vDest.which())
{
uint160 hashBytes;
if (CBitcoinAddress(vDest).GetIndexKey(hashBytes, keyType, out.scriptPubKey.IsPayToCryptoCondition()))
{
vSols.push_back(vector<unsigned char>(hashBytes.begin(), hashBytes.end()));
}
}
else if (txType == TX_SCRIPTHASH)
{
keyType = 2;
}
for (auto addr : vSols)
{
CMempoolAddressDeltaKey key(keyType, addr.size() == 20 ? uint160(addr) : Hash160(addr), txhash, k, 0);
mapAddress.insert(make_pair(key, CMempoolAddressDelta(entry.GetTime(), out.nValue)));
inserted.push_back(key);
}
}
}
mapAddressInserted.insert(make_pair(txhash, inserted));
}
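// Collect every mempool address delta matching the given (address, type) pairs.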
bool CTxMemPool::getAddressIndex(std::vector<std::pair<uint160, int> > &addresses,
std::vector<std::pair<CMempoolAddressDeltaKey, CMempoolAddressDelta> > &results)
{
LOCK(cs);
for (std::vector<std::pair<uint160, int> >::iterator it = addresses.begin(); it != addresses.end(); it++) {
addressDeltaMap::iterator ait = mapAddress.lower_bound(CMempoolAddressDeltaKey((*it).second, (*it).first));
while (ait != mapAddress.end() && (*ait).first.addressBytes == (*it).first && (*ait).first.type == (*it).second) {
results.push_back(*ait);
ait++;
}
}
return true;
}
bool CTxMemPool::removeAddressIndex(const uint256 txhash)
{
LOCK(cs);
addressDeltaMapInserted::iterator it = mapAddressInserted.find(txhash);
if (it != mapAddressInserted.end()) {
std::vector<CMempoolAddressDeltaKey> keys = (*it).second;
for (std::vector<CMempoolAddressDeltaKey>::iterator mit = keys.begin(); mit != keys.end(); mit++) {
mapAddress.erase(*mit);
}
mapAddressInserted.erase(it);
}
return true;
}
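// Record, for each outpoint this transaction spends, the spending txid, input
// index, value and destination, so the spent index also covers unconfirmed spends.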
void CTxMemPool::addSpentIndex(const CTxMemPoolEntry &entry, const CCoinsViewCache &view)
{
LOCK(cs);
const CTransaction& tx = entry.GetTx();
std::vector<CSpentIndexKey> inserted;
uint256 txhash = tx.GetHash();
for (unsigned int j = 0; j < tx.vin.size(); j++) {
if (tx.IsPegsImport() && j==0) continue;
const CTxIn input = tx.vin[j];
const CTxOut &prevout = view.GetOutputFor(input);
vector<vector<unsigned char>> vSols;
CTxDestination vDest;
txnouttype txType = TX_PUBKEYHASH;
int keyType = 1;
// some non-standard types, like time lock coinbases, don't solve, but do extract
if ((Solver(prevout.scriptPubKey, txType, vSols) || ExtractDestination(prevout.scriptPubKey, vDest)) && txType != TX_MULTISIG)
{
// if we failed to solve, and got a vDest, assume P2PKH or P2PK address returned
if (vDest.which())
{
CKeyID kid;
if (CBitcoinAddress(vDest).GetKeyID(kid))
{
vSols.push_back(vector<unsigned char>(kid.begin(), kid.end()));
}
}
else if (txType == TX_SCRIPTHASH)
{
keyType = 2;
}
for (auto addr : vSols)
{
CSpentIndexKey key = CSpentIndexKey(input.prevout.hash, input.prevout.n);
CSpentIndexValue value = CSpentIndexValue(txhash, j, -1, prevout.nValue, keyType, addr.size() == 20 ? uint160(addr) : Hash160(addr));
mapSpent.insert(make_pair(key, value));
inserted.push_back(key);
}
}
else
{
// don't know exactly how, but it was spent
CSpentIndexKey key = CSpentIndexKey(input.prevout.hash, input.prevout.n);
CSpentIndexValue value = CSpentIndexValue(txhash, j, -1, prevout.nValue, 0, uint160());
mapSpent.insert(make_pair(key, value));
inserted.push_back(key);
}
}
mapSpentInserted.insert(make_pair(txhash, inserted));
}
bool CTxMemPool::getSpentIndex(CSpentIndexKey &key, CSpentIndexValue &value)
{
LOCK(cs);
mapSpentIndex::iterator it;
it = mapSpent.find(key);
if (it != mapSpent.end()) {
value = it->second;
return true;
}
return false;
}
bool CTxMemPool::removeSpentIndex(const uint256 txhash)
{
LOCK(cs);
mapSpentIndexInserted::iterator it = mapSpentInserted.find(txhash);
if (it != mapSpentInserted.end()) {
std::vector<CSpentIndexKey> keys = (*it).second;
for (std::vector<CSpentIndexKey>::iterator mit = keys.begin(); mit != keys.end(); mit++) {
mapSpent.erase(*mit);
}
mapSpentInserted.erase(it);
}
return true;
}
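// Remove origTx from the mempool; if fRecursive is set, also remove any in-pool
// transactions that spend its outputs. All removed transactions are appended to
// `removed`.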
void CTxMemPool::remove(const CTransaction &origTx, std::list<CTransaction>& removed, bool fRecursive)
{
// Remove transaction from memory pool
{
LOCK(cs);
std::deque<uint256> txToRemove;
txToRemove.push_back(origTx.GetHash());
if (fRecursive && !mapTx.count(origTx.GetHash())) {
// If recursively removing but origTx isn't in the mempool
// be sure to remove any children that are in the pool. This can
// happen during chain re-orgs if origTx isn't re-accepted into
// the mempool for any reason.
for (unsigned int i = 0; i < origTx.vout.size(); i++) {
std::map<COutPoint, CInPoint>::iterator it = mapNextTx.find(COutPoint(origTx.GetHash(), i));
if (it == mapNextTx.end())
continue;
txToRemove.push_back(it->second.ptx->GetHash());
}
}
while (!txToRemove.empty())
{
uint256 hash = txToRemove.front();
txToRemove.pop_front();
if (!mapTx.count(hash))
continue;
const CTransaction& tx = mapTx.find(hash)->GetTx();
if (fRecursive) {
for (unsigned int i = 0; i < tx.vout.size(); i++) {
std::map<COutPoint, CInPoint>::iterator it = mapNextTx.find(COutPoint(hash, i));
if (it == mapNextTx.end())
continue;
txToRemove.push_back(it->second.ptx->GetHash());
}
}
mapRecentlyAddedTx.erase(hash);
BOOST_FOREACH(const CTxIn& txin, tx.vin)
mapNextTx.erase(txin.prevout);
BOOST_FOREACH(const JSDescription& joinsplit, tx.vjoinsplit) {
BOOST_FOREACH(const uint256& nf, joinsplit.nullifiers) {
mapSproutNullifiers.erase(nf);
}
}
for (const SpendDescription &spendDescription : tx.vShieldedSpend) {
mapSaplingNullifiers.erase(spendDescription.nullifier);
}
removed.push_back(tx);
totalTxSize -= mapTx.find(hash)->GetTxSize();
cachedInnerUsage -= mapTx.find(hash)->DynamicMemoryUsage();
mapTx.erase(hash);
nTransactionsUpdated++;
minerPolicyEstimator->removeTx(hash);
removeAddressIndex(hash);
removeSpentIndex(hash);
}
}
}
extern uint64_t ASSETCHAINS_TIMELOCKGTE;
int64_t komodo_block_unlocktime(uint32_t nHeight);
void CTxMemPool::removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags)
{
// Remove transactions which are no longer final, or which spend a coinbase
// that is now immature.
extern char ASSETCHAINS_SYMBOL[KOMODO_ASSETCHAIN_MAXLEN];
if ( ASSETCHAINS_SYMBOL[0] == 0 )
COINBASE_MATURITY = _COINBASE_MATURITY;
LOCK(cs);
list<CTransaction> transactionsToRemove;
for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) {
const CTransaction& tx = it->GetTx();
if (!CheckFinalTx(tx, flags)) {
transactionsToRemove.push_back(tx);
} else if (it->GetSpendsCoinbase()) {
BOOST_FOREACH(const CTxIn& txin, tx.vin) {
indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.hash);
if (it2 != mapTx.end())
continue;
const CCoins *coins = pcoins->AccessCoins(txin.prevout.hash);
if (nCheckFrequency != 0) assert(coins);
if (!coins || (coins->IsCoinBase() && (((signed long)nMemPoolHeight) - coins->nHeight < COINBASE_MATURITY) &&
((signed long)nMemPoolHeight < komodo_block_unlocktime(coins->nHeight) &&
coins->IsAvailable(0) && coins->vout[0].nValue >= ASSETCHAINS_TIMELOCKGTE))) {
transactionsToRemove.push_back(tx);
break;
}
}
}
}
BOOST_FOREACH(const CTransaction& tx, transactionsToRemove) {
list<CTransaction> removed;
remove(tx, removed, true);
}
}
void CTxMemPool::removeWithAnchor(const uint256 &invalidRoot, ShieldedType type)
{
// If a block is disconnected from the tip, and the root changed,
// we must invalidate transactions from the mempool which spend
// from that root -- almost as though they were spending coinbases
// which are no longer valid to spend due to coinbase maturity.
LOCK(cs);
list<CTransaction> transactionsToRemove;
for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) {
const CTransaction& tx = it->GetTx();
switch (type) {
case SPROUT:
BOOST_FOREACH(const JSDescription& joinsplit, tx.vjoinsplit) {
if (joinsplit.anchor == invalidRoot) {
transactionsToRemove.push_back(tx);
break;
}
}
break;
case SAPLING:
BOOST_FOREACH(const SpendDescription& spendDescription, tx.vShieldedSpend) {
if (spendDescription.anchor == invalidRoot) {
transactionsToRemove.push_back(tx);
break;
}
}
break;
default:
throw runtime_error("Unknown shielded type");
break;
}
}
BOOST_FOREACH(const CTransaction& tx, transactionsToRemove) {
list<CTransaction> removed;
remove(tx, removed, true);
}
}
void CTxMemPool::removeConflicts(const CTransaction &tx, std::list<CTransaction>& removed)
{
// Remove transactions which depend on inputs of tx, recursively
list<CTransaction> result;
LOCK(cs);
BOOST_FOREACH(const CTxIn &txin, tx.vin) {
std::map<COutPoint, CInPoint>::iterator it = mapNextTx.find(txin.prevout);
if (it != mapNextTx.end()) {
const CTransaction &txConflict = *it->second.ptx;
if (txConflict != tx)
{
remove(txConflict, removed, true);
}
}
}
BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) {
BOOST_FOREACH(const uint256 &nf, joinsplit.nullifiers) {
std::map<uint256, const CTransaction*>::iterator it = mapSproutNullifiers.find(nf);
if (it != mapSproutNullifiers.end()) {
const CTransaction &txConflict = *it->second;
if (txConflict != tx) {
remove(txConflict, removed, true);
}
}
}
}
for (const SpendDescription &spendDescription : tx.vShieldedSpend) {
std::map<uint256, const CTransaction*>::iterator it = mapSaplingNullifiers.find(spendDescription.nullifier);
if (it != mapSaplingNullifiers.end()) {
const CTransaction &txConflict = *it->second;
if (txConflict != tx) {
remove(txConflict, removed, true);
}
}
}
}
int32_t komodo_validate_interest(const CTransaction &tx,int32_t txheight,uint32_t nTime,int32_t dispflag);
extern char ASSETCHAINS_SYMBOL[];
std::vector<uint256> CTxMemPool::removeExpired(unsigned int nBlockHeight)
{
CBlockIndex *tipindex;
// Remove expired txs from the mempool
LOCK(cs);
list<CTransaction> transactionsToRemove;
for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++)
{
const CTransaction& tx = it->GetTx();
tipindex = chainActive.LastTip();
bool fInterestNotValidated = ASSETCHAINS_SYMBOL[0] == 0 && tipindex != 0 && komodo_validate_interest(tx,tipindex->GetHeight()+1,tipindex->GetMedianTimePast() + 777,0) < 0;
if (IsExpiredTx(tx, nBlockHeight) || fInterestNotValidated)
{
if (fInterestNotValidated && tipindex != 0)
LogPrintf("Removing interest violate txid.%s nHeight.%d nTime.%u vs locktime.%u\n",tx.GetHash().ToString(),tipindex->GetHeight()+1,tipindex->GetMedianTimePast() + 777,tx.nLockTime);
transactionsToRemove.push_back(tx);
}
}
std::vector<uint256> ids;
for (const CTransaction& tx : transactionsToRemove) {
list<CTransaction> removed;
remove(tx, removed, true);
ids.push_back(tx.GetHash());
LogPrint("mempool", "Removing expired txid: %s\n", tx.GetHash().ToString());
}
return ids;
}
/**
* Called when a block is connected. Removes from mempool and updates the miner fee estimator.
*/
void CTxMemPool::removeForBlock(const std::vector<CTransaction>& vtx, unsigned int nBlockHeight,
std::list<CTransaction>& conflicts, bool fCurrentEstimate)
{
LOCK(cs);
std::vector<CTxMemPoolEntry> entries;
BOOST_FOREACH(const CTransaction& tx, vtx)
{
uint256 hash = tx.GetHash();
indexed_transaction_set::iterator i = mapTx.find(hash);
if (i != mapTx.end())
entries.push_back(*i);
}
BOOST_FOREACH(const CTransaction& tx, vtx)
{
std::list<CTransaction> dummy;
remove(tx, dummy, false);
removeConflicts(tx, conflicts);
ClearPrioritisation(tx.GetHash());
}
// After the txs in the new block have been removed from the mempool, update policy estimates
minerPolicyEstimator->processBlock(nBlockHeight, entries, fCurrentEstimate);
}
/**
* Called whenever the tip changes. Removes transactions which don't commit to
* the given branch ID from the mempool.
*/
void CTxMemPool::removeWithoutBranchId(uint32_t nMemPoolBranchId)
{
LOCK(cs);
std::list<CTransaction> transactionsToRemove;
for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) {
const CTransaction& tx = it->GetTx();
if (it->GetValidatedBranchId() != nMemPoolBranchId) {
transactionsToRemove.push_back(tx);
}
}
for (const CTransaction& tx : transactionsToRemove) {
std::list<CTransaction> removed;
remove(tx, removed, true);
}
}
void CTxMemPool::clear()
{
LOCK(cs);
mapTx.clear();
mapNextTx.clear();
totalTxSize = 0;
cachedInnerUsage = 0;
++nTransactionsUpdated;
}
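// Full consistency check, only run when sanity checking (nCheckFrequency) is
// enabled: every input must refer to another mempool transaction or to an unspent
// coin in the view, mapNextTx and the nullifier maps must mirror mapTx, and the
// cached size/usage totals must match freshly recomputed values.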
void CTxMemPool::check(const CCoinsViewCache *pcoins) const
{
if (nCheckFrequency == 0)
return;
if (insecure_rand() >= nCheckFrequency)
return;
LogPrint("mempool", "Checking mempool with %u transactions and %u inputs\n", (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size());
uint64_t checkTotal = 0;
uint64_t innerUsage = 0;
CCoinsViewCache mempoolDuplicate(const_cast<CCoinsViewCache*>(pcoins));
const int64_t nSpendHeight = GetSpendHeight(mempoolDuplicate);
LOCK(cs);
list<const CTxMemPoolEntry*> waitingOnDependants;
for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) {
unsigned int i = 0;
checkTotal += it->GetTxSize();
innerUsage += it->DynamicMemoryUsage();
const CTransaction& tx = it->GetTx();
bool fDependsWait = false;
BOOST_FOREACH(const CTxIn &txin, tx.vin) {
// Check that every mempool transaction's inputs refer to available coins, or other mempool tx's.
indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.hash);
if (it2 != mapTx.end()) {
const CTransaction& tx2 = it2->GetTx();
assert(tx2.vout.size() > txin.prevout.n && !tx2.vout[txin.prevout.n].IsNull());
fDependsWait = true;
} else {
const CCoins* coins = pcoins->AccessCoins(txin.prevout.hash);
assert(coins && coins->IsAvailable(txin.prevout.n));
}
// Check whether its inputs are marked in mapNextTx.
std::map<COutPoint, CInPoint>::const_iterator it3 = mapNextTx.find(txin.prevout);
assert(it3 != mapNextTx.end());
assert(it3->second.ptx == &tx);
assert(it3->second.n == i);
i++;
}
/*
boost::unordered_map<uint256, SproutMerkleTree, CCoinsKeyHasher> intermediates;
BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) {
BOOST_FOREACH(const uint256 &nf, joinsplit.nullifiers) {
assert(!pcoins->GetNullifier(nf, SPROUT));
}
SproutMerkleTree tree;
auto it = intermediates.find(joinsplit.anchor);
if (it != intermediates.end()) {
tree = it->second;
} else {
assert(pcoins->GetSproutAnchorAt(joinsplit.anchor, tree));
}
BOOST_FOREACH(const uint256& commitment, joinsplit.commitments)
{
tree.append(commitment);
}
intermediates.insert(std::make_pair(tree.root(), tree));
}
*/
for (const SpendDescription &spendDescription : tx.vShieldedSpend) {
SaplingMerkleTree tree;
assert(pcoins->GetSaplingAnchorAt(spendDescription.anchor, tree));
assert(!pcoins->GetNullifier(spendDescription.nullifier, SAPLING));
}
if (fDependsWait)
waitingOnDependants.push_back(&(*it));
else {
CValidationState state;
bool fCheckResult = tx.IsCoinBase() ||
Consensus::CheckTxInputs(tx, state, mempoolDuplicate, nSpendHeight, Params().GetConsensus());
assert(fCheckResult);
UpdateCoins(tx, mempoolDuplicate, 1000000);
}
}
unsigned int stepsSinceLastRemove = 0;
while (!waitingOnDependants.empty()) {
const CTxMemPoolEntry* entry = waitingOnDependants.front();
waitingOnDependants.pop_front();
CValidationState state;
if (!mempoolDuplicate.HaveInputs(entry->GetTx())) {
waitingOnDependants.push_back(entry);
stepsSinceLastRemove++;
assert(stepsSinceLastRemove < waitingOnDependants.size());
} else {
bool fCheckResult = entry->GetTx().IsCoinBase() ||
Consensus::CheckTxInputs(entry->GetTx(), state, mempoolDuplicate, nSpendHeight, Params().GetConsensus());
assert(fCheckResult);
UpdateCoins(entry->GetTx(), mempoolDuplicate, 1000000);
stepsSinceLastRemove = 0;
}
}
for (std::map<COutPoint, CInPoint>::const_iterator it = mapNextTx.begin(); it != mapNextTx.end(); it++) {
uint256 hash = it->second.ptx->GetHash();
indexed_transaction_set::const_iterator it2 = mapTx.find(hash);
assert(it2 != mapTx.end());
const CTransaction& tx = it2->GetTx();
assert(&tx == it->second.ptx);
assert(tx.vin.size() > it->second.n);
assert(it->first == it->second.ptx->vin[it->second.n].prevout);
}
checkNullifiers(SPROUT);
checkNullifiers(SAPLING);
assert(totalTxSize == checkTotal);
assert(innerUsage == cachedInnerUsage);
}
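// Assert that every entry in the selected nullifier map points at a transaction
// that is still present in mapTx.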
void CTxMemPool::checkNullifiers(ShieldedType type) const
{
const std::map<uint256, const CTransaction*>* mapToUse;
switch (type) {
case SPROUT:
mapToUse = &mapSproutNullifiers;
break;
case SAPLING:
mapToUse = &mapSaplingNullifiers;
break;
default:
throw runtime_error("Unknown nullifier type");
}
for (const auto& entry : *mapToUse) {
uint256 hash = entry.second->GetHash();
CTxMemPool::indexed_transaction_set::const_iterator findTx = mapTx.find(hash);
assert(findTx != mapTx.end());
const CTransaction& tx = findTx->GetTx();
assert(&tx == entry.second);
}
}
void CTxMemPool::queryHashes(vector<uint256>& vtxid)
{
vtxid.clear();
LOCK(cs);
vtxid.reserve(mapTx.size());
for (indexed_transaction_set::iterator mi = mapTx.begin(); mi != mapTx.end(); ++mi)
vtxid.push_back(mi->GetTx().GetHash());
}
bool CTxMemPool::lookup(uint256 hash, CTransaction& result) const
{
LOCK(cs);
indexed_transaction_set::const_iterator i = mapTx.find(hash);
if (i == mapTx.end()) return false;
result = i->GetTx();
return true;
}
CFeeRate CTxMemPool::estimateFee(int nBlocks) const
{
LOCK(cs);
return minerPolicyEstimator->estimateFee(nBlocks);
}
double CTxMemPool::estimatePriority(int nBlocks) const
{
LOCK(cs);
return minerPolicyEstimator->estimatePriority(nBlocks);
}
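// Serialize / deserialize the policy (fee and priority) estimator state.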
bool
CTxMemPool::WriteFeeEstimates(CAutoFile& fileout) const
{
try {
LOCK(cs);
fileout << 109900; // version required to read: 0.10.99 or later
fileout << CLIENT_VERSION; // version that wrote the file
minerPolicyEstimator->Write(fileout);
}
catch (const std::exception&) {
LogPrintf("CTxMemPool::WriteFeeEstimates(): unable to write policy estimator data (non-fatal)\n");
return false;
}
return true;
}
bool
CTxMemPool::ReadFeeEstimates(CAutoFile& filein)
{
try {
int nVersionRequired, nVersionThatWrote;
filein >> nVersionRequired >> nVersionThatWrote;
if (nVersionRequired > CLIENT_VERSION)
return error("CTxMemPool::ReadFeeEstimates(): up-version (%d) fee estimate file", nVersionRequired);
LOCK(cs);
minerPolicyEstimator->Read(filein);
}
catch (const std::exception&) {
LogPrintf("CTxMemPool::ReadFeeEstimates(): unable to read policy estimator data (non-fatal)\n");
return false;
}
return true;
}
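// Accumulate a user-supplied priority and fee delta for a transaction hash;
// ApplyDeltas later folds these into the priority and fee a caller computes.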
void CTxMemPool::PrioritiseTransaction(const uint256 hash, const string strHash, double dPriorityDelta, const CAmount& nFeeDelta)
{
{
LOCK(cs);
std::pair<double, CAmount> &deltas = mapDeltas[hash];
deltas.first += dPriorityDelta;
deltas.second += nFeeDelta;
}
LogPrintf("PrioritiseTransaction: %s priority += %f, fee += %d\n", strHash, dPriorityDelta, FormatMoney(nFeeDelta));
}
void CTxMemPool::ApplyDeltas(const uint256 hash, double &dPriorityDelta, CAmount &nFeeDelta)
{
LOCK(cs);
std::map<uint256, std::pair<double, CAmount> >::iterator pos = mapDeltas.find(hash);
if (pos == mapDeltas.end())
return;
const std::pair<double, CAmount> &deltas = pos->second;
dPriorityDelta += deltas.first;
nFeeDelta += deltas.second;
}
void CTxMemPool::ClearPrioritisation(const uint256 hash)
{
LOCK(cs);
mapDeltas.erase(hash);
}
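// True if none of tx's inputs spend an output of a transaction currently in the
// mempool.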
bool CTxMemPool::HasNoInputsOf(const CTransaction &tx) const
{
for (unsigned int i = 0; i < tx.vin.size(); i++)
if (exists(tx.vin[i].prevout.hash))
return false;
return true;
}
bool CTxMemPool::nullifierExists(const uint256& nullifier, ShieldedType type) const
{
switch (type) {
case SPROUT:
return mapSproutNullifiers.count(nullifier);
case SAPLING:
return mapSaplingNullifiers.count(nullifier);
default:
throw runtime_error("Unknown nullifier type");
}
}
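// Return, and clear, the transactions added since the last drain, together with
// the sequence number at the time of the snapshot.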
std::pair<std::vector<CTransaction>, uint64_t> CTxMemPool::DrainRecentlyAdded()
{
uint64_t recentlyAddedSequence;
std::vector<CTransaction> txs;
{
LOCK(cs);
recentlyAddedSequence = nRecentlyAddedSequence;
for (const auto& kv : mapRecentlyAddedTx) {
txs.push_back(*(kv.second));
}
mapRecentlyAddedTx.clear();
}
return std::make_pair(txs, recentlyAddedSequence);
}
void CTxMemPool::SetNotifiedSequence(uint64_t recentlyAddedSequence) {
assert(Params().NetworkIDString() == "regtest");
LOCK(cs);
nNotifiedSequence = recentlyAddedSequence;
}
bool CTxMemPool::IsFullyNotified() {
assert(Params().NetworkIDString() == "regtest");
LOCK(cs);
return nRecentlyAddedSequence == nNotifiedSequence;
}
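// Note: despite the generic name, this returns only the Sapling nullifier map.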
std::map<uint256, const CTransaction*> CTxMemPool::getNullifiers() {
return mapSaplingNullifiers;
}
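// CCoinsViewMemPool overlays the mempool on a backing CCoinsView so that
// unconfirmed coins and nullifiers are visible during validation.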
CCoinsViewMemPool::CCoinsViewMemPool(CCoinsView *baseIn, CTxMemPool &mempoolIn) : CCoinsViewBacked(baseIn), mempool(mempoolIn) { }
bool CCoinsViewMemPool::GetNullifier(const uint256 &nf, ShieldedType type) const
{
return mempool.nullifierExists(nf, type) || base->GetNullifier(nf, type);
}
bool CCoinsViewMemPool::GetCoins(const uint256 &txid, CCoins &coins) const {
// If an entry in the mempool exists, always return that one, as it's guaranteed to never
// conflict with the underlying cache, and it cannot have pruned entries (as it contains full
// transactions). First checking the underlying cache risks returning a pruned entry instead.
CTransaction tx;
if (mempool.lookup(txid, tx)) {
coins = CCoins(tx, MEMPOOL_HEIGHT);
return true;
}
return (base->GetCoins(txid, coins) && !coins.IsPruned());
}
bool CCoinsViewMemPool::HaveCoins(const uint256 &txid) const {
return mempool.exists(txid) || base->HaveCoins(txid);
}
size_t CTxMemPool::DynamicMemoryUsage() const {
LOCK(cs);
// Estimate the overhead of mapTx to be 6 pointers + an allocation, as no exact formula for boost::multi_index_container is implemented.
return memusage::MallocUsage(sizeof(CTxMemPoolEntry) + 6 * sizeof(void*)) * mapTx.size() + memusage::DynamicUsage(mapNextTx) + memusage::DynamicUsage(mapDeltas) + cachedInnerUsage;
}