Bitcoin Core  27.1.0
P2P Digital Currency
blockstorage.cpp
Go to the documentation of this file.
1 // Copyright (c) 2011-2022 The Bitcoin Core developers
2 // Distributed under the MIT software license, see the accompanying
3 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
4 
5 #include <node/blockstorage.h>
6 
7 #include <arith_uint256.h>
8 #include <chain.h>
9 #include <consensus/params.h>
10 #include <consensus/validation.h>
11 #include <dbwrapper.h>
12 #include <flatfile.h>
13 #include <hash.h>
15 #include <kernel/chainparams.h>
18 #include <logging.h>
19 #include <pow.h>
20 #include <primitives/block.h>
21 #include <primitives/transaction.h>
22 #include <reverse_iterator.h>
23 #include <serialize.h>
24 #include <signet.h>
25 #include <span.h>
26 #include <streams.h>
27 #include <sync.h>
28 #include <tinyformat.h>
29 #include <uint256.h>
30 #include <undo.h>
31 #include <util/batchpriority.h>
32 #include <util/check.h>
33 #include <util/fs.h>
34 #include <util/signalinterrupt.h>
35 #include <util/strencodings.h>
36 #include <util/translation.h>
37 #include <validation.h>
38 
39 #include <map>
40 #include <unordered_map>
41 
42 namespace kernel {
43 static constexpr uint8_t DB_BLOCK_FILES{'f'};
44 static constexpr uint8_t DB_BLOCK_INDEX{'b'};
45 static constexpr uint8_t DB_FLAG{'F'};
46 static constexpr uint8_t DB_REINDEX_FLAG{'R'};
47 static constexpr uint8_t DB_LAST_BLOCK{'l'};
48 // Keys used in previous version that might still be found in the DB:
49 // BlockTreeDB::DB_TXINDEX_BLOCK{'T'};
50 // BlockTreeDB::DB_TXINDEX{'t'}
51 // BlockTreeDB::ReadFlag("txindex")
52 
54 {
55  return Read(std::make_pair(DB_BLOCK_FILES, nFile), info);
56 }
57 
58 bool BlockTreeDB::WriteReindexing(bool fReindexing)
59 {
60  if (fReindexing) {
61  return Write(DB_REINDEX_FLAG, uint8_t{'1'});
62  } else {
63  return Erase(DB_REINDEX_FLAG);
64  }
65 }
66 
67 void BlockTreeDB::ReadReindexing(bool& fReindexing)
68 {
69  fReindexing = Exists(DB_REINDEX_FLAG);
70 }
71 
73 {
74  return Read(DB_LAST_BLOCK, nFile);
75 }
76 
//! Atomically persist dirty block-file metadata, the highest block file
//! number, and dirty block-index entries in a single synchronous batch.
//! @param fileInfo  (file number, file info) pairs to write under DB_BLOCK_FILES.
//! @param nLastFile highest block file number, written under DB_LAST_BLOCK.
//! @param blockinfo block index entries to serialize as CDiskBlockIndex records.
//! @return true if the batch was committed successfully.
bool BlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo)
{
    CDBBatch batch(*this);
    for (const auto& [file, info] : fileInfo) {
        batch.Write(std::make_pair(DB_BLOCK_FILES, file), *info);
    }
    batch.Write(DB_LAST_BLOCK, nLastFile);
    for (const CBlockIndex* bi : blockinfo) {
        // Keyed by block hash; CDiskBlockIndex is the on-disk representation.
        batch.Write(std::make_pair(DB_BLOCK_INDEX, bi->GetBlockHash()), CDiskBlockIndex{bi});
    }
    // 'true' requests a synchronous write so the index cannot lag the data files.
    return WriteBatch(batch, true);
}
89 
90 bool BlockTreeDB::WriteFlag(const std::string& name, bool fValue)
91 {
92  return Write(std::make_pair(DB_FLAG, name), fValue ? uint8_t{'1'} : uint8_t{'0'});
93 }
94 
95 bool BlockTreeDB::ReadFlag(const std::string& name, bool& fValue)
96 {
97  uint8_t ch;
98  if (!Read(std::make_pair(DB_FLAG, name), ch)) {
99  return false;
100  }
101  fValue = ch == uint8_t{'1'};
102  return true;
103 }
104 
//! Reconstruct the in-memory block index from all (DB_BLOCK_INDEX, hash)
//! records in the database.
//! @param consensusParams used to re-verify each header's proof of work.
//! @param insertBlockIndex caller-supplied factory that returns (creating if
//!        necessary) the CBlockIndex* for a given hash; also used to create a
//!        placeholder for a parent that has not been loaded yet.
//! @param interrupt allows an orderly shutdown to abort a long load.
//! @return true on success; false on interrupt or any read/validation error.
bool BlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex, const util::SignalInterrupt& interrupt)
{
    std::unique_ptr<CDBIterator> pcursor(NewIterator());
    // Seek to the first block-index record (uint256() is the minimal hash).
    pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256()));

    // Load m_block_index
    while (pcursor->Valid()) {
        if (interrupt) return false;
        std::pair<uint8_t, uint256> key;
        if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) {
            CDiskBlockIndex diskindex;
            if (pcursor->GetValue(diskindex)) {
                // Construct block index object
                CBlockIndex* pindexNew = insertBlockIndex(diskindex.ConstructBlockHash());
                // Parent may not exist yet; insertBlockIndex creates a stub.
                pindexNew->pprev = insertBlockIndex(diskindex.hashPrev);
                pindexNew->nHeight = diskindex.nHeight;
                pindexNew->nFile = diskindex.nFile;
                pindexNew->nDataPos = diskindex.nDataPos;
                pindexNew->nUndoPos = diskindex.nUndoPos;
                pindexNew->nVersion = diskindex.nVersion;
                pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
                pindexNew->nTime = diskindex.nTime;
                pindexNew->nBits = diskindex.nBits;
                pindexNew->nNonce = diskindex.nNonce;
                pindexNew->nStatus = diskindex.nStatus;
                pindexNew->nTx = diskindex.nTx;

                // Cheap sanity check: a corrupted/forged index entry must not
                // make it into memory with an invalid proof of work.
                if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams)) {
                    return error("%s: CheckProofOfWork failed: %s", __func__, pindexNew->ToString());
                }

                pcursor->Next();
            } else {
                return error("%s: failed to read value", __func__);
            }
        } else {
            // First key with a different prefix: past the block-index range.
            break;
        }
    }

    return true;
}
148 } // namespace kernel
149 
150 namespace node {
151 std::atomic_bool fReindex(false);
152 
154 {
155  // First sort by most total work, ...
156  if (pa->nChainWork > pb->nChainWork) return false;
157  if (pa->nChainWork < pb->nChainWork) return true;
158 
159  // ... then by earliest time received, ...
160  if (pa->nSequenceId < pb->nSequenceId) return false;
161  if (pa->nSequenceId > pb->nSequenceId) return true;
162 
163  // Use pointer address as tie breaker (should only happen with blocks
164  // loaded from disk, as those all have id 0).
165  if (pa < pb) return false;
166  if (pa > pb) return true;
167 
168  // Identical blocks.
169  return false;
170 }
171 
173 {
174  return pa->nHeight < pb->nHeight;
175 }
176 
177 std::vector<CBlockIndex*> BlockManager::GetAllBlockIndices()
178 {
180  std::vector<CBlockIndex*> rv;
181  rv.reserve(m_block_index.size());
182  for (auto& [_, block_index] : m_block_index) {
183  rv.push_back(&block_index);
184  }
185  return rv;
186 }
187 
189 {
191  BlockMap::iterator it = m_block_index.find(hash);
192  return it == m_block_index.end() ? nullptr : &it->second;
193 }
194 
196 {
198  BlockMap::const_iterator it = m_block_index.find(hash);
199  return it == m_block_index.end() ? nullptr : &it->second;
200 }
201 
203 {
205 
206  auto [mi, inserted] = m_block_index.try_emplace(block.GetHash(), block);
207  if (!inserted) {
208  return &mi->second;
209  }
210  CBlockIndex* pindexNew = &(*mi).second;
211 
212  // We assign the sequence id to blocks only when the full data is available,
213  // to avoid miners withholding blocks but broadcasting headers, to get a
214  // competitive advantage.
215  pindexNew->nSequenceId = 0;
216 
217  pindexNew->phashBlock = &((*mi).first);
218  BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
219  if (miPrev != m_block_index.end()) {
220  pindexNew->pprev = &(*miPrev).second;
221  pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
222  pindexNew->BuildSkip();
223  }
224  pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
225  pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
226  pindexNew->RaiseValidity(BLOCK_VALID_TREE);
227  if (best_header == nullptr || best_header->nChainWork < pindexNew->nChainWork) {
228  best_header = pindexNew;
229  }
230 
231  m_dirty_blockindex.insert(pindexNew);
232 
233  return pindexNew;
234 }
235 
//! Mark every block stored in the given file as no longer having block/undo
//! data, detach those blocks from m_blocks_unlinked, and reset the file's
//! bookkeeping so the file itself can be deleted by the caller.
//! @param fileNumber index of the blk/rev file pair being pruned.
void BlockManager::PruneOneBlockFile(const int fileNumber)
{
    for (auto& entry : m_block_index) {
        CBlockIndex* pindex = &entry.second;
        if (pindex->nFile == fileNumber) {
            // Clear data-availability flags and file coordinates; the block
            // header stays in the index so the chain structure is preserved.
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
            pindex->nFile = 0;
            pindex->nDataPos = 0;
            pindex->nUndoPos = 0;
            m_dirty_blockindex.insert(pindex);

            // Prune from m_blocks_unlinked -- any block we prune would have
            // to be downloaded again in order to consider its chain, at which
            // point it would be considered as a candidate for
            // m_blocks_unlinked or setBlockIndexCandidates.
            auto range = m_blocks_unlinked.equal_range(pindex->pprev);
            while (range.first != range.second) {
                // Advance before erasing so the iterator stays valid.
                std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first;
                range.first++;
                if (_it->second == pindex) {
                    m_blocks_unlinked.erase(_it);
                }
            }
        }
    }

    // Zero the per-file stats and queue the entry for a DB flush.
    m_blockfile_info.at(fileNumber) = CBlockFileInfo{};
    m_dirty_fileinfo.insert(fileNumber);
}
269 
271  std::set<int>& setFilesToPrune,
272  int nManualPruneHeight,
273  const Chainstate& chain,
274  ChainstateManager& chainman)
275 {
276  assert(IsPruneMode() && nManualPruneHeight > 0);
277 
279  if (chain.m_chain.Height() < 0) {
280  return;
281  }
282 
283  const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, nManualPruneHeight);
284 
285  int count = 0;
286  for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
287  const auto& fileinfo = m_blockfile_info[fileNumber];
288  if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
289  continue;
290  }
291 
292  PruneOneBlockFile(fileNumber);
293  setFilesToPrune.insert(fileNumber);
294  count++;
295  }
296  LogPrintf("[%s] Prune (Manual): prune_height=%d removed %d blk/rev pairs\n",
297  chain.GetRole(), last_block_can_prune, count);
298 }
299 
301  std::set<int>& setFilesToPrune,
302  int last_prune,
303  const Chainstate& chain,
304  ChainstateManager& chainman)
305 {
307  // Distribute our -prune budget over all chainstates.
308  const auto target = std::max(
309  MIN_DISK_SPACE_FOR_BLOCK_FILES, GetPruneTarget() / chainman.GetAll().size());
310  const uint64_t target_sync_height = chainman.m_best_header->nHeight;
311 
312  if (chain.m_chain.Height() < 0 || target == 0) {
313  return;
314  }
315  if (static_cast<uint64_t>(chain.m_chain.Height()) <= chainman.GetParams().PruneAfterHeight()) {
316  return;
317  }
318 
319  const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, last_prune);
320 
321  uint64_t nCurrentUsage = CalculateCurrentUsage();
322  // We don't check to prune until after we've allocated new space for files
323  // So we should leave a buffer under our target to account for another allocation
324  // before the next pruning.
325  uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
326  uint64_t nBytesToPrune;
327  int count = 0;
328 
329  if (nCurrentUsage + nBuffer >= target) {
330  // On a prune event, the chainstate DB is flushed.
331  // To avoid excessive prune events negating the benefit of high dbcache
332  // values, we should not prune too rapidly.
333  // So when pruning in IBD, increase the buffer to avoid a re-prune too soon.
334  const auto chain_tip_height = chain.m_chain.Height();
335  if (chainman.IsInitialBlockDownload() && target_sync_height > (uint64_t)chain_tip_height) {
336  // Since this is only relevant during IBD, we assume blocks are at least 1 MB on average
337  static constexpr uint64_t average_block_size = 1000000; /* 1 MB */
338  const uint64_t remaining_blocks = target_sync_height - chain_tip_height;
339  nBuffer += average_block_size * remaining_blocks;
340  }
341 
342  for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
343  const auto& fileinfo = m_blockfile_info[fileNumber];
344  nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize;
345 
346  if (fileinfo.nSize == 0) {
347  continue;
348  }
349 
350  if (nCurrentUsage + nBuffer < target) { // are we below our target?
351  break;
352  }
353 
354  // don't prune files that could have a block that's not within the allowable
355  // prune range for the chain being pruned.
356  if (fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
357  continue;
358  }
359 
360  PruneOneBlockFile(fileNumber);
361  // Queue up the files for removal
362  setFilesToPrune.insert(fileNumber);
363  nCurrentUsage -= nBytesToPrune;
364  count++;
365  }
366  }
367 
368  LogPrint(BCLog::PRUNE, "[%s] target=%dMiB actual=%dMiB diff=%dMiB min_height=%d max_prune_height=%d removed %d blk/rev pairs\n",
369  chain.GetRole(), target / 1024 / 1024, nCurrentUsage / 1024 / 1024,
370  (int64_t(target) - int64_t(nCurrentUsage)) / 1024 / 1024,
371  min_block_to_prune, last_block_can_prune, count);
372 }
373 
//! Create or overwrite the prune lock with the given name. Prune locks are
//! consulted by the pruning logic to keep certain block ranges on disk.
//! NOTE(review): the exact PruneLockInfo semantics are declared in the
//! header, not visible in this file — confirm there.
void BlockManager::UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) {
    m_prune_locks[name] = lock_info;
}
378 
380 {
382 
383  if (hash.IsNull()) {
384  return nullptr;
385  }
386 
387  const auto [mi, inserted]{m_block_index.try_emplace(hash)};
388  CBlockIndex* pindex = &(*mi).second;
389  if (inserted) {
390  pindex->phashBlock = &((*mi).first);
391  }
392  return pindex;
393 }
394 
//! Load the block tree from disk and derive the in-memory-only fields
//! (nChainWork, nTimeMax, nChainTx, skip pointers, failure propagation).
//! @param snapshot_blockhash if set, the node runs from an assumeutxo
//!        snapshot; nChainTx for the snapshot base is bootstrapped from
//!        hard-coded chainparams since it is not persisted.
//! @return false on interrupt, missing assumeutxo data, DB failure, or a
//!         non-contiguous height sequence in the index.
bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockhash)
{
    if (!m_block_tree_db->LoadBlockIndexGuts(
            GetConsensus(), [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, m_interrupt)) {
        return false;
    }

    if (snapshot_blockhash) {
        const std::optional<AssumeutxoData> maybe_au_data = GetParams().AssumeutxoForBlockhash(*snapshot_blockhash);
        if (!maybe_au_data) {
            m_opts.notifications.fatalError(strprintf("Assumeutxo data not found for the given blockhash '%s'.", snapshot_blockhash->ToString()));
            return false;
        }
        const AssumeutxoData& au_data = *Assert(maybe_au_data);
        m_snapshot_height = au_data.height;
        CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)};

        // Since nChainTx (responsible for estimated progress) isn't persisted
        // to disk, we must bootstrap the value for assumedvalid chainstates
        // from the hardcoded assumeutxo chainparams.
        base->nChainTx = au_data.nChainTx;
        LogPrintf("[snapshot] set nChainTx=%d for %s\n", au_data.nChainTx, snapshot_blockhash->ToString());
    } else {
        // If this isn't called with a snapshot blockhash, make sure the cached snapshot height
        // is null. This is relevant during snapshot completion, when the blockman may be loaded
        // with a height that then needs to be cleared after the snapshot is fully validated.
        m_snapshot_height.reset();
    }

    Assert(m_snapshot_height.has_value() == snapshot_blockhash.has_value());

    // Calculate nChainWork
    std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()};
    // NOTE(review): the comparator argument of this std::sort call appears to
    // have been lost in extraction (a height-only comparator is expected here
    // given the contiguity check below) — confirm against the upstream file.
    std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),

    CBlockIndex* previous_index{nullptr};
    for (CBlockIndex* pindex : vSortedByHeight) {
        if (m_interrupt) return false;
        // Heights must be contiguous once sorted; a gap means index corruption.
        if (previous_index && pindex->nHeight > previous_index->nHeight + 1) {
            return error("%s: block index is non-contiguous, index of height %d missing", __func__, previous_index->nHeight + 1);
        }
        previous_index = pindex;
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
        pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);

        // We can link the chain of blocks for which we've received transactions at some point, or
        // blocks that are assumed-valid on the basis of snapshot load (see
        // PopulateAndValidateSnapshot()).
        // Pruned nodes may have deleted the block.
        if (pindex->nTx > 0) {
            if (pindex->pprev) {
                if (m_snapshot_height && pindex->nHeight == *m_snapshot_height &&
                    pindex->GetBlockHash() == *snapshot_blockhash) {
                    // Should have been set above; don't disturb it with code below.
                    Assert(pindex->nChainTx > 0);
                } else if (pindex->pprev->nChainTx > 0) {
                    pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
                } else {
                    // Parent chain total unknown: remember this block as
                    // unlinked so it can be connected later.
                    pindex->nChainTx = 0;
                    m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
                }
            } else {
                pindex->nChainTx = pindex->nTx;
            }
        }
        // Propagate failure status to descendants of invalid blocks.
        if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
            pindex->nStatus |= BLOCK_FAILED_CHILD;
            m_dirty_blockindex.insert(pindex);
        }
        if (pindex->pprev) {
            pindex->BuildSkip();
        }
    }

    return true;
}
472 
473 bool BlockManager::WriteBlockIndexDB()
474 {
476  std::vector<std::pair<int, const CBlockFileInfo*>> vFiles;
477  vFiles.reserve(m_dirty_fileinfo.size());
478  for (std::set<int>::iterator it = m_dirty_fileinfo.begin(); it != m_dirty_fileinfo.end();) {
479  vFiles.emplace_back(*it, &m_blockfile_info[*it]);
480  m_dirty_fileinfo.erase(it++);
481  }
482  std::vector<const CBlockIndex*> vBlocks;
483  vBlocks.reserve(m_dirty_blockindex.size());
484  for (std::set<CBlockIndex*>::iterator it = m_dirty_blockindex.begin(); it != m_dirty_blockindex.end();) {
485  vBlocks.push_back(*it);
486  m_dirty_blockindex.erase(it++);
487  }
488  int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
489  if (!m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks)) {
490  return false;
491  }
492  return true;
493 }
494 
//! Top-level index load: rebuild the block index, then restore block-file
//! metadata, verify blk file presence, initialize blockfile cursors, and
//! read the pruned/reindexing flags.
//! @return false if the index load fails or a referenced blk file is missing.
bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_blockhash)
{
    if (!LoadBlockIndex(snapshot_blockhash)) {
        return false;
    }
    int max_blockfile_num{0};

    // Load block file info
    m_block_tree_db->ReadLastBlockFile(max_blockfile_num);
    m_blockfile_info.resize(max_blockfile_num + 1);
    LogPrintf("%s: last block file = %i\n", __func__, max_blockfile_num);
    for (int nFile = 0; nFile <= max_blockfile_num; nFile++) {
        m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]);
    }
    LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[max_blockfile_num].ToString());
    // Pick up any file-info records past the recorded last file (e.g. written
    // but not reflected in DB_LAST_BLOCK before a shutdown/crash).
    for (int nFile = max_blockfile_num + 1; true; nFile++) {
        CBlockFileInfo info;
        if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
            m_blockfile_info.push_back(info);
        } else {
            break;
        }
    }

    // Check presence of blk files
    LogPrintf("Checking all blk files are present...\n");
    std::set<int> setBlkDataFiles;
    for (const auto& [_, block_index] : m_block_index) {
        if (block_index.nStatus & BLOCK_HAVE_DATA) {
            setBlkDataFiles.insert(block_index.nFile);
        }
    }
    // Every file that supposedly holds block data must at least open.
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
        FlatFilePos pos(*it, 0);
        if (OpenBlockFile(pos, true).IsNull()) {
            return false;
        }
    }

    {
        // Initialize the blockfile cursors.
        // Later files overwrite earlier cursors of the same type, so each
        // type's cursor ends at the highest-numbered matching file.
        for (size_t i = 0; i < m_blockfile_info.size(); ++i) {
            const auto last_height_in_file = m_blockfile_info[i].nHeightLast;
            m_blockfile_cursors[BlockfileTypeForHeight(last_height_in_file)] = {static_cast<int>(i), 0};
        }
    }

    // Check whether we have ever pruned block & undo files
    m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned);
    if (m_have_pruned) {
        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
    }

    // Check whether we need to continue reindexing
    bool fReindexing = false;
    m_block_tree_db->ReadReindexing(fReindexing);
    if (fReindexing) fReindex = true;

    return true;
}
556 
//! On startup after a prior prune, delete any blk/rev file pairs whose
//! metadata says they hold no data (their deletion may not have completed
//! before a previous shutdown). The current (max) block file is skipped.
void BlockManager::ScanAndUnlinkAlreadyPrunedFiles()
{
    int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
    // Nothing to do unless pruning has ever happened on this datadir.
    if (!m_have_pruned) {
        return;
    }

    std::set<int> block_files_to_prune;
    for (int file_number = 0; file_number < max_blockfile; file_number++) {
        // nSize == 0 marks a file already emptied by PruneOneBlockFile.
        if (m_blockfile_info[file_number].nSize == 0) {
            block_files_to_prune.insert(file_number);
        }
    }

    UnlinkPrunedFiles(block_files_to_prune);
}
574 
576 {
577  const MapCheckpoints& checkpoints = data.mapCheckpoints;
578 
579  for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints)) {
580  const uint256& hash = i.second;
581  const CBlockIndex* pindex = LookupBlockIndex(hash);
582  if (pindex) {
583  return pindex;
584  }
585  }
586  return nullptr;
587 }
588 
589 bool BlockManager::IsBlockPruned(const CBlockIndex& block)
590 {
592  return m_have_pruned && !(block.nStatus & BLOCK_HAVE_DATA) && (block.nTx > 0);
593 }
594 
//! Walk backwards from 'upper_block' while the predecessor still has block
//! data, returning the earliest such block (or 'lower_block' if reached).
//! @param upper_block starting point; must itself have data (asserted).
//! @param lower_block optional lower bound; must be an ancestor of
//!        'upper_block' (asserted via the height check below).
//! @return never nullptr; the first block with data on the walked path.
const CBlockIndex* BlockManager::GetFirstStoredBlock(const CBlockIndex& upper_block, const CBlockIndex* lower_block)
{
    const CBlockIndex* last_block = &upper_block;
    assert(last_block->nStatus & BLOCK_HAVE_DATA); // 'upper_block' must have data
    while (last_block->pprev && (last_block->pprev->nStatus & BLOCK_HAVE_DATA)) {
        if (lower_block) {
            // Return if we reached the lower_block
            if (last_block == lower_block) return lower_block;
            // if range was surpassed, means that 'lower_block' is not part of the 'upper_block' chain
            // and so far this is not allowed.
            assert(last_block->nHeight >= lower_block->nHeight);
        }
        last_block = last_block->pprev;
    }
    assert(last_block != nullptr);
    return last_block;
}
613 
614 bool BlockManager::CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block)
615 {
616  if (!(upper_block.nStatus & BLOCK_HAVE_DATA)) return false;
617  return GetFirstStoredBlock(upper_block, &lower_block) == &lower_block;
618 }
619 
620 // If we're using -prune with -reindex, then delete block files that will be ignored by the
621 // reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
622 // is missing, do the same here to delete any later block files after a gap. Also delete all
623 // rev files since they'll be rewritten by the reindex anyway. This ensures that m_blockfile_info
624 // is in sync with what's actually on disk by the time we start downloading, so that pruning
625 // works correctly.
627 {
628  std::map<std::string, fs::path> mapBlockFiles;
629 
630  // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
631  // Remove the rev files immediately and insert the blk file paths into an
632  // ordered map keyed by block file index.
633  LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
634  for (fs::directory_iterator it(m_opts.blocks_dir); it != fs::directory_iterator(); it++) {
635  const std::string path = fs::PathToString(it->path().filename());
636  if (fs::is_regular_file(*it) &&
637  path.length() == 12 &&
638  path.substr(8,4) == ".dat")
639  {
640  if (path.substr(0, 3) == "blk") {
641  mapBlockFiles[path.substr(3, 5)] = it->path();
642  } else if (path.substr(0, 3) == "rev") {
643  remove(it->path());
644  }
645  }
646  }
647 
648  // Remove all block files that aren't part of a contiguous set starting at
649  // zero by walking the ordered map (keys are block file indices) by
650  // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
651  // start removing block files.
652  int nContigCounter = 0;
653  for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
654  if (LocaleIndependentAtoi<int>(item.first) == nContigCounter) {
655  nContigCounter++;
656  continue;
657  }
658  remove(item.second);
659  }
660 }
661 
663 {
665 
666  return &m_blockfile_info.at(n);
667 }
668 
//! Append a block's undo data to the undo (rev) file at 'pos', followed by a
//! checksum of (block hash, undo data) for later integrity verification.
//! @param blockundo the undo payload to serialize.
//! @param[in,out] pos on input selects the file; nPos is updated to the
//!        offset where the undo payload (not the header) begins.
//! @param hashBlock hash of the block the undo data belongs to; folded into
//!        the trailing checksum.
//! @return false if the file cannot be opened or ftell fails.
bool BlockManager::UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock) const
{
    // Open history file to append
    AutoFile fileout{OpenUndoFile(pos)};
    if (fileout.IsNull()) {
        return error("%s: OpenUndoFile failed", __func__);
    }

    // Write index header: network magic plus payload size.
    unsigned int nSize = GetSerializeSize(blockundo);
    fileout << GetParams().MessageStart() << nSize;

    // Write undo data
    // Record where the payload starts so the index can find it again.
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0) {
        return error("%s: ftell failed", __func__);
    }
    pos.nPos = (unsigned int)fileOutPos;
    fileout << blockundo;

    // calculate & write checksum
    HashWriter hasher{};
    hasher << hashBlock;
    hasher << blockundo;
    fileout << hasher.GetHash();

    return true;
}
697 
//! Read and checksum-verify a block's undo data from the rev file.
//! @param[out] blockundo deserialized undo payload on success.
//! @param index block whose undo data is requested; its stored undo position
//!        is looked up under cs_main. NOTE(review): index.pprev is
//!        dereferenced below, so the genesis block (pprev == nullptr) must
//!        never reach this function — confirm callers guarantee this.
//! @return false if no undo position, the file cannot be opened, a
//!         deserialize/I/O error occurs, or the checksum does not match.
bool BlockManager::UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& index) const
{
    const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())};

    if (pos.IsNull()) {
        return error("%s: no undo data available", __func__);
    }

    // Open history file to read
    AutoFile filein{OpenUndoFile(pos, true)};
    if (filein.IsNull()) {
        return error("%s: OpenUndoFile failed", __func__);
    }

    // Read block
    uint256 hashChecksum;
    HashVerifier verifier{filein}; // Use HashVerifier as reserializing may lose data, c.f. commit d342424301013ec47dc146a4beb49d5c9319d80a
    try {
        // The checksum was computed over (block hash, undo data) at write time.
        verifier << index.pprev->GetBlockHash();
        verifier >> blockundo;
        filein >> hashChecksum;
    } catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s", __func__, e.what());
    }

    // Verify checksum
    if (hashChecksum != verifier.GetHash()) {
        return error("%s: Checksum mismatch", __func__);
    }

    return true;
}
730 
731 bool BlockManager::FlushUndoFile(int block_file, bool finalize)
732 {
733  FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize);
734  if (!UndoFileSeq().Flush(undo_pos_old, finalize)) {
735  m_opts.notifications.flushError("Flushing undo file to disk failed. This is likely the result of an I/O error.");
736  return false;
737  }
738  return true;
739 }
740 
//! Flush (and optionally finalize) a block file, and conditionally its
//! paired undo file.
//! @param blockfile_num file to flush; must be a valid index once any
//!        blockfile info has been loaded (asserted).
//! @param fFinalize whether to finalize (truncate) the block file.
//! @param finalize_undo whether to finalize the undo file as well.
//! @return false if any flush failed (errors are reported via notifications).
bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo)
{
    bool success = true;

    if (m_blockfile_info.size() < 1) {
        // Return if we haven't loaded any blockfiles yet. This happens during
        // chainstate init, when we call ChainstateManager::MaybeRebalanceCaches() (which
        // then calls FlushStateToDisk()), resulting in a call to this function before we
        // have populated `m_blockfile_info` via LoadBlockIndexDB().
        return true;
    }
    assert(static_cast<int>(m_blockfile_info.size()) > blockfile_num);

    FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize);
    if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
        m_opts.notifications.flushError("Flushing block file to disk failed. This is likely the result of an I/O error.");
        success = false;
    }
    // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
    // e.g. during IBD or a sync after a node going offline
    if (!fFinalize || finalize_undo) {
        if (!FlushUndoFile(blockfile_num, finalize_undo)) {
            success = false;
        }
    }
    return success;
}
769 
771 {
772  if (!m_snapshot_height) {
773  return BlockfileType::NORMAL;
774  }
775  return (height >= *m_snapshot_height) ? BlockfileType::ASSUMED : BlockfileType::NORMAL;
776 }
777 
779 {
781  auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)];
782  // If the cursor does not exist, it means an assumeutxo snapshot is loaded,
783  // but no blocks past the snapshot height have been written yet, so there
784  // is no data associated with the chainstate, and it is safe not to flush.
785  if (cursor) {
786  return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false);
787  }
788  // No need to log warnings in this case.
789  return true;
790 }
791 
793 {
795 
796  uint64_t retval = 0;
797  for (const CBlockFileInfo& file : m_blockfile_info) {
798  retval += file.nSize + file.nUndoSize;
799  }
800  return retval;
801 }
802 
803 void BlockManager::UnlinkPrunedFiles(const std::set<int>& setFilesToPrune) const
804 {
805  std::error_code ec;
806  for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
807  FlatFilePos pos(*it, 0);
808  const bool removed_blockfile{fs::remove(BlockFileSeq().FileName(pos), ec)};
809  const bool removed_undofile{fs::remove(UndoFileSeq().FileName(pos), ec)};
810  if (removed_blockfile || removed_undofile) {
811  LogPrint(BCLog::BLOCKSTORAGE, "Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
812  }
813  }
814 }
815 
817 {
818  return FlatFileSeq(m_opts.blocks_dir, "blk", m_opts.fast_prune ? 0x4000 /* 16kb */ : BLOCKFILE_CHUNK_SIZE);
819 }
820 
822 {
824 }
825 
826 AutoFile BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const
827 {
828  return AutoFile{BlockFileSeq().Open(pos, fReadOnly)};
829 }
830 
832 AutoFile BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const
833 {
834  return AutoFile{UndoFileSeq().Open(pos, fReadOnly)};
835 }
836 
838 {
839  return BlockFileSeq().FileName(pos);
840 }
841 
//! Choose (and reserve space at) the on-disk position for a new block.
//! @param[in,out] pos when fKnown, the already-known position (reindex path);
//!        otherwise filled in with the chosen file/offset.
//! @param nAddSize serialized size (including header) to reserve.
//! @param nHeight block height, used to pick the NORMAL vs ASSUMED cursor.
//! @param nTime block time, recorded in the file's statistics.
//! @param fKnown true when re-indexing data already on disk (no allocation).
//! @return false on fatal disk-space exhaustion.
bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown)
{
    const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);

    if (!m_blockfile_cursors[chain_type]) {
        // If a snapshot is loaded during runtime, we may not have initialized this cursor yet.
        assert(chain_type == BlockfileType::ASSUMED);
        const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1};
        m_blockfile_cursors[chain_type] = new_cursor;
        LogPrint(BCLog::BLOCKSTORAGE, "[%s] initializing blockfile cursor to %s\n", chain_type, new_cursor);
    }
    const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;

    int nFile = fKnown ? pos.nFile : last_blockfile;
    if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
        m_blockfile_info.resize(nFile + 1);
    }

    bool finalize_undo = false;
    if (!fKnown) {
        unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE};
        // Use smaller blockfiles in test-only -fastprune mode - but avoid
        // the possibility of having a block not fit into the block file.
        if (m_opts.fast_prune) {
            max_blockfile_size = 0x10000; // 64kiB
            if (nAddSize >= max_blockfile_size) {
                // dynamically adjust the blockfile size to be larger than the added size
                max_blockfile_size = nAddSize + 1;
            }
        }
        assert(nAddSize < max_blockfile_size);

        // Advance to a fresh file whenever the block won't fit in the current one.
        while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
            // when the undo file is keeping up with the block file, we want to flush it explicitly
            // when it is lagging behind (more blocks arrive than are being connected), we let the
            // undo block write case handle it
            finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) ==
                             Assert(m_blockfile_cursors[chain_type])->undo_height);

            // Try the next unclaimed blockfile number
            nFile = this->MaxBlockfileNum() + 1;
            // Set to increment MaxBlockfileNum() for next iteration
            m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};

            if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
                m_blockfile_info.resize(nFile + 1);
            }
        }
        pos.nFile = nFile;
        pos.nPos = m_blockfile_info[nFile].nSize;
    }

    if (nFile != last_blockfile) {
        if (!fKnown) {
            LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
                     last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);
        }

        // Do not propagate the return code. The flush concerns a previous block
        // and undo file that has already been written to. If a flush fails
        // here, and we crash, there is no expected additional block data
        // inconsistency arising from the flush failure here. However, the undo
        // data may be inconsistent after a crash if the flush is called during
        // a reindex. A flush error might also leave some of the data files
        // untrimmed.
        if (!FlushBlockFile(last_blockfile, !fKnown, finalize_undo)) {
            // NOTE(review): the logging call that consumes this message appears
            // truncated in this view of the file — confirm against upstream.
            "Failed to flush previous block file %05i (finalize=%i, finalize_undo=%i) before opening new block file %05i\n",
            last_blockfile, !fKnown, finalize_undo, nFile);
        }
        // No undo data yet in the new file, so reset our undo-height tracking.
        m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
    }

    m_blockfile_info[nFile].AddBlock(nHeight, nTime);
    if (fKnown) {
        m_blockfile_info[nFile].nSize = std::max(pos.nPos + nAddSize, m_blockfile_info[nFile].nSize);
    } else {
        m_blockfile_info[nFile].nSize += nAddSize;
    }

    if (!fKnown) {
        // Pre-allocate disk space for the new data; failure here is fatal.
        bool out_of_space;
        size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
        if (out_of_space) {
            m_opts.notifications.fatalError("Disk space is too low!", _("Disk space is too low!"));
            return false;
        }
        if (bytes_allocated != 0 && IsPruneMode()) {
            m_check_for_pruning = true;
        }
    }

    m_dirty_fileinfo.insert(nFile);
    return true;
}
940 
941 bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
942 {
943  pos.nFile = nFile;
944 
946 
947  pos.nPos = m_blockfile_info[nFile].nUndoSize;
948  m_blockfile_info[nFile].nUndoSize += nAddSize;
949  m_dirty_fileinfo.insert(nFile);
950 
951  bool out_of_space;
952  size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
953  if (out_of_space) {
954  return FatalError(m_opts.notifications, state, "Disk space is too low!", _("Disk space is too low!"));
955  }
956  if (bytes_allocated != 0 && IsPruneMode()) {
957  m_check_for_pruning = true;
958  }
959 
960  return true;
961 }
962 
963 bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const
964 {
965  // Open history file to append
966  AutoFile fileout{OpenBlockFile(pos)};
967  if (fileout.IsNull()) {
968  return error("WriteBlockToDisk: OpenBlockFile failed");
969  }
970 
971  // Write index header
972  unsigned int nSize = GetSerializeSize(TX_WITH_WITNESS(block));
973  fileout << GetParams().MessageStart() << nSize;
974 
975  // Write block
976  long fileOutPos = ftell(fileout.Get());
977  if (fileOutPos < 0) {
978  return error("WriteBlockToDisk: ftell failed");
979  }
980  pos.nPos = (unsigned int)fileOutPos;
981  fileout << TX_WITH_WITNESS(block);
982 
983  return true;
984 }
985 
986 bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block)
987 {
989  const BlockfileType type = BlockfileTypeForHeight(block.nHeight);
990  auto& cursor = *Assert(WITH_LOCK(cs_LastBlockFile, return m_blockfile_cursors[type]));
991 
992  // Write undo information to disk
993  if (block.GetUndoPos().IsNull()) {
994  FlatFilePos _pos;
995  if (!FindUndoPos(state, block.nFile, _pos, ::GetSerializeSize(blockundo) + 40)) {
996  return error("ConnectBlock(): FindUndoPos failed");
997  }
998  if (!UndoWriteToDisk(blockundo, _pos, block.pprev->GetBlockHash())) {
999  return FatalError(m_opts.notifications, state, "Failed to write undo data");
1000  }
1001  // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
1002  // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
1003  // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
1004  // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
1005  // the FindBlockPos function
1006  if (_pos.nFile < cursor.file_num && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) {
1007  // Do not propagate the return code, a failed flush here should not
1008  // be an indication for a failed write. If it were propagated here,
1009  // the caller would assume the undo data not to be written, when in
1010  // fact it is. Note though, that a failed flush might leave the data
1011  // file untrimmed.
1012  if (!FlushUndoFile(_pos.nFile, true)) {
1013  LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", _pos.nFile);
1014  }
1015  } else if (_pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) {
1016  cursor.undo_height = block.nHeight;
1017  }
1018  // update nUndoPos in block index
1019  block.nUndoPos = _pos.nPos;
1020  block.nStatus |= BLOCK_HAVE_UNDO;
1021  m_dirty_blockindex.insert(&block);
1022  }
1023 
1024  return true;
1025 }
1026 
1028 {
1029  block.SetNull();
1030 
1031  // Open history file to read
1032  AutoFile filein{OpenBlockFile(pos, true)};
1033  if (filein.IsNull()) {
1034  return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString());
1035  }
1036 
1037  // Read block
1038  try {
1039  filein >> TX_WITH_WITNESS(block);
1040  } catch (const std::exception& e) {
1041  return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
1042  }
1043 
1044  // Check the header
1045  if (!CheckProofOfWork(block.GetHash(), block.nBits, GetConsensus())) {
1046  return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());
1047  }
1048 
1049  // Signet only: check block solution
1051  return error("ReadBlockFromDisk: Errors in block solution at %s", pos.ToString());
1052  }
1053 
1054  return true;
1055 }
1056 
1057 bool BlockManager::ReadBlockFromDisk(CBlock& block, const CBlockIndex& index) const
1058 {
1059  const FlatFilePos block_pos{WITH_LOCK(cs_main, return index.GetBlockPos())};
1060 
1061  if (!ReadBlockFromDisk(block, block_pos)) {
1062  return false;
1063  }
1064  if (block.GetHash() != index.GetBlockHash()) {
1065  return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
1066  index.ToString(), block_pos.ToString());
1067  }
1068  return true;
1069 }
1070 
1071 bool BlockManager::ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos) const
1072 {
1073  FlatFilePos hpos = pos;
1074  hpos.nPos -= 8; // Seek back 8 bytes for meta header
1075  AutoFile filein{OpenBlockFile(hpos, true)};
1076  if (filein.IsNull()) {
1077  return error("%s: OpenBlockFile failed for %s", __func__, pos.ToString());
1078  }
1079 
1080  try {
1081  MessageStartChars blk_start;
1082  unsigned int blk_size;
1083 
1084  filein >> blk_start >> blk_size;
1085 
1086  if (blk_start != GetParams().MessageStart()) {
1087  return error("%s: Block magic mismatch for %s: %s versus expected %s", __func__, pos.ToString(),
1088  HexStr(blk_start),
1089  HexStr(GetParams().MessageStart()));
1090  }
1091 
1092  if (blk_size > MAX_SIZE) {
1093  return error("%s: Block data is larger than maximum deserialization size for %s: %s versus %s", __func__, pos.ToString(),
1094  blk_size, MAX_SIZE);
1095  }
1096 
1097  block.resize(blk_size); // Zeroing of memory is intentional here
1098  filein.read(MakeWritableByteSpan(block));
1099  } catch (const std::exception& e) {
1100  return error("%s: Read from block file failed: %s for %s", __func__, e.what(), pos.ToString());
1101  }
1102 
1103  return true;
1104 }
1105 
1107 {
1108  unsigned int nBlockSize = ::GetSerializeSize(TX_WITH_WITNESS(block));
1109  FlatFilePos blockPos;
1110  const auto position_known {dbp != nullptr};
1111  if (position_known) {
1112  blockPos = *dbp;
1113  } else {
1114  // when known, blockPos.nPos points at the offset of the block data in the blk file. that already accounts for
1115  // the serialization header present in the file (the 4 magic message start bytes + the 4 length bytes = 8 bytes = BLOCK_SERIALIZATION_HEADER_SIZE).
1116  // we add BLOCK_SERIALIZATION_HEADER_SIZE only for new blocks since they will have the serialization header added when written to disk.
1117  nBlockSize += static_cast<unsigned int>(BLOCK_SERIALIZATION_HEADER_SIZE);
1118  }
1119  if (!FindBlockPos(blockPos, nBlockSize, nHeight, block.GetBlockTime(), position_known)) {
1120  error("%s: FindBlockPos failed", __func__);
1121  return FlatFilePos();
1122  }
1123  if (!position_known) {
1124  if (!WriteBlockToDisk(block, blockPos)) {
1125  m_opts.notifications.fatalError("Failed to write block");
1126  return FlatFilePos();
1127  }
1128  }
1129  return blockPos;
1130 }
1131 
//! RAII guard that marks the node as importing for its lifetime.
//! Restored: the class header and destructor signature were dropped by the
//! documentation extraction.
class ImportingNow
{
    //! Flag (owned by BlockManager) toggled for the guard's lifetime.
    std::atomic<bool>& m_importing;

public:
    //! Set the importing flag; asserts no import was already in progress.
    ImportingNow(std::atomic<bool>& importing) : m_importing{importing}
    {
        assert(m_importing == false);
        m_importing = true;
    }
    //! Clear the importing flag on scope exit.
    ~ImportingNow()
    {
        assert(m_importing == true);
        m_importing = false;
    }
};
1148 
1149 void ImportBlocks(ChainstateManager& chainman, std::vector<fs::path> vImportFiles)
1150 {
1152 
1153  {
1154  ImportingNow imp{chainman.m_blockman.m_importing};
1155 
1156  // -reindex
1157  if (fReindex) {
1158  int nFile = 0;
1159  // Map of disk positions for blocks with unknown parent (only used for reindex);
1160  // parent hash -> child disk position, multiple children can have the same parent.
1161  std::multimap<uint256, FlatFilePos> blocks_with_unknown_parent;
1162  while (true) {
1163  FlatFilePos pos(nFile, 0);
1164  if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) {
1165  break; // No block files left to reindex
1166  }
1167  AutoFile file{chainman.m_blockman.OpenBlockFile(pos, true)};
1168  if (file.IsNull()) {
1169  break; // This error is logged in OpenBlockFile
1170  }
1171  LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile);
1172  chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent);
1173  if (chainman.m_interrupt) {
1174  LogPrintf("Interrupt requested. Exit %s\n", __func__);
1175  return;
1176  }
1177  nFile++;
1178  }
1179  WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false));
1180  fReindex = false;
1181  LogPrintf("Reindexing finished\n");
1182  // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked):
1183  chainman.ActiveChainstate().LoadGenesisBlock();
1184  }
1185 
1186  // -loadblock=
1187  for (const fs::path& path : vImportFiles) {
1188  AutoFile file{fsbridge::fopen(path, "rb")};
1189  if (!file.IsNull()) {
1190  LogPrintf("Importing blocks file %s...\n", fs::PathToString(path));
1191  chainman.LoadExternalBlockFile(file);
1192  if (chainman.m_interrupt) {
1193  LogPrintf("Interrupt requested. Exit %s\n", __func__);
1194  return;
1195  }
1196  } else {
1197  LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path));
1198  }
1199  }
1200 
1201  // scan for better chains in the block chain database, that are not yet connected in the active best chain
1202 
1203  // We can't hold cs_main during ActivateBestChain even though we're accessing
1204  // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve
1205  // the relevant pointers before the ABC call.
1206  for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) {
1207  BlockValidationState state;
1208  if (!chainstate->ActivateBestChain(state, nullptr)) {
1209  chainman.GetNotifications().fatalError(strprintf("Failed to connect best block (%s)", state.ToString()));
1210  return;
1211  }
1212  }
1213  } // End scope of ImportingNow
1214 }
1215 
1216 std::ostream& operator<<(std::ostream& os, const BlockfileType& type) {
1217  switch(type) {
1218  case BlockfileType::NORMAL: os << "normal"; break;
1219  case BlockfileType::ASSUMED: os << "assumed"; break;
1220  default: os.setstate(std::ios_base::failbit);
1221  }
1222  return os;
1223 }
1224 
1225 std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor) {
1226  os << strprintf("BlockfileCursor(file_num=%d, undo_height=%d)", cursor.file_num, cursor.undo_height);
1227  return os;
1228 }
1229 } // namespace node
bool Exists(const K &key) const
Definition: dbwrapper.h:257
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
Definition: chain.h:174
node::BlockManager m_blockman
A single BlockManager instance is shared across each constructed chainstate to avoid duplicating bloc...
Definition: validation.h:963
std::optional< int > m_snapshot_height
The height of the base block of an assumeutxo snapshot, if one is in use.
Definition: blockstorage.h:276
std::string ToString() const
Definition: chain.cpp:15
bool WriteBatchSync(const std::vector< std::pair< int, const CBlockFileInfo *>> &fileInfo, int nLastFile, const std::vector< const CBlockIndex *> &blockinfo)
int32_t nSequenceId
(memory only) Sequential id assigned to distinguish order in which blocks are received.
Definition: chain.h:209
std::set< int > m_dirty_fileinfo
Dirty block file entries.
Definition: blockstorage.h:237
const util::SignalInterrupt & m_interrupt
Definition: blockstorage.h:257
AssertLockHeld(pool.cs)
void ImportBlocks(ChainstateManager &chainman, std::vector< fs::path > vImportFiles)
bool m_check_for_pruning
Global flag to indicate we should check to see if there are block/undo files that should be deleted...
Definition: blockstorage.h:229
bool IsPruneMode() const
Whether running in -prune mode.
Definition: blockstorage.h:319
#define LogPrint(category,...)
Definition: logging.h:264
assert(!tx.IsCoinBase())
FILE * fopen(const fs::path &p, const char *mode)
Definition: fs.cpp:26
static constexpr uint8_t DB_FLAG
void CleanupBlockRevFiles() const
descends from failed block
Definition: chain.h:122
CBlockIndex * pprev
pointer to the index of the predecessor of this block
Definition: chain.h:156
Batch of changes queued to be written to a CDBWrapper.
Definition: dbwrapper.h:72
uint64_t CalculateCurrentUsage()
Calculate the amount of disk space the block & undo files currently use.
std::atomic_bool fReindex
static constexpr uint8_t DB_BLOCK_INDEX
bool ReadBlockFileInfo(int nFile, CBlockFileInfo &info)
Definition: block.h:68
RecursiveMutex cs_LastBlockFile
Definition: blockstorage.h:199
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
Definition: validation.h:846
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
Definition: tinyformat.h:1162
reverse_range< T > reverse_iterate(T &x)
size_t GetSerializeSize(const T &t)
Definition: serialize.h:1116
bool IsNull() const
Definition: flatfile.h:36
const Consensus::Params & GetConsensus() const
Definition: blockstorage.h:143
Span< std::byte > MakeWritableByteSpan(V &&v) noexcept
Definition: span.h:282
unsigned int nHeight
bool WriteReindexing(bool fReindexing)
All parent headers found, difficulty matches, timestamp >= median previous, checkpoint.
Definition: chain.h:97
int Height() const
Return the maximal height in the chain.
Definition: chain.h:492
virtual void flushError(const std::string &debug_message)
The flush error notification is sent to notify the user that an error occurred while flushing block d...
AutoFile OpenUndoFile(const FlatFilePos &pos, bool fReadOnly=false) const
Open an undo file (rev?????.dat)
bool FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo)
Return false if block file or undo file flushing fails.
unsigned int nChainTx
Used to populate the nChainTx value, which is used during BlockManager::LoadBlockIndex().
Definition: chainparams.h:57
std::atomic< bool > & m_importing
uint32_t nTime
Definition: chain.h:204
undo data available in rev*.dat
Definition: chain.h:118
bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos) const
Functions for disk access for blocks.
void LoadExternalBlockFile(AutoFile &file_in, FlatFilePos *dbp=nullptr, std::multimap< uint256, FlatFilePos > *blocks_with_unknown_parent=nullptr)
Import blocks from an external file.
bool ReadFlag(const std::string &name, bool &fValue)
int nFile
Definition: flatfile.h:16
CChain m_chain
The current chain of blockheaders we consult and build on.
Definition: validation.h:569
void UnlinkPrunedFiles(const std::set< int > &setFilesToPrune) const
Actually unlink the specified files.
Non-refcounted RAII wrapper for FILE*.
Definition: streams.h:388
unsigned int nChainTx
(memory only) Number of transactions in the chain up to and including this block. ...
Definition: chain.h:191
static constexpr uint8_t DB_REINDEX_FLAG
FlatFileSeq BlockFileSeq() const
const util::SignalInterrupt & m_interrupt
Definition: validation.h:958
FILE * Open(const FlatFilePos &pos, bool read_only=false)
Open a handle to the file at the given position.
Definition: flatfile.cpp:33
std::vector< CBlockIndex * > GetAllBlockIndices() EXCLUSIVE_LOCKS_REQUIRED(std::multimap< CBlockIndex *, CBlockIndex * > m_blocks_unlinked
All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
Definition: blockstorage.h:278
bool ReadLastBlockFile(int &nFile)
bool UndoWriteToDisk(const CBlockUndo &blockundo, FlatFilePos &pos, const uint256 &hashBlock) const
bool WriteUndoDataForBlock(const CBlockUndo &blockundo, BlockValidationState &state, CBlockIndex &block) EXCLUSIVE_LOCKS_REQUIRED(FlatFilePos SaveBlockToDisk(const CBlock &block, int nHeight, const FlatFilePos *dbp)
Store block on disk.
Definition: blockstorage.h:316
std::array< uint8_t, 4 > MessageStartChars
uint256 GetBlockHash() const
Definition: chain.h:258
void FindFilesToPrune(std::set< int > &setFilesToPrune, int last_prune, const Chainstate &chain, ChainstateManager &chainman)
Prune block and undo files (blk???.dat and rev???.dat) so that the disk space used is less than a use...
void FindFilesToPruneManual(std::set< int > &setFilesToPrune, int nManualPruneHeight, const Chainstate &chain, ChainstateManager &chainman)
int MaxBlockfileNum() const EXCLUSIVE_LOCKS_REQUIRED(cs_LastBlockFile)
Definition: blockstorage.h:217
std::string ToString(const T &t)
Locale-independent version of std::to_string.
Definition: string.h:109
uint64_t PruneAfterHeight() const
Definition: chainparams.h:104
#define LOCK2(cs1, cs2)
Definition: sync.h:258
static const unsigned int BLOCKFILE_CHUNK_SIZE
The pre-allocation chunk size for blk?????.dat files (since 0.8)
Definition: blockstorage.h:70
bool IsBlockPruned(const CBlockIndex &block) EXCLUSIVE_LOCKS_REQUIRED(void UpdatePruneLock(const std::string &name, const PruneLockInfo &lock_info) EXCLUSIVE_LOCKS_REQUIRED(AutoFile OpenBlockFile(const FlatFilePos &pos, bool fReadOnly=false) const
Check whether the block associated with this index entry is pruned or not.
Definition: blockstorage.h:353
bool WriteBlockIndexDB() EXCLUSIVE_LOCKS_REQUIRED(bool LoadBlockIndexDB(const std::optional< uint256 > &snapshot_blockhash) EXCLUSIVE_LOCKS_REQUIRED(void ScanAndUnlinkAlreadyPrunedFiles() EXCLUSIVE_LOCKS_REQUIRED(CBlockIndex * AddToBlockIndex(const CBlockHeader &block, CBlockIndex *&best_header) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Remove any pruned block & undo files that are still on disk.
Definition: blockstorage.h:299
unsigned int nTimeMax
(memory only) Maximum nTime in the chain up to and including this block.
Definition: chain.h:212
static std::string PathToString(const path &path)
Convert path object to a byte string.
Definition: fs.h:151
Chainstate stores and provides an API to update our local knowledge of the current best chain...
Definition: validation.h:488
std::string HexStr(const Span< const uint8_t > s)
Convert a span of bytes to a lower-case hexadecimal string.
static constexpr size_t BLOCK_SERIALIZATION_HEADER_SIZE
Size of header written by WriteBlockToDisk before a serialized CBlock.
Definition: blockstorage.h:77
bool Erase(const K &key, bool fSync=false)
Definition: dbwrapper.h:266
bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize)
uint32_t nNonce
Definition: chain.h:206
#define LOCK(cs)
Definition: sync.h:257
const char * name
Definition: rest.cpp:49
std::string ToString() const
Definition: validation.h:128
bilingual_str _(const char *psz)
Translation function.
Definition: translation.h:74
FlatFileSeq UndoFileSeq() const
std::optional< AssumeutxoData > AssumeutxoForBlockhash(const uint256 &blockhash) const
Definition: chainparams.h:126
A writer stream (for serialization) that computes a 256-bit hash.
Definition: hash.h:100
ImportingNow(std::atomic< bool > &importing)
kernel::Notifications & GetNotifications() const
Definition: validation.h:936
const kernel::BlockManagerOpts m_opts
Definition: blockstorage.h:249
uint256 hashPrevBlock
Definition: block.h:26
CDBIterator * NewIterator()
Definition: dbwrapper.cpp:393
BlockfileType BlockfileTypeForHeight(int height)
bool signet_blocks
If true, witness commitments contain a payload equal to a Bitcoin Script solution to the signet chall...
Definition: params.h:128
bool UndoReadFromDisk(CBlockUndo &blockundo, const CBlockIndex &index) const
uint256 hashMerkleRoot
Definition: chain.h:203
void Write(const K &key, const V &value)
Definition: dbwrapper.h:99
#define LogPrintLevel(category, level,...)
Definition: logging.h:252
void BuildSkip()
Build the skiplist pointer for this entry.
Definition: chain.cpp:125
Used to marshal pointers into hashes for db storage.
Definition: chain.h:384
bool LoadBlockIndex(const std::optional< uint256 > &snapshot_blockhash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Load the blocktree off disk and into memory.
Parameters that influence chain consensus.
Definition: params.h:74
bool WriteFlag(const std::string &name, bool fValue)
Notifications & notifications
bool CheckProofOfWork(uint256 hash, unsigned int nBits, const Consensus::Params &params)
Check whether a block hash satisfies the proof-of-work requirement specified by nBits.
Definition: pow.cpp:125
int64_t GetBlockTime() const
Definition: block.h:61
CBlockIndex * LookupBlockIndex(const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
BlockfileType
Definition: blockstorage.h:100
bool Read(const K &key, V &value) const
Definition: dbwrapper.h:221
bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const
bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown)
constexpr bool IsNull() const
Definition: uint256.h:42
Chainstate &InitializeChainstate(CTxMemPool *mempool) EXCLUSIVE_LOCKS_REQUIRED(std::vector< Chainstate * GetAll)()
Instantiate a new chainstate.
Definition: validation.h:1034
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
Definition: sync.h:301
bool CheckBlockDataAvailability(const CBlockIndex &upper_block LIFETIMEBOUND, const CBlockIndex &lower_block LIFETIMEBOUND) EXCLUSIVE_LOCKS_REQUIRED(const CBlockIndex *GetFirstStoredBlock(const CBlockIndex &start_block LIFETIMEBOUND, const CBlockIndex *lower_block=nullptr) EXCLUSIVE_LOCKS_REQUIRED(boo m_have_pruned)
Check if all blocks in the [upper_block, lower_block] range have data available.
Definition: blockstorage.h:344
size_t Allocate(const FlatFilePos &pos, size_t add_size, bool &out_of_space)
Allocate additional space in a file after the given starting position.
Definition: flatfile.cpp:55
FlatFilePos GetUndoPos() const EXCLUSIVE_LOCKS_REQUIRED(
Definition: chain.h:234
const CBlockIndex * GetLastCheckpoint(const CCheckpointData &data) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Returns last CBlockIndex* that is a checkpoint.
const CChainParams & GetParams() const
Definition: blockstorage.h:142
Definition: init.h:25
Helper class that manages an interrupt flag, and allows a thread or signal to interrupt another threa...
uint256 GetHash() const
Definition: block.cpp:11
int32_t nVersion
block header
Definition: chain.h:202
const CChainParams & GetParams() const
Definition: validation.h:931
std::string ToString() const
Definition: flatfile.cpp:23
256-bit opaque blob.
Definition: uint256.h:106
uint256 ConstructBlockHash() const
Definition: chain.h:429
uint256 hashPrev
Definition: chain.h:395
bool Write(const K &key, const V &value, bool fSync=false)
Definition: dbwrapper.h:241
#define EXCLUSIVE_LOCKS_REQUIRED(...)
Definition: threadsafety.h:49
virtual void fatalError(const std::string &debug_message, const bilingual_str &user_message={})
The fatal error notification is sent to notify the user when an error occurs in kernel code that can&#39;...
bool WriteBlockToDisk(const CBlock &block, FlatFilePos &pos) const
fs::path FileName(const FlatFilePos &pos) const
Get the name of the file at the given position.
Definition: flatfile.cpp:28
fs::path GetBlockPosFilename(const FlatFilePos &pos) const
Translation to a filesystem path.
static constexpr uint8_t DB_LAST_BLOCK
bool FlushUndoFile(int block_file, bool finalize=false)
Return false if undo file flushing fails.
MapCheckpoints mapCheckpoints
Definition: chainparams.h:30
std::set< CBlockIndex * > m_dirty_blockindex
Dirty block index entries.
Definition: blockstorage.h:234
void SetNull()
Definition: block.h:95
bool error(const char *fmt, const Args &... args)
Definition: logging.h:267
The block chain is a tree shaped structure starting with the genesis block at the root...
Definition: chain.h:149
Undo information for a CBlock.
Definition: undo.h:62
void ScheduleBatchPriority()
On platforms that support it, tell the kernel the calling thread is CPU-intensive and non-interactive...
bool ReadRawBlockFromDisk(std::vector< uint8_t > &block, const FlatFilePos &pos) const
const MessageStartChars & MessageStart() const
Definition: chainparams.h:94
bool IsInitialBlockDownload() const
Check whether we are doing an initial block download (synchronizing from disk or network) ...
static const unsigned int MAX_BLOCKFILE_SIZE
The maximum size of a blk?????.dat file (since 0.8)
Definition: blockstorage.h:74
bool RaiseValidity(enum BlockStatus nUpTo) EXCLUSIVE_LOCKS_REQUIRED(
Raise the validity level of this block index entry.
Definition: chain.h:331
Holds configuration for use during UTXO snapshot load and validation.
Definition: chainparams.h:47
static constexpr uint64_t MAX_SIZE
The maximum size of a serialized object in bytes or number of elements (for eg vectors) when the size...
Definition: serialize.h:32
static int count
CBlockIndex * InsertBlockIndex(const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Create a new block index entry for a given block hash.
bool FlushChainstateBlockFile(int tip_height)
Reads data from an underlying stream, while hashing the read data.
Definition: hash.h:150
static const unsigned int UNDOFILE_CHUNK_SIZE
The pre-allocation chunk size for rev?????.dat files (since 0.8)
Definition: blockstorage.h:72
void ReadReindexing(bool &fReindexing)
bool WriteBatch(CDBBatch &batch, bool fSync=false)
Definition: dbwrapper.cpp:291
arith_uint256 GetBlockProof(const CBlockIndex &block)
Definition: chain.cpp:131
int nHeight
height of the entry in the chain. The genesis block has height 0
Definition: chain.h:162
FlatFilePos GetBlockPos() const EXCLUSIVE_LOCKS_REQUIRED(
Definition: chain.h:223
bool FatalError(Notifications &notifications, BlockValidationState &state, const std::string &strMessage, const bilingual_str &userMessage)
full block available in blk*.dat
Definition: chain.h:117
static const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES
Definition: validation.h:77
static bool exists(const path &p)
Definition: fs.h:89
CBlockFileInfo * GetBlockFileInfo(size_t n)
Get block file info entry for one block file.
#define LogPrintf(...)
Definition: logging.h:245
void PruneOneBlockFile(const int fileNumber) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Mark one block file as pruned (modify associated database entries)
FlatFileSeq represents a sequence of numbered files storing raw data.
Definition: flatfile.h:45
static constexpr uint8_t DB_BLOCK_FILES
Path class wrapper to block calls to the fs::path(std::string) implicit constructor and the fs::path:...
Definition: fs.h:32
std::atomic< bool > m_importing
Definition: blockstorage.h:260
uint64_t GetPruneTarget() const
Attempt to stay below this number of bytes of block files.
Definition: blockstorage.h:322
unsigned int nPos
Definition: flatfile.h:17
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate...
Definition: cs_main.cpp:8
std::ostream & operator<<(std::ostream &os, const BlockfileType &type)
uint32_t nBits
Definition: chain.h:205
unsigned int nTx
Number of transactions in this block.
Definition: chain.h:181
std::map< int, uint256 > MapCheckpoints
Definition: chainparams.h:27
SnapshotCompletionResult MaybeCompleteSnapshotValidation() EXCLUSIVE_LOCKS_REQUIRED(const CBlockIndex *GetSnapshotBaseBlock() const EXCLUSIVE_LOCKS_REQUIRED(Chainstate ActiveChainstate)() const
Once the background validation chainstate has reached the height which is the base of the UTXO snapsh...
Definition: validation.h:1065
Nodes collect new transactions into a block, hash them into a hash tree, and scan through nonce value...
Definition: block.h:21
bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const
#define Assert(val)
Identity function.
Definition: check.h:77
uint32_t nBits
Definition: block.h:29
static constexpr TransactionSerParams TX_WITH_WITNESS
Definition: transaction.h:195
bool CheckSignetBlockSolution(const CBlock &block, const Consensus::Params &consensusParams)
Extract signature and check whether a block has a valid solution.
Definition: signet.cpp:124
const uint256 * phashBlock
pointer to the hash of the block, if any. Memory is owned by this CBlockIndex
Definition: chain.h:153
std::vector< CBlockFileInfo > m_blockfile_info
Definition: blockstorage.h:200