// net_processing.cpp — Bitcoin Core 29.1.0 (P2P digital currency).
// NOTE: this listing was extracted from a generated documentation page;
// some comment and annotation lines of the original file are missing.
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2022 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 
6 #include <net_processing.h>
7 
8 #include <addrman.h>
9 #include <banman.h>
10 #include <blockencodings.h>
11 #include <blockfilter.h>
12 #include <chainparams.h>
13 #include <consensus/amount.h>
14 #include <consensus/validation.h>
15 #include <deploymentstatus.h>
16 #include <hash.h>
17 #include <headerssync.h>
18 #include <index/blockfilterindex.h>
19 #include <kernel/chain.h>
20 #include <kernel/mempool_entry.h>
21 #include <logging.h>
22 #include <merkleblock.h>
23 #include <netbase.h>
24 #include <netmessagemaker.h>
25 #include <node/blockstorage.h>
26 #include <node/timeoffsets.h>
27 #include <node/txdownloadman.h>
28 #include <node/txreconciliation.h>
29 #include <node/warnings.h>
30 #include <policy/fees.h>
31 #include <policy/policy.h>
32 #include <policy/settings.h>
33 #include <primitives/block.h>
34 #include <primitives/transaction.h>
35 #include <random.h>
36 #include <scheduler.h>
37 #include <streams.h>
38 #include <sync.h>
39 #include <tinyformat.h>
40 #include <txmempool.h>
41 #include <txorphanage.h>
42 #include <txrequest.h>
43 #include <util/check.h>
44 #include <util/strencodings.h>
45 #include <util/time.h>
46 #include <util/trace.h>
47 #include <validation.h>
48 
49 #include <algorithm>
50 #include <atomic>
51 #include <future>
52 #include <memory>
53 #include <optional>
54 #include <ranges>
55 #include <typeinfo>
56 #include <utility>
57 
using namespace util::hex_literals;

// USDT tracepoint semaphores for the 'net' provider.
TRACEPOINT_SEMAPHORE(net, inbound_message);
TRACEPOINT_SEMAPHORE(net, misbehaving_connection);

/** Headers download timeout: fixed base plus a per-expected-header surcharge. */
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
/** How long to wait for a peer to respond to a getheaders request. */
static constexpr auto HEADERS_RESPONSE_TIME{2min};
/** Number of outbound peers protected from disconnection for slow chain sync. */
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
/** Timeout for (unprotected) outbound peers to catch up to our chain work. */
static constexpr auto CHAIN_SYNC_TIMEOUT{20min};
/** How frequently to check whether our tip looks stale. */
static constexpr auto STALE_CHECK_INTERVAL{10min};
/** How frequently to check for (and evict) extra outbound peers. */
static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s};
/** Minimum time an outbound connection must be up before eviction is considered. */
static constexpr auto MINIMUM_CONNECT_TIME{30s};
/** Fixed salt for deterministic randomization in address relay hashing. */
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
/** Age (seconds) after which a block is no longer relayed to peers: 30 days. */
static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
/** Age (seconds) after which a requested block counts as "historical": 7 days. */
static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
/** How frequently to send pings to each peer. */
static constexpr auto PING_INTERVAL{2min};
/** Maximum number of entries accepted in a block locator. */
static const unsigned int MAX_LOCATOR_SZ = 101;
/** Maximum number of entries accepted in an inv message. */
static const unsigned int MAX_INV_SZ = 50000;
/** Maximum number of entries accepted in a getdata message. */
static const unsigned int MAX_GETDATA_SZ = 1000;
/** Cap on concurrent block downloads from a single peer. */
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
/** Block-download stalling timeout: initial value and hard cap (it adapts in between). */
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s};
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s};
/** Maximum depth below the tip at which a compact block is served. */
static const int MAX_CMPCTBLOCK_DEPTH = 5;
/** Maximum depth below the tip at which a blocktxn response is served. */
static const int MAX_BLOCKTXN_DEPTH = 10;
static_assert(MAX_BLOCKTXN_DEPTH <= MIN_BLOCKS_TO_KEEP, "MAX_BLOCKTXN_DEPTH too high");
/** Size of the moving window of blocks being downloaded ahead of the validated chain. */
static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
/** Block download timeout: base plus a per-parallel-peer surcharge
 *  (NOTE(review): presumably expressed in multiples of the block interval — confirm at usage site). */
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
/** Maximum number of headers announced unsolicited in a headers message. */
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
/** Minimum recent blocks a NODE_NETWORK_LIMITED peer is expected to serve (~2 days). */
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
/** Extra safety margin (in blocks) applied when connecting to limited peers. */
static const unsigned int NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS = 144;
/** Average interval for re-announcing our own address. */
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h};
/** Average interval between addr broadcasts to a peer. */
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s};
/** How often to rotate the peers chosen as destinations for a relayed address. */
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h};
/** Average tx-inventory broadcast interval; inbound peers get a slower rate. */
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s};
static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL{2s};
/** Target rate of transaction announcements per second, and hard cap per message. */
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
static constexpr unsigned int INVENTORY_BROADCAST_MAX = 1000;
// NOTE(review): the definition of INVENTORY_BROADCAST_TARGET appears to have been
// lost in extraction — restore it from upstream before compiling.
static_assert(INVENTORY_BROADCAST_MAX >= INVENTORY_BROADCAST_TARGET, "INVENTORY_BROADCAST_MAX too low");
static_assert(INVENTORY_BROADCAST_MAX <= node::MAX_PEER_TX_ANNOUNCEMENTS, "INVENTORY_BROADCAST_MAX too high");
/** Average interval between feefilter broadcasts, and max delay for a changed filter. */
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min};
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min};
/** Maximum entries served per getcfilters / getcfheaders request (BIP157). */
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
/** Addr response sizing: at most this percentage of addrman, capped at MAX_ADDR_TO_SEND. */
static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
static constexpr size_t MAX_ADDR_TO_SEND{1000};
/** Token-bucket refill rate for processing incoming addr messages. */
static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1};
/** Compact block protocol version we announce in sendcmpct (BIP152 v2, segwit). */
static constexpr uint64_t CMPCTBLOCKS_VERSION{2};
168 
169 // Internal stuff
170 namespace {
/** Bookkeeping for one block requested from a peer (stored in CNodeState::vBlocksInFlight). */
struct QueuedBlock {
    //! Block index entry of the requested block.
    const CBlockIndex* pindex;
    //! Reconstruction state for compact-block downloads; null otherwise
    //! (see BlockRequested(): only allocated when the caller passes pit).
    std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
};
178 
/** Per-peer state that is not strictly validation-related (network behavior,
 *  relay queues, rate limits). Shared via PeerRef; each field documents its
 *  own locking requirement through GUARDED_BY annotations. */
struct Peer {
    //! The peer's network id; fixed at construction.
    const NodeId m_id{0};

    //! Services we offered to this peer in our version message.
    const ServiceFlags m_our_services;
    //! Services the peer offered us in its version message.
    std::atomic<ServiceFlags> m_their_services{NODE_NONE};

    //! Whether the peer connected to us (as opposed to us dialing out).
    const bool m_is_inbound;

    //! Protects m_should_discourage.
    Mutex m_misbehavior_mutex;
    //! Set when the peer has misbehaved badly enough to be discouraged.
    bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};

    //! Protects the block-announcement queues below.
    Mutex m_block_inv_mutex;
    //! Block hashes queued for relay to this peer via inv messages.
    std::vector<uint256> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
    //! Block hashes queued for announcement via headers messages.
    std::vector<uint256> m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);
    //! NOTE(review): presumably the hash at which a headers continuation
    //! (follow-up getheaders) resumes — confirm against usage elsewhere in the file.
    uint256 m_continuation_block GUARDED_BY(m_block_inv_mutex) {};

    //! Whether we have sent our version message to this outbound peer.
    bool m_outbound_version_message_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};

    //! Block height the peer reported in its version message (-1 = unknown).
    std::atomic<int> m_starting_height{-1};

    //! Nonce of the last ping we sent (0 = none outstanding).
    std::atomic<uint64_t> m_ping_nonce_sent{0};
    //! Time the outstanding ping was sent, for round-trip measurement.
    std::atomic<std::chrono::microseconds> m_ping_start{0us};
    //! Whether an out-of-band ping (e.g. via RPC) has been requested.
    std::atomic<bool> m_ping_queued{false};

    //! Whether tx announcements with this peer use wtxids (BIP339) instead of txids.
    std::atomic<bool> m_wtxid_relay{false};
    //! Earliest time the next feefilter message may be sent to this peer.
    std::chrono::microseconds m_next_send_feefilter GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};

    /** Transaction-relay state; allocated via SetTxRelay() only for peers we
     *  relay transactions with. */
    struct TxRelay {
        mutable RecursiveMutex m_bloom_filter_mutex;
        //! Whether we relay transactions to this peer.
        bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
        //! BIP37 bloom filter installed by the peer, if any.
        std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr};

        mutable RecursiveMutex m_tx_inventory_mutex;
        //! Rolling filter of tx ids this peer already knows about.
        CRollingBloomFilter m_tx_inventory_known_filter GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
        //! Tx ids queued for announcement to this peer.
        std::set<uint256> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
        //! Whether the peer asked for our full mempool (BIP35 mempool message).
        bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
        //! Next scheduled time to send an inv batch to this peer.
        std::chrono::microseconds m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0};
        //! NOTE(review): name suggests the mempool sequence number recorded at
        //! the last inv send — confirm against the send path.
        uint64_t m_last_inv_sequence GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1};

        //! Minimum feerate the peer wants announced (BIP133 feefilter); 0 = none.
        std::atomic<CAmount> m_fee_filter_received{0};
    };

    /* Initializes a TxRelay struct for this peer. Can be called at most once for a peer. */
    TxRelay* SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex)
    {
        LOCK(m_tx_relay_mutex);
        Assume(!m_tx_relay);
        m_tx_relay = std::make_unique<Peer::TxRelay>();
        return m_tx_relay.get();
    };

    //! Returns the TxRelay state, or nullptr if tx relay was never set up.
    TxRelay* GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex)
    {
        return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
    };

    //! Addresses queued for sending to this peer (bounded by MAX_ADDR_TO_SEND).
    std::vector<CAddress> m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
    //! Rolling filter of addresses this peer already knows; allocated only once
    //! address relay is enabled for the peer (see m_addr_relay_enabled).
    std::unique_ptr<CRollingBloomFilter> m_addr_known GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
    //! Whether address relay has been negotiated/enabled with this peer.
    std::atomic_bool m_addr_relay_enabled{false};
    //! Whether we have sent a getaddr to this peer.
    bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
    //! Protects the two scheduled addr-send timestamps below.
    mutable Mutex m_addr_send_times_mutex;
    //! Next scheduled time to send queued addresses.
    std::chrono::microseconds m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    //! Next scheduled time to announce our own address.
    std::chrono::microseconds m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    //! Whether the peer asked for addrv2 (BIP155) address messages.
    std::atomic_bool m_wants_addrv2{false};
    //! Whether we have already answered a getaddr from this peer.
    bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
    //! Token bucket limiting how many incoming addresses we process.
    double m_addr_token_bucket GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1.0};
    //! Last time the token bucket was refilled.
    std::chrono::microseconds m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){GetTime<std::chrono::microseconds>()};
    //! Counters: addresses dropped by rate limiting / addresses processed.
    std::atomic<uint64_t> m_addr_rate_limited{0};
    std::atomic<uint64_t> m_addr_processed{0};

    //! Whether a block inv already triggered a getheaders before headers sync started.
    bool m_inv_triggered_getheaders_before_sync GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};

    //! Protects m_getdata_requests.
    Mutex m_getdata_requests_mutex;
    //! Work queue of getdata items received from this peer.
    std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);

    //! Protects m_headers_sync.
    Mutex m_headers_sync_mutex;
    //! State for the low-work headers-sync mechanism; null when not in use.
    std::unique_ptr<HeadersSyncState> m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex) GUARDED_BY(m_headers_sync_mutex) {};

    //! Whether we have sent sendheaders to this peer.
    std::atomic<bool> m_sent_sendheaders{false};

    //! Deadline for this peer's headers sync (0 = none set).
    std::chrono::microseconds m_headers_sync_timeout GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us};

    //! Whether the peer prefers block announcements via headers (sendheaders received).
    bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};

    //! Clock offset reported by the peer's version message.
    std::atomic<std::chrono::seconds> m_time_offset{0s};

    explicit Peer(NodeId id, ServiceFlags our_services, bool is_inbound)
        : m_id{id}
        , m_our_services{our_services}
        , m_is_inbound{is_inbound}
    {}

private:
    //! Protects m_tx_relay (written once by SetTxRelay, read via GetTxRelay).
    mutable Mutex m_tx_relay_mutex;

    //! Tx-relay state; null for peers we do not relay transactions with.
    std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);
};
397 
//! Shared-ownership handle to a Peer; kept in PeerManagerImpl::m_peer_map.
using PeerRef = std::shared_ptr<Peer>;

/** Validation-specific, cs_main-guarded state kept for each connected node
 *  (block availability, download scheduling, compact-block negotiation). */
struct CNodeState {
    //! The best block this peer is known to have announced, by chain work.
    const CBlockIndex* pindexBestKnownBlock{nullptr};
    //! Hash of the last block the peer announced that we don't have a header for.
    uint256 hashLastUnknownBlock{};
    //! NOTE(review): presumably the last block common to our chain and this
    //! peer's — used as the block-download starting point; confirm at usage.
    const CBlockIndex* pindexLastCommonBlock{nullptr};
    //! The best header we have announced to this peer.
    const CBlockIndex* pindexBestHeaderSent{nullptr};
    //! Whether we've started headers synchronization with this peer.
    bool fSyncStarted{false};
    //! When this peer was first detected stalling block download (0us = not stalling).
    std::chrono::microseconds m_stalling_since{0us};
    //! Blocks currently in flight from this peer, in request order.
    std::list<QueuedBlock> vBlocksInFlight;
    //! When the first entry in vBlocksInFlight started downloading.
    std::chrono::microseconds m_downloading_since{0us};
    //! Whether this peer is preferred for block download.
    bool fPreferredDownload{false};
    //! Whether we requested high-bandwidth compact blocks from this peer (BIP152).
    bool m_requested_hb_cmpctblocks{false};
    //! Whether the peer signalled compact-block support (sendcmpct received).
    bool m_provides_cmpctblocks{false};

    /** State for enforcing CHAIN_SYNC_TIMEOUT on outbound peers that are not
     *  keeping up with our chain work. */
    struct ChainSyncTimeoutState {
        //! Deadline for the peer to demonstrate sufficient work (0s = timer not running).
        std::chrono::seconds m_timeout{0s};
        //! NOTE(review): presumably our tip's header when the timer was armed — confirm.
        const CBlockIndex* m_work_header{nullptr};
        //! Whether we already sent a getheaders as part of this timeout check.
        bool m_sent_getheaders{false};
        //! Whether this peer is protected from chain-sync eviction.
        bool m_protect{false};
    };

    ChainSyncTimeoutState m_chain_sync;

    //! Unix time of this peer's last new-block announcement.
    int64_t m_last_block_announcement{0};
};
469 
/** Concrete PeerManager: implements all P2P message processing, block/tx
 *  relay scheduling, peer scoring and eviction. Thread-safety is expressed
 *  through the EXCLUSIVE_LOCKS_REQUIRED annotations on each member. */
class PeerManagerImpl final : public PeerManager
{
public:
    PeerManagerImpl(CConnman& connman, AddrMan& addrman,
                    BanMan* banman, ChainstateManager& chainman,
                    CTxMemPool& pool, node::Warnings& warnings, Options opts);

    /** Overridden from CValidationInterface. */
    void ActiveTipChange(const CBlockIndex& new_tip, bool) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex);
    void BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex);
    void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex);
    void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void BlockChecked(const CBlock& block, const BlockValidationState& state) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);

    /** Overridden from NetEventsInterface. */
    void InitializeNode(const CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_tx_download_mutex);
    void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, !m_tx_download_mutex);
    bool HasAllDesirableServiceFlags(ServiceFlags services) const override;
    bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex);
    bool SendMessages(CNode* pto) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, g_msgproc_mutex, !m_tx_download_mutex);

    /** Overridden from PeerManager (public node interface). */
    void StartScheduledTasks(CScheduler& scheduler) override;
    void CheckForStaleTipAndEvictPeers() override;
    std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    std::vector<TxOrphanage::OrphanTxBase> GetOrphanTransactions() override EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex);
    PeerManagerInfo GetInfo() const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void RelayTransaction(const uint256& txid, const uint256& wtxid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void SetBestBlock(int height, std::chrono::seconds time) override
    {
        m_best_height = height;
        m_best_block_time = time;
    };
    void UnitTestMisbehaving(NodeId peer_id) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) { Misbehaving(*Assert(GetPeerRef(peer_id)), ""); };
    void ProcessMessage(CNode& pfrom, const std::string& msg_type, DataStream& vRecv,
                        const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex);
    void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) override;
    ServiceFlags GetDesirableServiceFlags(ServiceFlags services) const override;

private:
    /** Peer eviction and misbehavior handling. */
    void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex);

    void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    void ReattemptInitialBroadcast(CScheduler& scheduler) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    /** Look up / remove a peer's Peer object by id (nullptr when absent). */
    PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    void Misbehaving(Peer& peer, const std::string& message);

    void MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
                                 bool via_compact_block, const std::string& message = "")
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    void MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer);

    /** Transaction download / validation pipeline. */
    std::optional<node::PackageToValidate> ProcessInvalidTx(NodeId nodeid, const CTransactionRef& tx, const TxValidationState& result,
                                                            bool first_time_failure)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex);

    void ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex);

    void ProcessPackageResult(const node::PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex);

    bool ProcessOrphanTx(Peer& peer)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, !m_tx_download_mutex);

    /** Headers processing and low-work headers sync. */
    void ProcessHeadersMessage(CNode& pfrom, Peer& peer,
                               std::vector<CBlockHeader>&& headers,
                               bool via_compact_block)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
    bool CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer);
    arith_uint256 GetAntiDoSWorkThreshold();
    void HandleUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
    bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const;
    bool IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom,
                                            std::vector<CBlockHeader>& headers)
        EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
    bool TryLowWorkHeadersSync(Peer& peer, CNode& pfrom,
                               const CBlockIndex* chain_start_header,
                               std::vector<CBlockHeader>& headers)
        EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);

    bool IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
    void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header);
    void UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req);

    /** Message construction helpers. */
    void PushMessage(CNode& node, CSerializedNetMsg&& msg) const { m_connman.PushMessage(&node, std::move(msg)); }
    template <typename... Args>
    void MakeAndPushMessage(CNode& node, std::string msg_type, Args&&... args) const
    {
        m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...));
    }

    void PushNodeVersion(CNode& pnode, const Peer& peer);

    /** Periodic per-peer send tasks (driven from SendMessages). */
    void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now);

    void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    void MaybeSendSendHeaders(CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);

    void MaybeSendFeefilter(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    /** Injected dependencies (owned elsewhere). */
    const CChainParams& m_chainparams;
    CConnman& m_connman;
    AddrMan& m_addrman;
    BanMan* const m_banman;
    ChainstateManager& m_chainman;
    CTxMemPool& m_mempool;

    //! Serializes tx-download state; must be taken before m_mempool.cs.
    Mutex m_tx_download_mutex ACQUIRED_BEFORE(m_mempool.cs);
    node::TxDownloadManager m_txdownloadman GUARDED_BY(m_tx_download_mutex);

    std::unique_ptr<TxReconciliationTracker> m_txreconciliation;

    //! Best-known block height/time (set via SetBestBlock).
    std::atomic<int> m_best_height{-1};
    std::atomic<std::chrono::seconds> m_best_block_time{0s};

    std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s};

    node::Warnings& m_warnings;
    TimeOffsets m_outbound_time_offsets{m_warnings};

    const Options m_opts;

    bool RejectIncomingTxs(const CNode& peer) const;

    bool m_initial_sync_finished GUARDED_BY(cs_main){false};

    //! Protects m_peer_map — the single registry of Peer objects.
    mutable Mutex m_peer_mutex;
    std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);

    //! Per-node validation state (see CNodeState); cs_main-guarded.
    std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main);

    const CNodeState* State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    CNodeState* State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    uint32_t GetFetchFlags(const Peer& peer) const;

    std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};

    int nSyncStarted GUARDED_BY(cs_main) = 0;

    uint256 m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){};

    //! Maps block hash => (announcing peer, whether to punish on invalid).
    std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);

    std::atomic<int> m_wtxid_relay_peers{0};

    int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;

    int m_num_preferred_download_peers GUARDED_BY(cs_main){0};

    //! Adaptive stalling timeout (between BLOCK_STALLING_TIMEOUT_DEFAULT and _MAX).
    std::atomic<std::chrono::seconds> m_block_stalling_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT};

    std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now,
                                                std::chrono::seconds average_interval) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    // NOTE(review): extraction appears to have dropped a member declaration
    // around here — likely the FastRandomContext m_rng used by PushAddress()
    // and NextInvToInbounds() below. Restore from upstream.

    // All of the following cache a recent block, and are protected by m_most_recent_block_mutex
    Mutex m_most_recent_block_mutex;
    std::shared_ptr<const CBlock> m_most_recent_block GUARDED_BY(m_most_recent_block_mutex);
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex);
    uint256 m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex);
    std::unique_ptr<const std::map<uint256, CTransactionRef>> m_most_recent_block_txs GUARDED_BY(m_most_recent_block_mutex);

    // Data about the low-work headers synchronization, aggregated from all peers' HeadersSyncStates.
    Mutex m_headers_presync_mutex;
    //! (total work, optional (height, timestamp) of the last header) per presyncing peer.
    using HeadersPresyncStats = std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
    std::map<NodeId, HeadersPresyncStats> m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex) {};
    NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex) {-1};
    std::atomic_bool m_headers_presync_should_signal{false};

    int m_highest_fast_announce GUARDED_BY(::cs_main){0};

    /** Block-download bookkeeping. */
    bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    bool IsBlockRequestedFromOutbound(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex);

    void RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /* Mark a block as in flight
     * Returns false, still setting pit, if the block was already in flight from the same peer
     * pit will only be valid as long as the same cs_main lock is being held
     */
    bool BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    void TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex* from_tip, const CBlockIndex* target_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    void FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain=nullptr, NodeId* nodeStaller=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /* Multimap used to preserve insertion order */
    typedef std::multimap<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator>> BlockDownloadMap;
    BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main);

    std::atomic<std::chrono::seconds> m_last_tip_update{0s};

    // NOTE(review): the trailing lock annotation and terminating ';' of this
    // declaration were lost in extraction — restore from upstream.
    CTransactionRef FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid)

    // NOTE(review): the terminating ';' after this annotation was lost in
    // extraction — restore from upstream.
    void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex, NetEventsInterface::g_msgproc_mutex)

    void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked);

    void ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex);

    /** Compact-block (BIP152) high-bandwidth peer selection. */
    void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex);

    std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);

    int m_peers_downloading_from GUARDED_BY(cs_main) = 0;

    void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    //! Ring buffer of extra transactions kept for compact block reconstruction.
    std::vector<CTransactionRef> vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex);
    size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0;

    void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    void UpdateBlockAvailability(NodeId nodeid, const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    int64_t ApproximateBestBlockDepth() const;

    bool BlockRequestAllowed(const CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    bool AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex);

    /** BIP157/158 compact block filter serving. */
    bool PrepareBlockFilterRequest(CNode& node, Peer& peer,
                                   BlockFilterType filter_type, uint32_t start_height,
                                   const uint256& stop_hash, uint32_t max_height_diff,
                                   const CBlockIndex*& stop_index,
                                   BlockFilterIndex*& filter_index);

    void ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv);

    void ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv);

    void ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv);

    /** Address relay helpers. */
    bool SetupAddressRelay(const CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    void AddAddressKnown(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
    void PushAddress(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
};
1045 
1046 const CNodeState* PeerManagerImpl::State(NodeId pnode) const
1047 {
1048  std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
1049  if (it == m_node_states.end())
1050  return nullptr;
1051  return &it->second;
1052 }
1053 
1054 CNodeState* PeerManagerImpl::State(NodeId pnode)
1055 {
1056  return const_cast<CNodeState*>(std::as_const(*this).State(pnode));
1057 }
1058 
1064 static bool IsAddrCompatible(const Peer& peer, const CAddress& addr)
1065 {
1066  return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
1067 }
1068 
/** Record addr in the peer's rolling known-addresses filter so it is not re-announced. */
void PeerManagerImpl::AddAddressKnown(Peer& peer, const CAddress& addr)
{
    // m_addr_known is only allocated once address relay is enabled for this
    // peer — callers must not reach here before that.
    assert(peer.m_addr_known);
    peer.m_addr_known->insert(addr.GetKey());
}
1074 
1075 void PeerManagerImpl::PushAddress(Peer& peer, const CAddress& addr)
1076 {
1077  // Known checking here is only to save space from duplicates.
1078  // Before sending, we'll filter it again for known addresses that were
1079  // added after addresses were pushed.
1080  assert(peer.m_addr_known);
1081  if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) && IsAddrCompatible(peer, addr)) {
1082  if (peer.m_addrs_to_send.size() >= MAX_ADDR_TO_SEND) {
1083  peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] = addr;
1084  } else {
1085  peer.m_addrs_to_send.push_back(addr);
1086  }
1087  }
1088 }
1089 
1090 static void AddKnownTx(Peer& peer, const uint256& hash)
1091 {
1092  auto tx_relay = peer.GetTxRelay();
1093  if (!tx_relay) return;
1094 
1095  LOCK(tx_relay->m_tx_inventory_mutex);
1096  tx_relay->m_tx_inventory_known_filter.insert(hash);
1097 }
1098 
1100 static bool CanServeBlocks(const Peer& peer)
1101 {
1102  return peer.m_their_services & (NODE_NETWORK|NODE_NETWORK_LIMITED);
1103 }
1104 
1107 static bool IsLimitedPeer(const Peer& peer)
1108 {
1109  return (!(peer.m_their_services & NODE_NETWORK) &&
1110  (peer.m_their_services & NODE_NETWORK_LIMITED));
1111 }
1112 
1114 static bool CanServeWitnesses(const Peer& peer)
1115 {
1116  return peer.m_their_services & NODE_WITNESS;
1117 }
1118 
1119 std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
1120  std::chrono::seconds average_interval)
1121 {
1122  if (m_next_inv_to_inbounds.load() < now) {
1123  // If this function were called from multiple threads simultaneously
1124  // it would possible that both update the next send variable, and return a different result to their caller.
1125  // This is not possible in practice as only the net processing thread invokes this function.
1126  m_next_inv_to_inbounds = now + m_rng.rand_exp_duration(average_interval);
1127  }
1128  return m_next_inv_to_inbounds;
1129 }
1130 
1131 bool PeerManagerImpl::IsBlockRequested(const uint256& hash)
1132 {
1133  return mapBlocksInFlight.count(hash);
1134 }
1135 
1136 bool PeerManagerImpl::IsBlockRequestedFromOutbound(const uint256& hash)
1137 {
1138  for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
1139  auto [nodeid, block_it] = range.first->second;
1140  PeerRef peer{GetPeerRef(nodeid)};
1141  if (peer && !peer->m_is_inbound) return true;
1142  }
1143 
1144  return false;
1145 }
1146 
/** Forget that `hash` was requested. When from_peer is given, only the request
 *  attributed to that peer is removed; otherwise every in-flight request for
 *  the block is dropped. Also updates per-peer download accounting. */
void PeerManagerImpl::RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer)
{
    auto range = mapBlocksInFlight.equal_range(hash);
    if (range.first == range.second) {
        // Block was not requested from any peer
        return;
    }

    // We should not have requested too many of this block
    Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);

    while (range.first != range.second) {
        auto [node_id, list_it] = range.first->second;

        // When removing for a specific peer, skip other peers' entries.
        if (from_peer && *from_peer != node_id) {
            range.first++;
            continue;
        }

        CNodeState& state = *Assert(State(node_id));

        if (state.vBlocksInFlight.begin() == list_it) {
            // First block on the queue was received, update the start download time for the next one
            state.m_downloading_since = std::max(state.m_downloading_since, GetTime<std::chrono::microseconds>());
        }
        state.vBlocksInFlight.erase(list_it);

        if (state.vBlocksInFlight.empty()) {
            // Last validated block on the queue for this peer was received.
            m_peers_downloading_from--;
        }
        // The peer is no longer considered to be stalling us.
        state.m_stalling_since = 0us;

        // erase() returns the next valid iterator, keeping `range` usable
        // while we mutate the multimap inside the loop.
        range.first = mapBlocksInFlight.erase(range.first);
    }
}
1183 
1184 bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit)
1185 {
1186  const uint256& hash{block.GetBlockHash()};
1187 
1188  CNodeState *state = State(nodeid);
1189  assert(state != nullptr);
1190 
1191  Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);
1192 
1193  // Short-circuit most stuff in case it is from the same node
1194  for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
1195  if (range.first->second.first == nodeid) {
1196  if (pit) {
1197  *pit = &range.first->second.second;
1198  }
1199  return false;
1200  }
1201  }
1202 
1203  // Make sure it's not being fetched already from same peer.
1204  RemoveBlockRequest(hash, nodeid);
1205 
1206  std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
1207  {&block, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)});
1208  if (state->vBlocksInFlight.size() == 1) {
1209  // We're starting a block download (batch) from this peer.
1210  state->m_downloading_since = GetTime<std::chrono::microseconds>();
1211  m_peers_downloading_from++;
1212  }
1213  auto itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it)));
1214  if (pit) {
1215  *pit = &itInFlight->second.second;
1216  }
1217  return true;
1218 }
1219 
// Consider promoting this peer to a BIP152 high-bandwidth compact-block
// announcer. We maintain at most 3 such peers (lNodesAnnouncingHeaderAndIDs);
// adding a new one demotes the oldest back to low-bandwidth mode.
1220 void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
1221 {
// NOTE(review): listing line 1222 appears to have been stripped by the
// documentation generator (likely a cs_main lock assertion) — confirm
// against the original source.
1223 
1224  // When in -blocksonly mode, never request high-bandwidth mode from peers. Our
1225  // mempool will not contain the transactions necessary to reconstruct the
1226  // compact block.
1227  if (m_opts.ignore_incoming_txs) return;
1228 
1229  CNodeState* nodestate = State(nodeid);
1230  PeerRef peer{GetPeerRef(nodeid)};
1231  if (!nodestate || !nodestate->m_provides_cmpctblocks) {
1232  // Don't request compact blocks if the peer has not signalled support
1233  return;
1234  }
1235 
1236  int num_outbound_hb_peers = 0;
// If the peer is already an HB peer, just move it to the back (most recently
// refreshed) position and return; no demotion is needed.
1237  for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
1238  if (*it == nodeid) {
1239  lNodesAnnouncingHeaderAndIDs.erase(it);
1240  lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
1241  return;
1242  }
1243  PeerRef peer_ref{GetPeerRef(*it)};
1244  if (peer_ref && !peer_ref->m_is_inbound) ++num_outbound_hb_peers;
1245  }
1246  if (peer && peer->m_is_inbound) {
1247  // If we're adding an inbound HB peer, make sure we're not removing
1248  // our last outbound HB peer in the process.
1249  if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1) {
1250  PeerRef remove_peer{GetPeerRef(lNodesAnnouncingHeaderAndIDs.front())};
1251  if (remove_peer && !remove_peer->m_is_inbound) {
1252  // Put the HB outbound peer in the second slot, so that it
1253  // doesn't get removed.
1254  std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
1255  }
1256  }
1257  }
1258  m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
// NOTE(review): listing line 1259 appears stripped here (likely a lock
// assertion inside the lambda) — confirm against the original source.
1260  if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
1261  // As per BIP152, we only get 3 of our peers to announce
1262  // blocks using compact encodings.
1263  m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this](CNode* pnodeStop){
1264  MakeAndPushMessage(*pnodeStop, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION);
1265  // save BIP152 bandwidth state: we select peer to be low-bandwidth
1266  pnodeStop->m_bip152_highbandwidth_to = false;
1267  return true;
1268  });
1269  lNodesAnnouncingHeaderAndIDs.pop_front();
1270  }
1271  MakeAndPushMessage(*pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/true, /*version=*/CMPCTBLOCKS_VERSION);
1272  // save BIP152 bandwidth state: we select peer to be high-bandwidth
1273  pfrom->m_bip152_highbandwidth_to = true;
1274  lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
1275  return true;
1276  });
1277 }
1278 
// Heuristic: returns true if our tip looks stale — no tip update has been
// observed for more than three expected block intervals AND we have no block
// downloads in flight that might be about to update it.
1279 bool PeerManagerImpl::TipMayBeStale()
1280 {
// NOTE(review): listing line 1281 appears stripped here (likely a lock
// assertion) — confirm against the original source.
1282  const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
// Lazily initialize the timestamp on first call so we don't immediately
// report a stale tip after startup.
1283  if (m_last_tip_update.load() == 0s) {
1284  m_last_tip_update = GetTime<std::chrono::seconds>();
1285  }
1286  return m_last_tip_update.load() < GetTime<std::chrono::seconds>() - std::chrono::seconds{consensusParams.nPowTargetSpacing * 3} && mapBlocksInFlight.empty();
1287 }
1288 
1289 int64_t PeerManagerImpl::ApproximateBestBlockDepth() const
1290 {
1291  return (GetTime<std::chrono::seconds>() - m_best_block_time.load()).count() / m_chainparams.GetConsensus().nPowTargetSpacing;
1292 }
1293 
1294 bool PeerManagerImpl::CanDirectFetch()
1295 {
1296  return m_chainman.ActiveChain().Tip()->Time() > NodeClock::now() - m_chainparams.GetConsensus().PowTargetSpacing() * 20;
1297 }
1298 
1299 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1300 {
1301  if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
1302  return true;
1303  if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
1304  return true;
1305  return false;
1306 }
1307 
1308 void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
1309  CNodeState *state = State(nodeid);
1310  assert(state != nullptr);
1311 
1312  if (!state->hashLastUnknownBlock.IsNull()) {
1313  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock);
1314  if (pindex && pindex->nChainWork > 0) {
1315  if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
1316  state->pindexBestKnownBlock = pindex;
1317  }
1318  state->hashLastUnknownBlock.SetNull();
1319  }
1320  }
1321 }
1322 
1323 void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
1324  CNodeState *state = State(nodeid);
1325  assert(state != nullptr);
1326 
1327  ProcessBlockAvailability(nodeid);
1328 
1329  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
1330  if (pindex && pindex->nChainWork > 0) {
1331  // An actually better block was announced.
1332  if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
1333  state->pindexBestKnownBlock = pindex;
1334  }
1335  } else {
1336  // An unknown block was announced; just assume that the latest one is the best one.
1337  state->hashLastUnknownBlock = hash;
1338  }
1339 }
1340 
1341 // Logic for calculating which blocks to download from a given peer, given our current tip.
// Appends up to `count` block indexes to vBlocks for requesting from `peer`.
// If the peer is stalling the download window, `nodeStaller` is set to the
// peer id that the window is waiting on (see FindNextBlocks).
1342 void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller)
1343 {
1344  if (count == 0)
1345  return;
1346 
1347  vBlocks.reserve(vBlocks.size() + count);
1348  CNodeState *state = State(peer.m_id);
1349  assert(state != nullptr);
1350 
1351  // Make sure pindexBestKnownBlock is up to date, we'll need it.
1352  ProcessBlockAvailability(peer.m_id);
1353 
// Skip peers whose best known block has less work than our tip or than the
// minimum chain work — they cannot advance our sync.
1354  if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < m_chainman.ActiveChain().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
1355  // This peer has nothing interesting.
1356  return;
1357  }
1358 
1359  // When we sync with AssumeUtxo and discover the snapshot is not in the peer's best chain, abort:
1360  // We can't reorg to this chain due to missing undo data until the background sync has finished,
1361  // so downloading blocks from it would be futile.
1362  const CBlockIndex* snap_base{m_chainman.GetSnapshotBaseBlock()};
1363  if (snap_base && state->pindexBestKnownBlock->GetAncestor(snap_base->nHeight) != snap_base) {
1364  LogDebug(BCLog::NET, "Not downloading blocks from peer=%d, which doesn't have the snapshot block in its best chain.\n", peer.m_id);
1365  return;
1366  }
1367 
1368  // Bootstrap quickly by guessing a parent of our best tip is the forking point.
1369  // Guessing wrong in either direction is not a problem.
1370  // Also reset pindexLastCommonBlock after a snapshot was loaded, so that blocks after the snapshot will be prioritised for download.
1371  if (state->pindexLastCommonBlock == nullptr ||
1372  (snap_base && state->pindexLastCommonBlock->nHeight < snap_base->nHeight)) {
1373  state->pindexLastCommonBlock = m_chainman.ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight, m_chainman.ActiveChain().Height())];
1374  }
1375 
1376  // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
1377  // of its current tip anymore. Go back enough to fix that.
1378  state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
1379  if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
1380  return;
1381 
1382  const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
1383  // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
1384  // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
1385  // download that next block if the window were 1 larger.
1386  int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
1387 
1388  FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd, &m_chainman.ActiveChain(), &nodeStaller);
1389 }
1390 
// Fill vBlocks with up to `count` historical blocks to request from `peer`,
// walking from `from_tip` (background chainstate tip) towards `target_block`
// (the assumeutxo snapshot base). Used only during background IBD.
1391 void PeerManagerImpl::TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex *from_tip, const CBlockIndex* target_block)
1392 {
1393  Assert(from_tip);
1394  Assert(target_block);
1395 
// The caller may already have filled the vector from another source.
1396  if (vBlocks.size() >= count) {
1397  return;
1398  }
1399 
1400  vBlocks.reserve(count);
1401  CNodeState *state = Assert(State(peer.m_id));
1402 
1403  if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) != target_block) {
1404  // This peer can't provide us the complete series of blocks leading up to the
1405  // assumeutxo snapshot base.
1406  //
1407  // Presumably this peer's chain has less work than our ActiveChain()'s tip, or else we
1408  // will eventually crash when we try to reorg to it. Let other logic
1409  // deal with whether we disconnect this peer.
1410  //
1411  // TODO at some point in the future, we might choose to request what blocks
1412  // this peer does have from the historical chain, despite it not having a
1413  // complete history beneath the snapshot base.
1414  return;
1415  }
1416 
// Clamp the download window so we never request past the snapshot base.
1417  FindNextBlocks(vBlocks, peer, state, from_tip, count, std::min<int>(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW, target_block->nHeight));
1418 }
1419 
// Shared worker for FindNextBlocksToDownload / TryDownloadingHistoricalBlocks:
// walk from pindexWalk towards the peer's best known block, collecting up to
// `count` downloadable blocks within the window ending at nWindowEnd.
// `activeChain` (optional) enables pindexLastCommonBlock advancement; the
// optional `nodeStaller` out-param reports which peer the window is stuck on.
1420 void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain, NodeId* nodeStaller)
1421 {
1422  std::vector<const CBlockIndex*> vToFetch;
1423  int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
1424  bool is_limited_peer = IsLimitedPeer(peer);
// -1 means "no in-flight block encountered yet".
1425  NodeId waitingfor = -1;
1426  while (pindexWalk->nHeight < nMaxHeight) {
1427  // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
1428  // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
1429  // as iterating over ~100 CBlockIndex* entries anyway.
1430  int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
1431  vToFetch.resize(nToFetch);
// Jump ahead via GetAncestor, then back-fill the batch using pprev links.
1432  pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
1433  vToFetch[nToFetch - 1] = pindexWalk;
1434  for (unsigned int i = nToFetch - 1; i > 0; i--) {
1435  vToFetch[i - 1] = vToFetch[i]->pprev;
1436  }
1437 
1438  // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
1439  // are not yet downloaded and not in flight to vBlocks. In the meantime, update
1440  // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
1441  // already part of our chain (and therefore don't need it even if pruned).
1442  for (const CBlockIndex* pindex : vToFetch) {
1443  if (!pindex->IsValid(BLOCK_VALID_TREE)) {
1444  // We consider the chain that this peer is on invalid.
1445  return;
1446  }
1447 
1448  if (!CanServeWitnesses(peer) && DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) {
1449  // We wouldn't download this block or its descendants from this peer.
1450  return;
1451  }
1452 
1453  if (pindex->nStatus & BLOCK_HAVE_DATA || (activeChain && activeChain->Contains(pindex))) {
1454  if (activeChain && pindex->HaveNumChainTxs()) {
1455  state->pindexLastCommonBlock = pindex;
1456  }
1457  continue;
1458  }
1459 
1460  // Is block in-flight?
1461  if (IsBlockRequested(pindex->GetBlockHash())) {
1462  if (waitingfor == -1) {
1463  // This is the first already-in-flight block.
1464  waitingfor = mapBlocksInFlight.lower_bound(pindex->GetBlockHash())->second.first;
1465  }
1466  continue;
1467  }
1468 
1469  // The block is not already downloaded, and not yet in flight.
1470  if (pindex->nHeight > nWindowEnd) {
1471  // We reached the end of the window.
1472  if (vBlocks.size() == 0 && waitingfor != peer.m_id) {
1473  // We aren't able to fetch anything, but we would be if the download window was one larger.
1474  if (nodeStaller) *nodeStaller = waitingfor;
1475  }
1476  return;
1477  }
1478 
1479  // Don't request blocks that go further than what limited peers can provide
1480  if (is_limited_peer && (state->pindexBestKnownBlock->nHeight - pindex->nHeight >= static_cast<int>(NODE_NETWORK_LIMITED_MIN_BLOCKS) - 2 /* two blocks buffer for possible races */)) {
1481  continue;
1482  }
1483 
1484  vBlocks.push_back(pindex);
1485  if (vBlocks.size() == count) {
1486  return;
1487  }
1488  }
1489  }
1490 }
1491 
1492 } // namespace
1493 
1494 void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer)
1495 {
1496  uint64_t my_services{peer.m_our_services};
1497  const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())};
1498  uint64_t nonce = pnode.GetLocalNonce();
1499  const int nNodeStartingHeight{m_best_height};
1500  NodeId nodeid = pnode.GetId();
1501  CAddress addr = pnode.addr;
1502 
1503  CService addr_you = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ? addr : CService();
1504  uint64_t your_services{addr.nServices};
1505 
1506  const bool tx_relay{!RejectIncomingTxs(pnode)};
1507  MakeAndPushMessage(pnode, NetMsgType::VERSION, PROTOCOL_VERSION, my_services, nTime,
1508  your_services, CNetAddr::V1(addr_you), // Together the pre-version-31402 serialization of CAddress "addrYou" (without nTime)
1509  my_services, CNetAddr::V1(CService{}), // Together the pre-version-31402 serialization of CAddress "addrMe" (without nTime)
1510  nonce, strSubVersion, nNodeStartingHeight, tx_relay);
1511 
1512  if (fLogIPs) {
1513  LogDebug(BCLog::NET, "send version message: version %d, blocks=%d, them=%s, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToStringAddrPort(), tx_relay, nodeid);
1514  } else {
1515  LogDebug(BCLog::NET, "send version message: version %d, blocks=%d, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, tx_relay, nodeid);
1516  }
1517 }
1518 
1519 void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
1520 {
1521  LOCK(cs_main);
1522  CNodeState *state = State(node);
1523  if (state) state->m_last_block_announcement = time_in_seconds;
1524 }
1525 
// Set up per-peer state (CNodeState and Peer objects) for a newly connected
// node. Called from the connection manager before any messages are processed.
1526 void PeerManagerImpl::InitializeNode(const CNode& node, ServiceFlags our_services)
1527 {
1528  NodeId nodeid = node.GetId();
1529  {
1530  LOCK(cs_main); // For m_node_states
1531  m_node_states.try_emplace(m_node_states.end(), nodeid);
1532  }
// Sanity check: the download manager must not have residual state for this id.
1533  WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty(nodeid));
1534 
// NOTE(review): listing line 1535 (the `if` condition guarding the NODE_BLOOM
// grant, presumably a permission-flag check) appears stripped — confirm
// against the original source.
1536  our_services = static_cast<ServiceFlags>(our_services | NODE_BLOOM);
1537  }
1538 
1539  PeerRef peer = std::make_shared<Peer>(nodeid, our_services, node.IsInboundConn());
1540  {
1541  LOCK(m_peer_mutex);
1542  m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
1543  }
1544 }
1545 
1546 void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler)
1547 {
1548  std::set<uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();
1549 
1550  for (const auto& txid : unbroadcast_txids) {
1551  CTransactionRef tx = m_mempool.get(txid);
1552 
1553  if (tx != nullptr) {
1554  RelayTransaction(txid, tx->GetWitnessHash());
1555  } else {
1556  m_mempool.RemoveUnbroadcastTx(txid, true);
1557  }
1558  }
1559 
1560  // Schedule next run for 10-15 minutes in the future.
1561  // We add randomness on every cycle to avoid the possibility of P2P fingerprinting.
1562  const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min);
1563  scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
1564 }
1565 
// Tear down all per-peer state for a disconnected node: Peer object, node
// state, in-flight block requests, tx-download state, reconciliation state,
// and the aggregate counters derived from them.
1566 void PeerManagerImpl::FinalizeNode(const CNode& node)
1567 {
1568  NodeId nodeid = node.GetId();
1569  {
1570  LOCK(cs_main);
1571  {
1572  // We remove the PeerRef from g_peer_map here, but we don't always
1573  // destruct the Peer. Sometimes another thread is still holding a
1574  // PeerRef, so the refcount is >= 1. Be careful not to do any
1575  // processing here that assumes Peer won't be changed before it's
1576  // destructed.
1577  PeerRef peer = RemovePeer(nodeid);
1578  assert(peer != nullptr);
1579  m_wtxid_relay_peers -= peer->m_wtxid_relay;
1580  assert(m_wtxid_relay_peers >= 0);
1581  }
1582  CNodeState *state = State(nodeid);
1583  assert(state != nullptr);
1584 
1585  if (state->fSyncStarted)
1586  nSyncStarted--;
1587 
// Erase only this peer's entries from the (multimap) in-flight index; other
// peers may still have the same blocks in flight.
1588  for (const QueuedBlock& entry : state->vBlocksInFlight) {
1589  auto range = mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash());
1590  while (range.first != range.second) {
1591  auto [node_id, list_it] = range.first->second;
1592  if (node_id != nodeid) {
1593  range.first++;
1594  } else {
1595  range.first = mapBlocksInFlight.erase(range.first);
1596  }
1597  }
1598  }
1599  {
1600  LOCK(m_tx_download_mutex);
1601  m_txdownloadman.DisconnectedPeer(nodeid);
1602  }
1603  if (m_txreconciliation) m_txreconciliation->ForgetPeer(nodeid);
1604  m_num_preferred_download_peers -= state->fPreferredDownload;
1605  m_peers_downloading_from -= (!state->vBlocksInFlight.empty());
1606  assert(m_peers_downloading_from >= 0);
1607  m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
1608  assert(m_outbound_peers_with_protect_from_disconnect >= 0);
1609 
1610  m_node_states.erase(nodeid);
1611 
1612  if (m_node_states.empty()) {
1613  // Do a consistency check after the last peer is removed.
1614  assert(mapBlocksInFlight.empty());
1615  assert(m_num_preferred_download_peers == 0);
1616  assert(m_peers_downloading_from == 0);
1617  assert(m_outbound_peers_with_protect_from_disconnect == 0);
1618  assert(m_wtxid_relay_peers == 0);
1619  WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty());
1620  }
1621  } // cs_main
1622  if (node.fSuccessfullyConnected &&
1623  !node.IsBlockOnlyConn() && !node.IsInboundConn()) {
1624  // Only change visible addrman state for full outbound peers. We don't
1625  // call Connected() for feeler connections since they don't have
1626  // fSuccessfullyConnected set.
1627  m_addrman.Connected(node.addr);
1628  }
1629  {
1630  LOCK(m_headers_presync_mutex);
1631  m_headers_presync_stats.erase(nodeid);
1632  }
1633  LogDebug(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
1634 }
1635 
1636 bool PeerManagerImpl::HasAllDesirableServiceFlags(ServiceFlags services) const
1637 {
1638  // Shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services)
1639  return !(GetDesirableServiceFlags(services) & (~services));
1640 }
1641 
// Decide which service flags we require from a peer offering `services`.
1642 ServiceFlags PeerManagerImpl::GetDesirableServiceFlags(ServiceFlags services) const
1643 {
1644  if (services & NODE_NETWORK_LIMITED) {
1645  // Limited peers are desirable when we are close to the tip.
1646  if (ApproximateBestBlockDepth() < NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS) {
// NOTE(review): listing line 1647 (the return statement for the limited-peer
// case) appears stripped here — confirm against the original source.
1648  }
1649  }
// NOTE(review): listing line 1650 (the default return statement) appears
// stripped here — confirm against the original source.
1651 }
1652 
1653 PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const
1654 {
1655  LOCK(m_peer_mutex);
1656  auto it = m_peer_map.find(id);
1657  return it != m_peer_map.end() ? it->second : nullptr;
1658 }
1659 
1660 PeerRef PeerManagerImpl::RemovePeer(NodeId id)
1661 {
1662  PeerRef ret;
1663  LOCK(m_peer_mutex);
1664  auto it = m_peer_map.find(id);
1665  if (it != m_peer_map.end()) {
1666  ret = std::move(it->second);
1667  m_peer_map.erase(it);
1668  }
1669  return ret;
1670 }
1671 
// Populate `stats` with a snapshot of this peer's state for RPC/GUI display.
// Returns false if the peer is unknown (in either cs_main-guarded CNodeState
// or the Peer map — the two are read under separate locks, in that order).
1672 bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const
1673 {
1674  {
1675  LOCK(cs_main);
1676  const CNodeState* state = State(nodeid);
1677  if (state == nullptr)
1678  return false;
// -1 marks "unknown" for sync/common heights.
1679  stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
1680  stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
1681  for (const QueuedBlock& queue : state->vBlocksInFlight) {
1682  if (queue.pindex)
1683  stats.vHeightInFlight.push_back(queue.pindex->nHeight);
1684  }
1685  }
1686 
1687  PeerRef peer = GetPeerRef(nodeid);
1688  if (peer == nullptr) return false;
1689  stats.their_services = peer->m_their_services;
1690  stats.m_starting_height = peer->m_starting_height;
1691  // It is common for nodes with good ping times to suddenly become lagged,
1692  // due to a new block arriving or other large transfer.
1693  // Merely reporting pingtime might fool the caller into thinking the node was still responsive,
1694  // since pingtime does not update until the ping is complete, which might take a while.
1695  // So, if a ping is taking an unusually long time in flight,
1696  // the caller can immediately detect that this is happening.
1697  auto ping_wait{0us};
1698  if ((0 != peer->m_ping_nonce_sent) && (0 != peer->m_ping_start.load().count())) {
1699  ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
1700  }
1701 
// Tx-relay fields only exist for peers we negotiated transaction relay with.
1702  if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
1703  stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs);
1704  stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load();
1705  } else {
1706  stats.m_relay_txs = false;
1707  stats.m_fee_filter_received = 0;
1708  }
1709 
1710  stats.m_ping_wait = ping_wait;
1711  stats.m_addr_processed = peer->m_addr_processed.load();
1712  stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
1713  stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load();
1714  {
1715  LOCK(peer->m_headers_sync_mutex);
1716  if (peer->m_headers_sync) {
1717  stats.presync_height = peer->m_headers_sync->GetPresyncHeight();
1718  }
1719  }
1720  stats.time_offset = peer->m_time_offset;
1721 
1722  return true;
1723 }
1724 
1725 std::vector<TxOrphanage::OrphanTxBase> PeerManagerImpl::GetOrphanTransactions()
1726 {
1727  LOCK(m_tx_download_mutex);
1728  return m_txdownloadman.GetOrphanTransactions();
1729 }
1730 
// NOTE(review): the function signature line (listing line 1731) appears to
// have been stripped by the documentation generator — presumably
// `PeerManagerInfo PeerManagerImpl::GetInfo() const`, given the return type
// below; confirm against the original source.
// Builds a small aggregate of peer-manager-wide info for callers.
1732 {
1733  return PeerManagerInfo{
1734  .median_outbound_time_offset = m_outbound_time_offsets.Median(),
1735  .ignores_incoming_txs = m_opts.ignore_incoming_txs,
1736  };
1737 }
1738 
1739 void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx)
1740 {
1741  if (m_opts.max_extra_txs <= 0)
1742  return;
1743  if (!vExtraTxnForCompact.size())
1744  vExtraTxnForCompact.resize(m_opts.max_extra_txs);
1745  vExtraTxnForCompact[vExtraTxnForCompactIt] = tx;
1746  vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs;
1747 }
1748 
1749 void PeerManagerImpl::Misbehaving(Peer& peer, const std::string& message)
1750 {
1751  LOCK(peer.m_misbehavior_mutex);
1752 
1753  const std::string message_prefixed = message.empty() ? "" : (": " + message);
1754  peer.m_should_discourage = true;
1755  LogDebug(BCLog::NET, "Misbehaving: peer=%d%s\n", peer.m_id, message_prefixed);
1756  TRACEPOINT(net, misbehaving_connection,
1757  peer.m_id,
1758  message.c_str()
1759  );
1760 }
1761 
// Decide whether to punish a peer based on a block validation failure.
// NOTE(review): the `case` labels of the switch below (BlockValidationResult
// enumerators) appear to have been stripped by the documentation generator —
// the bare statements under the switch belong to those missing labels.
// Confirm against the original source before editing.
1762 void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
1763  bool via_compact_block, const std::string& message)
1764 {
1765  PeerRef peer{GetPeerRef(nodeid)};
1766  switch (state.GetResult()) {
1768  break;
1770  // We didn't try to process the block because the header chain may have
1771  // too little work.
1772  break;
1773  // The node is providing invalid data:
// Compact-block peers are exempt here: a bad compact block can be caused by a
// relayer rather than the originator.
1776  if (!via_compact_block) {
1777  if (peer) Misbehaving(*peer, message);
1778  return;
1779  }
1780  break;
1782  {
1783  // Discourage outbound (but not inbound) peers if on an invalid chain.
1784  // Exempt HB compact block peers. Manual connections are always protected from discouragement.
1785  if (peer && !via_compact_block && !peer->m_is_inbound) {
1786  if (peer) Misbehaving(*peer, message);
1787  return;
1788  }
1789  break;
1790  }
1794  if (peer) Misbehaving(*peer, message);
1795  return;
1796  // Conflicting (but not necessarily invalid) data or different policy:
1798  if (peer) Misbehaving(*peer, message);
1799  return;
1801  break;
1802  }
1803  if (message != "") {
1804  LogDebug(BCLog::NET, "peer=%d: %s\n", nodeid, message);
1805  }
1806 }
1807 
// Decide whether to punish a peer based on a transaction validation failure.
// NOTE(review): the `case` labels of the switch below (TxValidationResult
// enumerators) appear to have been stripped by the documentation generator —
// confirm against the original source before editing.
1808 void PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
1809 {
1810  PeerRef peer{GetPeerRef(nodeid)};
1811  switch (state.GetResult()) {
1813  break;
1814  // The node is providing invalid data:
1816  if (peer) Misbehaving(*peer, "");
1817  return;
1818  // Conflicting (but not necessarily invalid) data or different policy:
1830  break;
1831  }
1832 }
1833 
// Whether we are willing to serve this block to peers: anything on the active
// chain, or a fully-validated fork block that is recent enough (both in time
// and in proof-equivalent time) to not aid fingerprinting attacks.
1834 bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
1835 {
// NOTE(review): listing line 1836 appears stripped here (likely a cs_main
// lock assertion) — confirm against the original source.
1837  if (m_chainman.ActiveChain().Contains(pindex)) return true;
1838  return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (m_chainman.m_best_header != nullptr) &&
1839  (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) &&
1840  (GetBlockProofEquivalentTime(*m_chainman.m_best_header, *pindex, *m_chainman.m_best_header, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
1841 }
1842 
1843 std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index)
1844 {
1845  if (m_chainman.m_blockman.LoadingBlocks()) return "Loading blocks ...";
1846 
1847  // Ensure this peer exists and hasn't been disconnected
1848  PeerRef peer = GetPeerRef(peer_id);
1849  if (peer == nullptr) return "Peer does not exist";
1850 
1851  // Ignore pre-segwit peers
1852  if (!CanServeWitnesses(*peer)) return "Pre-SegWit peer";
1853 
1854  LOCK(cs_main);
1855 
1856  // Forget about all prior requests
1857  RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt);
1858 
1859  // Mark block as in-flight
1860  if (!BlockRequested(peer_id, block_index)) return "Already requested from this peer";
1861 
1862  // Construct message to request the block
1863  const uint256& hash{block_index.GetBlockHash()};
1864  std::vector<CInv> invs{CInv(MSG_BLOCK | MSG_WITNESS_FLAG, hash)};
1865 
1866  // Send block request message to the peer
1867  bool success = m_connman.ForNode(peer_id, [this, &invs](CNode* node) {
1868  this->MakeAndPushMessage(*node, NetMsgType::GETDATA, invs);
1869  return true;
1870  });
1871 
1872  if (!success) return "Peer not fully connected";
1873 
1874  LogDebug(BCLog::NET, "Requesting block %s from peer=%d\n",
1875  hash.ToString(), peer_id);
1876  return std::nullopt;
1877 }
1878 
1879 std::unique_ptr<PeerManager> PeerManager::make(CConnman& connman, AddrMan& addrman,
1880  BanMan* banman, ChainstateManager& chainman,
1881  CTxMemPool& pool, node::Warnings& warnings, Options opts)
1882 {
1883  return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman, pool, warnings, opts);
1884 }
1885 
// PeerManagerImpl constructor: wires up references to the subsystems this
// manager coordinates. Member-initializer order matters (m_rng is used to
// seed m_fee_filter_rounder and m_txdownloadman).
1886 PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman,
1887  BanMan* banman, ChainstateManager& chainman,
1888  CTxMemPool& pool, node::Warnings& warnings, Options opts)
1889  : m_rng{opts.deterministic_rng},
1890  m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}, m_rng},
1891  m_chainparams(chainman.GetParams()),
1892  m_connman(connman),
1893  m_addrman(addrman),
1894  m_banman(banman),
1895  m_chainman(chainman),
1896  m_mempool(pool),
1897  m_txdownloadman(node::TxDownloadOptions{pool, m_rng, opts.max_orphan_txs, opts.deterministic_rng}),
1898  m_warnings{warnings},
1899  m_opts{opts}
1900 {
1901  // While Erlay support is incomplete, it must be enabled explicitly via -txreconciliation.
1902  // This argument can go away after Erlay support is complete.
1903  if (opts.reconcile_txs) {
1904  m_txreconciliation = std::make_unique<TxReconciliationTracker>(TXRECONCILIATION_VERSION);
1905  }
1906 }
1907 
1908 void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler)
1909 {
1910  // Stale tip checking and peer eviction are on two different timers, but we
1911  // don't want them to get out of sync due to drift in the scheduler, so we
1912  // combine them in one function and schedule at the quicker (peer-eviction)
1913  // timer.
1914  static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
1915  scheduler.scheduleEvery([this] { this->CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
1916 
1917  // schedule next run for 10-15 minutes in the future
1918  const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min);
1919  scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
1920 }
1921 
1922 void PeerManagerImpl::ActiveTipChange(const CBlockIndex& new_tip, bool is_ibd)
1923 {
1924  // Ensure mempool mutex was released, otherwise deadlock may occur if another thread holding
1925  // m_tx_download_mutex waits on the mempool mutex.
1926  AssertLockNotHeld(m_mempool.cs);
1927  AssertLockNotHeld(m_tx_download_mutex);
1928 
1929  if (!is_ibd) {
1930  LOCK(m_tx_download_mutex);
1931  // If the chain tip has changed, previously rejected transactions might now be valid, e.g. due
1932  // to a timelock. Reset the rejection filters to give those transactions another chance if we
1933  // see them again.
1934  m_txdownloadman.ActiveTipChange();
1935  }
1936 }
1937 
// Validation-interface callback: a block was connected to a chainstate.
// Updates stale-tip bookkeeping, decays the dynamic stalling timeout, and
// notifies the tx download manager (foreground chainstate only).
1944 void PeerManagerImpl::BlockConnected(
1945  ChainstateRole role,
1946  const std::shared_ptr<const CBlock>& pblock,
1947  const CBlockIndex* pindex)
1948 {
1949  // Update this for all chainstate roles so that we don't mistakenly see peers
1950  // helping us do background IBD as having a stale tip.
1951  m_last_tip_update = GetTime<std::chrono::seconds>();
1952 
1953  // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value
1954  auto stalling_timeout = m_block_stalling_timeout.load();
1955  Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT);
1956  if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) {
// Decay by 15% per connected block, floored at the default. The CAS may lose
// a race with another updater; that is fine — someone made progress.
1957  const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT);
1958  if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
1959  LogDebug(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout));
1960  }
1961  }
1962 
1963  // The following task can be skipped since we don't maintain a mempool for
1964  // the ibd/background chainstate.
1965  if (role == ChainstateRole::BACKGROUND) {
1966  return;
1967  }
1968  LOCK(m_tx_download_mutex);
1969  m_txdownloadman.BlockConnected(pblock);
1970 }
1971 
1972 void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
1973 {
1974  LOCK(m_tx_download_mutex);
1975  m_txdownloadman.BlockDisconnected();
1976 }
1977 
// Validation-interface callback: a new block passed PoW and basic validity
// checks. Fast-announce it as a compact block (BIP152) to peers that asked
// for high-bandwidth mode, before full validation completes.
1982 void PeerManagerImpl::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock)
1983 {
1984  auto pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock, FastRandomContext().rand64());
1985 
1986  LOCK(cs_main);
1987 
// Only fast-announce each height once (protects against announcing during
// reorg races).
1988  if (pindex->nHeight <= m_highest_fast_announce)
1989  return;
1990  m_highest_fast_announce = pindex->nHeight;
1991 
1992  if (!DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) return;
1993 
// Serialize the compact block lazily and at most once, shared across all
// peers that end up receiving it.
1994  uint256 hashBlock(pblock->GetHash());
1995  const std::shared_future<CSerializedNetMsg> lazy_ser{
1996  std::async(std::launch::deferred, [&] { return NetMsg::Make(NetMsgType::CMPCTBLOCK, *pcmpctblock); })};
1997 
1998  {
// Cache the block (indexed by both txid and wtxid) for serving follow-up
// requests without hitting the disk.
1999  auto most_recent_block_txs = std::make_unique<std::map<uint256, CTransactionRef>>();
2000  for (const auto& tx : pblock->vtx) {
2001  most_recent_block_txs->emplace(tx->GetHash(), tx);
2002  most_recent_block_txs->emplace(tx->GetWitnessHash(), tx);
2003  }
2004 
2005  LOCK(m_most_recent_block_mutex);
2006  m_most_recent_block_hash = hashBlock;
2007  m_most_recent_block = pblock;
2008  m_most_recent_compact_block = pcmpctblock;
2009  m_most_recent_block_txs = std::move(most_recent_block_txs);
2010  }
2011 
2012  m_connman.ForEachNode([this, pindex, &lazy_ser, &hashBlock](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
// NOTE(review): listing line 2013 appears stripped here (likely a lock
// assertion inside the lambda) — confirm against the original source.
2014 
2015  if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect)
2016  return;
2017  ProcessBlockAvailability(pnode->GetId());
2018  CNodeState &state = *State(pnode->GetId());
2019  // If the peer has, or we announced to them the previous block already,
2020  // but we don't think they have this one, go ahead and announce it
2021  if (state.m_requested_hb_cmpctblocks && !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {
2022 
2023  LogDebug(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock",
2024  hashBlock.ToString(), pnode->GetId());
2025 
2026  const CSerializedNetMsg& ser_cmpctblock{lazy_ser.get()};
2027  PushMessage(*pnode, ser_cmpctblock.Copy());
2028  state.pindexBestHeaderSent = pindex;
2029  }
2030  });
2031 }
2032 
2037 void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
2038 {
2039  SetBestBlock(pindexNew->nHeight, std::chrono::seconds{pindexNew->GetBlockTime()});
2040 
2041  // Don't relay inventory during initial block download.
2042  if (fInitialDownload) return;
2043 
2044  // Find the hashes of all blocks that weren't previously in the best chain.
2045  std::vector<uint256> vHashes;
2046  const CBlockIndex *pindexToAnnounce = pindexNew;
2047  while (pindexToAnnounce != pindexFork) {
2048  vHashes.push_back(pindexToAnnounce->GetBlockHash());
2049  pindexToAnnounce = pindexToAnnounce->pprev;
2050  if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
2051  // Limit announcements in case of a huge reorganization.
2052  // Rely on the peer's synchronization mechanism in that case.
2053  break;
2054  }
2055  }
2056 
2057  {
2058  LOCK(m_peer_mutex);
2059  for (auto& it : m_peer_map) {
2060  Peer& peer = *it.second;
2061  LOCK(peer.m_block_inv_mutex);
2062  for (const uint256& hash : vHashes | std::views::reverse) {
2063  peer.m_blocks_for_headers_relay.push_back(hash);
2064  }
2065  }
2066  }
2067 
2068  m_connman.WakeMessageHandler();
2069 }
2070 
void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationState& state)
{
    LOCK(cs_main);

    const uint256 hash(block.GetHash());
    // Look up which peer (if any) delivered this block to us.
    std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);

    // If the block failed validation, we know where it came from and we're still connected
    // to that peer, maybe punish.
    if (state.IsInvalid() &&
        it != mapBlockSource.end() &&
        State(it->second.first)) {
            // NOTE(review): the stored bool is negated to form via_compact_block,
            // i.e. it appears to record "NOT received via compact block" — confirm
            // against where mapBlockSource entries are inserted.
            MaybePunishNodeForBlock(/*nodeid=*/ it->second.first, state, /*via_compact_block=*/ !it->second.second);
    }
    // Check that:
    // 1. The block is valid
    // 2. We're not in initial block download
    // 3. This is currently the best block we're aware of. We haven't updated
    //    the tip yet so we have no way to check this directly here. Instead we
    //    just check that there are currently no other blocks in flight.
    //    (count(hash) == size() holds exactly when every in-flight request —
    //    possibly zero — is for this block.)
    else if (state.IsValid() &&
             !m_chainman.IsInitialBlockDownload() &&
             mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
        if (it != mapBlockSource.end()) {
            // The peer gave us what is likely the new tip: consider upgrading
            // it to a high-bandwidth compact-block announcer.
            MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
        }
    }
    // The block has been processed either way; drop its source entry.
    if (it != mapBlockSource.end())
        mapBlockSource.erase(it);
}
2105 
2107 //
2108 // Messages
2109 //
2110 
2111 bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash)
2112 {
2113  return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr;
2114 }
2115 
2116 void PeerManagerImpl::SendPings()
2117 {
2118  LOCK(m_peer_mutex);
2119  for(auto& it : m_peer_map) it.second->m_ping_queued = true;
2120 }
2121 
2122 void PeerManagerImpl::RelayTransaction(const uint256& txid, const uint256& wtxid)
2123 {
2124  LOCK(m_peer_mutex);
2125  for(auto& it : m_peer_map) {
2126  Peer& peer = *it.second;
2127  auto tx_relay = peer.GetTxRelay();
2128  if (!tx_relay) continue;
2129 
2130  LOCK(tx_relay->m_tx_inventory_mutex);
2131  // Only queue transactions for announcement once the version handshake
2132  // is completed. The time of arrival for these transactions is
2133  // otherwise at risk of leaking to a spy, if the spy is able to
2134  // distinguish transactions received during the handshake from the rest
2135  // in the announcement.
2136  if (tx_relay->m_next_inv_send_time == 0s) continue;
2137 
2138  const uint256& hash{peer.m_wtxid_relay ? wtxid : txid};
2139  if (!tx_relay->m_tx_inventory_known_filter.contains(hash)) {
2140  tx_relay->m_tx_inventory_to_send.insert(hash);
2141  }
2142  };
2143 }
2144 
void PeerManagerImpl::RelayAddress(NodeId originator,
                                   const CAddress& addr,
                                   bool fReachable)
{
    // We choose the same nodes within a given 24h window (if the list of connected
    // nodes does not change) and we don't relay to nodes that already know an
    // address. So within 24h we will likely relay a given address once. This is to
    // prevent a peer from unjustly giving their address better propagation by sending
    // it to us repeatedly.

    if (!fReachable && !addr.IsRelayable()) return;

    // Relay to a limited number of other nodes
    // Use deterministic randomness to send to the same nodes for 24 hours
    // at a time so the m_addr_knowns of the chosen nodes prevent repeats
    const uint64_t hash_addr{CServiceHash(0, 0)(addr)};
    const auto current_time{GetTime<std::chrono::seconds>()};
    // Adding address hash makes exact rotation time different per address, while preserving periodicity.
    const uint64_t time_addr{(static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) / count_seconds(ROTATE_ADDR_RELAY_DEST_INTERVAL)};
        .Write(hash_addr)
        .Write(time_addr)};

    // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers.
    unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;

    // Top-nRelayNodes candidates ranked by keyed hash; best[0] is the highest.
    // A key of 0 marks an unused slot.
    std::array<std::pair<uint64_t, Peer*>, 2> best{{{0, nullptr}, {0, nullptr}}};
    assert(nRelayNodes <= best.size());

    LOCK(m_peer_mutex);

    for (auto& [id, peer] : m_peer_map) {
        if (peer->m_addr_relay_enabled && id != originator && IsAddrCompatible(*peer, addr)) {
            // Per-peer key: the deterministic hasher salted with the peer id.
            uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize();
            for (unsigned int i = 0; i < nRelayNodes; i++) {
                if (hashKey > best[i].first) {
                    // Shift lower-ranked entries down one slot before inserting at i.
                    std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
                    best[i] = std::make_pair(hashKey, peer.get());
                    break;
                }
            }
        }
    };

    // Push the address to each selected peer (skip unused slots).
    for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
        PushAddress(*best[i].second, addr);
    }
}
2193 
/**
 * Serve a single block-type inventory item (BLOCK, WITNESS_BLOCK,
 * FILTERED_BLOCK or CMPCT_BLOCK) requested via GETDATA, enforcing the
 * historical-serving budget, the NODE_NETWORK_LIMITED depth limit, and
 * pruning availability. May set pfrom.fDisconnect on limit violations.
 */
void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv)
{
    std::shared_ptr<const CBlock> a_recent_block;
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
    {
        // Snapshot the most recently announced block/compact block so we can
        // serve it without disk I/O and without holding the mutex for long.
        LOCK(m_most_recent_block_mutex);
        a_recent_block = m_most_recent_block;
        a_recent_compact_block = m_most_recent_compact_block;
    }

    bool need_activate_chain = false;
    {
        LOCK(cs_main);
        const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
        if (pindex) {
            if (pindex->HaveNumChainTxs() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) &&
                    pindex->IsValid(BLOCK_VALID_TREE)) {
                // If we have the block and all of its parents, but have not yet validated it,
                // we might be in the middle of connecting it (ie in the unlock of cs_main
                // before ActivateBestChain but after AcceptBlock).
                // In this case, we need to run ActivateBestChain prior to checking the relay
                // conditions below.
                need_activate_chain = true;
            }
        }
    } // release cs_main before calling ActivateBestChain
    if (need_activate_chain) {
        BlockValidationState state;
        if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
            LogDebug(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
        }
    }

    const CBlockIndex* pindex{nullptr};
    const CBlockIndex* tip{nullptr};
    bool can_direct_fetch{false};
    FlatFilePos block_pos{};
    {
        // Re-look-up the block and evaluate all relay conditions under
        // cs_main; copy out what's needed so the lock is dropped before I/O.
        LOCK(cs_main);
        pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
        if (!pindex) {
            return;
        }
        if (!BlockRequestAllowed(pindex)) {
            LogDebug(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId());
            return;
        }
        // disconnect node in case we have reached the outbound limit for serving historical blocks
        if (m_connman.OutboundTargetReached(true) &&
            (((m_chainman.m_best_header != nullptr) && (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) &&
            !pfrom.HasPermission(NetPermissionFlags::Download) // nodes with the download permission may exceed target
        ) {
            LogDebug(BCLog::NET, "historical block serving limit reached, %s\n", pfrom.DisconnectMsg(fLogIPs));
            pfrom.fDisconnect = true;
            return;
        }
        tip = m_chainman.ActiveChain().Tip();
        // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
        if (!pfrom.HasPermission(NetPermissionFlags::NoBan) && (
                (((peer.m_our_services & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) && (tip->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
           )) {
            LogDebug(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold, %s\n", pfrom.DisconnectMsg(fLogIPs));
            //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
            pfrom.fDisconnect = true;
            return;
        }
        // Pruned nodes may have deleted the block, so check whether
        // it's available before trying to send.
        if (!(pindex->nStatus & BLOCK_HAVE_DATA)) {
            return;
        }
        can_direct_fetch = CanDirectFetch();
        block_pos = pindex->GetBlockPos();
    }

    std::shared_ptr<const CBlock> pblock;
    if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
        // Serve the cached most-recent block without touching disk.
        pblock = a_recent_block;
    } else if (inv.IsMsgWitnessBlk()) {
        // Fast-path: in this case it is possible to serve the block directly from disk,
        // as the network format matches the format on disk
        std::vector<uint8_t> block_data;
        if (!m_chainman.m_blockman.ReadRawBlock(block_data, block_pos)) {
            if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) {
                LogDebug(BCLog::NET, "Block was pruned before it could be read, %s\n", pfrom.DisconnectMsg(fLogIPs));
            } else {
                LogError("Cannot load block from disk, %s\n", pfrom.DisconnectMsg(fLogIPs));
            }
            pfrom.fDisconnect = true;
            return;
        }
        MakeAndPushMessage(pfrom, NetMsgType::BLOCK, Span{block_data});
        // Don't set pblock as we've sent the block
    } else {
        // Send block from disk
        std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
        if (!m_chainman.m_blockman.ReadBlock(*pblockRead, block_pos)) {
            if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) {
                LogDebug(BCLog::NET, "Block was pruned before it could be read, %s\n", pfrom.DisconnectMsg(fLogIPs));
            } else {
                LogError("Cannot load block from disk, %s\n", pfrom.DisconnectMsg(fLogIPs));
            }
            pfrom.fDisconnect = true;
            return;
        }
        pblock = pblockRead;
    }
    if (pblock) {
        // Dispatch on the requested inventory type.
        if (inv.IsMsgBlk()) {
            MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_NO_WITNESS(*pblock));
        } else if (inv.IsMsgWitnessBlk()) {
            MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock));
        } else if (inv.IsMsgFilteredBlk()) {
            bool sendMerkleBlock = false;
            CMerkleBlock merkleBlock;
            if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) {
                LOCK(tx_relay->m_bloom_filter_mutex);
                if (tx_relay->m_bloom_filter) {
                    sendMerkleBlock = true;
                    merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter);
                }
            }
            if (sendMerkleBlock) {
                MakeAndPushMessage(pfrom, NetMsgType::MERKLEBLOCK, merkleBlock);
                // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                // This avoids hurting performance by pointlessly requiring a round-trip
                // Note that there is currently no way for a node to request any single transactions we didn't send here -
                // they must either disconnect and retry or request the full block.
                // Thus, the protocol spec specified allows for us to provide duplicate txn here,
                // however we MUST always provide at least what the remote peer needs
                typedef std::pair<unsigned int, uint256> PairType;
                for (PairType& pair : merkleBlock.vMatchedTxn)
                    MakeAndPushMessage(pfrom, NetMsgType::TX, TX_NO_WITNESS(*pblock->vtx[pair.first]));
            }
            // else
            // no response
        } else if (inv.IsMsgCmpctBlk()) {
            // If a peer is asking for old blocks, we're almost guaranteed
            // they won't have a useful mempool to match against a compact block,
            // and we don't feel like constructing the object for them, so
            // instead we respond with the full, non-compact block.
            if (can_direct_fetch && pindex->nHeight >= tip->nHeight - MAX_CMPCTBLOCK_DEPTH) {
                if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) {
                    MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, *a_recent_compact_block);
                } else {
                    CBlockHeaderAndShortTxIDs cmpctblock{*pblock, m_rng.rand64()};
                    MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, cmpctblock);
                }
            } else {
                MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock));
            }
        }
    }

    {
        LOCK(peer.m_block_inv_mutex);
        // Trigger the peer node to send a getblocks request for the next batch of inventory
        if (inv.hash == peer.m_continuation_block) {
            // Send immediately. This must send even if redundant,
            // and we want it right after the last block so they don't
            // wait for other stuff first.
            std::vector<CInv> vInv;
            vInv.emplace_back(MSG_BLOCK, tip->GetBlockHash());
            MakeAndPushMessage(pfrom, NetMsgType::INV, vInv);
            peer.m_continuation_block.SetNull();
        }
    }
}
2362 
2363 CTransactionRef PeerManagerImpl::FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid)
2364 {
2365  // If a tx was in the mempool prior to the last INV for this peer, permit the request.
2366  auto txinfo = m_mempool.info_for_relay(gtxid, tx_relay.m_last_inv_sequence);
2367  if (txinfo.tx) {
2368  return std::move(txinfo.tx);
2369  }
2370 
2371  // Or it might be from the most recent block
2372  {
2373  LOCK(m_most_recent_block_mutex);
2374  if (m_most_recent_block_txs != nullptr) {
2375  auto it = m_most_recent_block_txs->find(gtxid.GetHash());
2376  if (it != m_most_recent_block_txs->end()) return it->second;
2377  }
2378  }
2379 
2380  return {};
2381 }
2382 
void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
{

    auto tx_relay = peer.GetTxRelay();

    std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
    std::vector<CInv> vNotFound;

    // Process as many TX items from the front of the getdata queue as
    // possible, since they're common and it's efficient to batch process
    // them.
    while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) {
        if (interruptMsgProc) return;
        // The send buffer provides backpressure. If there's no space in
        // the buffer, pause processing until the next call.
        if (pfrom.fPauseSend) break;

        const CInv &inv = *it++;

        if (tx_relay == nullptr) {
            // Ignore GETDATA requests for transactions from block-relay-only
            // peers and peers that asked us not to announce transactions.
            continue;
        }

        CTransactionRef tx = FindTxForGetData(*tx_relay, ToGenTxid(inv));
        if (tx) {
            // WTX and WITNESS_TX imply we serialize with witness
            const auto maybe_with_witness = (inv.IsMsgTx() ? TX_NO_WITNESS : TX_WITH_WITNESS);
            MakeAndPushMessage(pfrom, NetMsgType::TX, maybe_with_witness(*tx));
            // A peer has now received this transaction, so we no longer need
            // to rebroadcast it ourselves.
            m_mempool.RemoveUnbroadcastTx(tx->GetHash());
        } else {
            vNotFound.push_back(inv);
        }
    }

    // Only process one BLOCK item per call, since they're uncommon and can be
    // expensive to process.
    if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
        const CInv &inv = *it++;
        if (inv.IsGenBlkMsg()) {
            ProcessGetBlockData(pfrom, peer, inv);
        }
        // else: If the first item on the queue is an unknown type, we erase it
        // and continue processing the queue on the next call.
    }

    // Drop everything consumed (or skipped) this call from the queue.
    peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);

    if (!vNotFound.empty()) {
        // Let the peer know that we didn't find what it asked for, so it doesn't
        // have to wait around forever.
        // SPV clients care about this message: it's needed when they are
        // recursively walking the dependencies of relevant unconfirmed
        // transactions. SPV clients want to do that because they want to know
        // about (and store and rebroadcast and risk analyze) the dependencies
        // of transactions relevant to them, without having to download the
        // entire memory pool.
        // Also, other nodes can use these messages to automatically request a
        // transaction from some other peer that announced it, and stop
        // waiting for us to respond.
        // In normal operation, we often send NOTFOUND messages for parents of
        // transactions that we relay; if a peer is missing a parent, they may
        // assume we have them and request the parents from us.
        MakeAndPushMessage(pfrom, NetMsgType::NOTFOUND, vNotFound);
    }
}
2451 
2452 uint32_t PeerManagerImpl::GetFetchFlags(const Peer& peer) const
2453 {
2454  uint32_t nFetchFlags = 0;
2455  if (CanServeWitnesses(peer)) {
2456  nFetchFlags |= MSG_WITNESS_FLAG;
2457  }
2458  return nFetchFlags;
2459 }
2460 
2461 void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req)
2462 {
2463  BlockTransactions resp(req);
2464  for (size_t i = 0; i < req.indexes.size(); i++) {
2465  if (req.indexes[i] >= block.vtx.size()) {
2466  Misbehaving(peer, "getblocktxn with out-of-bounds tx indices");
2467  return;
2468  }
2469  resp.txn[i] = block.vtx[req.indexes[i]];
2470  }
2471 
2472  MakeAndPushMessage(pfrom, NetMsgType::BLOCKTXN, resp);
2473 }
2474 
2475 bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer)
2476 {
2477  // Do these headers have proof-of-work matching what's claimed?
2478  if (!HasValidProofOfWork(headers, consensusParams)) {
2479  Misbehaving(peer, "header with invalid proof of work");
2480  return false;
2481  }
2482 
2483  // Are these headers connected to each other?
2484  if (!CheckHeadersAreContinuous(headers)) {
2485  Misbehaving(peer, "non-continuous headers sequence");
2486  return false;
2487  }
2488  return true;
2489 }
2490 
2491 arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold()
2492 {
2493  arith_uint256 near_chaintip_work = 0;
2494  LOCK(cs_main);
2495  if (m_chainman.ActiveChain().Tip() != nullptr) {
2496  const CBlockIndex *tip = m_chainman.ActiveChain().Tip();
2497  // Use a 144 block buffer, so that we'll accept headers that fork from
2498  // near our tip.
2499  near_chaintip_work = tip->nChainWork - std::min<arith_uint256>(144*GetBlockProof(*tip), tip->nChainWork);
2500  }
2501  return std::max(near_chaintip_work, m_chainman.MinimumChainWork());
2502 }
2503 
2510 void PeerManagerImpl::HandleUnconnectingHeaders(CNode& pfrom, Peer& peer,
2511  const std::vector<CBlockHeader>& headers)
2512 {
2513  // Try to fill in the missing headers.
2514  const CBlockIndex* best_header{WITH_LOCK(cs_main, return m_chainman.m_best_header)};
2515  if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
2516  LogDebug(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d)\n",
2517  headers[0].GetHash().ToString(),
2518  headers[0].hashPrevBlock.ToString(),
2519  best_header->nHeight,
2520  pfrom.GetId());
2521  }
2522 
2523  // Set hashLastUnknownBlock for this peer, so that if we
2524  // eventually get the headers - even from a different peer -
2525  // we can use this peer to download.
2526  WITH_LOCK(cs_main, UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()));
2527 }
2528 
2529 bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const
2530 {
2531  uint256 hashLastBlock;
2532  for (const CBlockHeader& header : headers) {
2533  if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
2534  return false;
2535  }
2536  hashLastBlock = header.GetHash();
2537  }
2538  return true;
2539 }
2540 
/**
 * Feed a headers message into an in-progress low-work headers sync, if one
 * exists for this peer. Returns true if the headers were consumed by the
 * sync (possibly replacing `headers` with fully pow-validated ones); false
 * if no sync is in progress or processing failed.
 */
bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, std::vector<CBlockHeader>& headers)
{
    if (peer.m_headers_sync) {
        // A full-size message implies the peer may have more headers after these.
        auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == m_opts.max_headers_result);
        // If it is a valid continuation, we should treat the existing getheaders request as responded to.
        if (result.success) peer.m_last_getheaders_timestamp = {};
        if (result.request_more) {
            auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
            // If we were instructed to ask for a locator, it should not be empty.
            Assume(!locator.vHave.empty());
            // We can only be instructed to request more if processing was successful.
            Assume(result.success);
            if (!locator.vHave.empty()) {
                // It should be impossible for the getheaders request to fail,
                // because we just cleared the last getheaders timestamp.
                bool sent_getheaders = MaybeSendGetHeaders(pfrom, locator, peer);
                Assume(sent_getheaders);
                LogDebug(BCLog::NET, "more getheaders (from %s) to peer=%d\n",
                        locator.vHave.front().ToString(), pfrom.GetId());
            }
        }

        if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) {
            peer.m_headers_sync.reset(nullptr);

            // Delete this peer's entry in m_headers_presync_stats.
            // If this is m_headers_presync_bestpeer, it will be replaced later
            // by the next peer that triggers the else{} branch below.
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats.erase(pfrom.GetId());
        } else {
            // Build statistics for this peer's sync.
            HeadersPresyncStats stats;
            stats.first = peer.m_headers_sync->GetPresyncWork();
            if (peer.m_headers_sync->GetState() == HeadersSyncState::State::PRESYNC) {
                stats.second = {peer.m_headers_sync->GetPresyncHeight(),
                                peer.m_headers_sync->GetPresyncTime()};
            }

            // Update statistics in stats.
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats[pfrom.GetId()] = stats;
            auto best_it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
            bool best_updated = false;
            if (best_it == m_headers_presync_stats.end()) {
                // If the cached best peer is outdated, iterate over all remaining ones (including
                // newly updated one) to find the best one.
                NodeId peer_best{-1};
                const HeadersPresyncStats* stat_best{nullptr};
                // NOTE: this loop variable `peer` shadows the function parameter.
                for (const auto& [peer, stat] : m_headers_presync_stats) {
                    if (!stat_best || stat > *stat_best) {
                        peer_best = peer;
                        stat_best = &stat;
                    }
                }
                m_headers_presync_bestpeer = peer_best;
                best_updated = (peer_best == pfrom.GetId());
            } else if (best_it->first == pfrom.GetId() || stats > best_it->second) {
                // pfrom was and remains the best peer, or pfrom just became best.
                m_headers_presync_bestpeer = pfrom.GetId();
                best_updated = true;
            }
            if (best_updated && stats.second.has_value()) {
                // If the best peer updated, and it is in its first phase, signal.
                m_headers_presync_should_signal = true;
            }
        }

        if (result.success) {
            // We only overwrite the headers passed in if processing was
            // successful.
            headers.swap(result.pow_validated_headers);
        }

        return result.success;
    }
    // Either we didn't have a sync in progress, or something went wrong
    // processing these headers, or we are returning headers to the caller to
    // process.
    return false;
}
2622 
/**
 * Anti-DoS gate for a headers chain whose claimed total work is below our
 * threshold. Returns true if the headers were handled here (the caller must
 * not process them further; `headers` is cleared), false if the chain has
 * sufficient work and normal processing should continue.
 */
bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex* chain_start_header, std::vector<CBlockHeader>& headers)
{
    // Calculate the claimed total work on this chain.
    arith_uint256 total_work = chain_start_header->nChainWork + CalculateClaimedHeadersWork(headers);

    // Our dynamic anti-DoS threshold (minimum work required on a headers chain
    // before we'll store it)
    arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();

    // Avoid DoS via low-difficulty-headers by only processing if the headers
    // are part of a chain with sufficient work.
    if (total_work < minimum_chain_work) {
        // Only try to sync with this peer if their headers message was full;
        // otherwise they don't have more headers after this so no point in
        // trying to sync their too-little-work chain.
        if (headers.size() == m_opts.max_headers_result) {
            // Note: we could advance to the last header in this set that is
            // known to us, rather than starting at the first header (which we
            // may already have); however this is unlikely to matter much since
            // ProcessHeadersMessage() already handles the case where all
            // headers in a received message are already known and are
            // ancestors of m_best_header or chainActive.Tip(), by skipping
            // this logic in that case. So even if the first header in this set
            // of headers is known, some header in this set must be new, so
            // advancing to the first unknown header would be a small effect.
            LOCK(peer.m_headers_sync_mutex);
            peer.m_headers_sync.reset(new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(),
                chain_start_header, minimum_chain_work));

            // Now a HeadersSyncState object for tracking this synchronization
            // is created, process the headers using it as normal. Failures are
            // handled inside of IsContinuationOfLowWorkHeadersSync.
            (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
        } else {
            LogDebug(BCLog::NET, "Ignoring low-work chain (height=%u) from peer=%d\n", chain_start_header->nHeight + headers.size(), pfrom.GetId());
        }

        // The peer has not yet given us a chain that meets our work threshold,
        // so we want to prevent further processing of the headers in any case.
        headers = {};
        return true;
    }

    return false;
}
2668 
2669 bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex* header)
2670 {
2671  if (header == nullptr) {
2672  return false;
2673  } else if (m_chainman.m_best_header != nullptr && header == m_chainman.m_best_header->GetAncestor(header->nHeight)) {
2674  return true;
2675  } else if (m_chainman.ActiveChain().Contains(header)) {
2676  return true;
2677  }
2678  return false;
2679 }
2680 
2681 bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer)
2682 {
2683  const auto current_time = NodeClock::now();
2684 
2685  // Only allow a new getheaders message to go out if we don't have a recent
2686  // one already in-flight
2687  if (current_time - peer.m_last_getheaders_timestamp > HEADERS_RESPONSE_TIME) {
2688  MakeAndPushMessage(pfrom, NetMsgType::GETHEADERS, locator, uint256());
2689  peer.m_last_getheaders_timestamp = current_time;
2690  return true;
2691  }
2692  return false;
2693 }
2694 
2695 /*
2696  * Given a new headers tip ending in last_header, potentially request blocks towards that tip.
2697  * We require that the given tip have at least as much work as our tip, and for
2698  * our current tip to be "close to synced" (see CanDirectFetch()).
2699  */
2700 void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header)
2701 {
2702  LOCK(cs_main);
2703  CNodeState *nodestate = State(pfrom.GetId());
2704 
2705  if (CanDirectFetch() && last_header.IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= last_header.nChainWork) {
2706  std::vector<const CBlockIndex*> vToFetch;
2707  const CBlockIndex* pindexWalk{&last_header};
2708  // Calculate all the blocks we'd need to switch to last_header, up to a limit.
2709  while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
2710  if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
2711  !IsBlockRequested(pindexWalk->GetBlockHash()) &&
2712  (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || CanServeWitnesses(peer))) {
2713  // We don't have this block, and it's not yet in flight.
2714  vToFetch.push_back(pindexWalk);
2715  }
2716  pindexWalk = pindexWalk->pprev;
2717  }
2718  // If pindexWalk still isn't on our main chain, we're looking at a
2719  // very large reorg at a time we think we're close to caught up to
2720  // the main chain -- this shouldn't really happen. Bail out on the
2721  // direct fetch and rely on parallel download instead.
2722  if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
2723  LogDebug(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
2724  last_header.GetBlockHash().ToString(),
2725  last_header.nHeight);
2726  } else {
2727  std::vector<CInv> vGetData;
2728  // Download as much as possible, from earliest to latest.
2729  for (const CBlockIndex* pindex : vToFetch | std::views::reverse) {
2730  if (nodestate->vBlocksInFlight.size() >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
2731  // Can't download any more from this peer
2732  break;
2733  }
2734  uint32_t nFetchFlags = GetFetchFlags(peer);
2735  vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash());
2736  BlockRequested(pfrom.GetId(), *pindex);
2737  LogDebug(BCLog::NET, "Requesting block %s from peer=%d\n",
2738  pindex->GetBlockHash().ToString(), pfrom.GetId());
2739  }
2740  if (vGetData.size() > 1) {
2741  LogDebug(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
2742  last_header.GetBlockHash().ToString(),
2743  last_header.nHeight);
2744  }
2745  if (vGetData.size() > 0) {
2746  if (!m_opts.ignore_incoming_txs &&
2747  nodestate->m_provides_cmpctblocks &&
2748  vGetData.size() == 1 &&
2749  mapBlocksInFlight.size() == 1 &&
2750  last_header.pprev->IsValid(BLOCK_VALID_CHAIN)) {
2751  // In any case, we want to download using a compact block, not a regular one
2752  vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
2753  }
2754  MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vGetData);
2755  }
2756  }
2757  }
2758 }
2759 
/** Bookkeeping after processing a (valid, connecting) headers message from a peer:
 *  record the peer's best known block, note recent high-work announcements,
 *  disconnect low-work outbound peers during IBD, and possibly protect the peer
 *  from bad/lagging-chain eviction.
 *
 *  @param[in] pfrom                 Node the headers came from.
 *  @param[in] peer                  Peer object for pfrom (not referenced in this body).
 *  @param[in] last_header           Block index entry for the last header received.
 *  @param[in] received_new_header   Whether any header in the message was new to our block index.
 *  @param[in] may_have_more_headers Whether the peer may still have more headers for us
 *                                   (i.e. the message was full-sized).
 */
void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer,
                                                        const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
{
    LOCK(cs_main);
    CNodeState *nodestate = State(pfrom.GetId());

    // Record that this peer has (at least) the chain ending in last_header.
    UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash());

    // From here, pindexBestKnownBlock should be guaranteed to be non-null,
    // because it is set in UpdateBlockAvailability. Some nullptr checks
    // are still present, however, as belt-and-suspenders.

    // Remember when a peer announces a header with more work than our tip;
    // used by the stale-tip/eviction logic elsewhere.
    if (received_new_header && last_header.nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
        nodestate->m_last_block_announcement = GetTime();
    }

    // If we're in IBD, we want outbound peers that will serve us a useful
    // chain. Disconnect peers that are on chains with insufficient work.
    if (m_chainman.IsInitialBlockDownload() && !may_have_more_headers) {
        // If the peer has no more headers to give us, then we know we have
        // their tip.
        if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
            // This peer has too little work on their headers chain to help
            // us sync -- disconnect if it is an outbound disconnection
            // candidate.
            // Note: We compare their tip to the minimum chain work (rather than
            // m_chainman.ActiveChain().Tip()) because we won't start block download
            // until we have a headers chain that has at least
            // the minimum chain work, even if a peer has a chain past our tip,
            // as an anti-DoS measure.
            if (pfrom.IsOutboundOrBlockRelayConn()) {
                LogInfo("outbound peer headers chain has insufficient work, %s\n", pfrom.DisconnectMsg(fLogIPs));
                pfrom.fDisconnect = true;
            }
        }
    }

    // If this is an outbound full-relay peer, check to see if we should protect
    // it from the bad/lagging chain logic.
    // Note that outbound block-relay peers are excluded from this protection, and
    // thus always subject to eviction under the bad/lagging chain logic.
    // See ChainSyncTimeoutState.
    if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
        // Only a limited number of peers get this protection, and only if their
        // best known block has at least as much work as our tip.
        if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
            LogDebug(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
            nodestate->m_chain_sync.m_protect = true;
            ++m_outbound_peers_with_protect_from_disconnect;
        }
    }
}
2815 
2816 void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
2817  std::vector<CBlockHeader>&& headers,
2818  bool via_compact_block)
2819 {
2820  size_t nCount = headers.size();
2821 
2822  if (nCount == 0) {
2823  // Nothing interesting. Stop asking this peers for more headers.
2824  // If we were in the middle of headers sync, receiving an empty headers
2825  // message suggests that the peer suddenly has nothing to give us
2826  // (perhaps it reorged to our chain). Clear download state for this peer.
2827  LOCK(peer.m_headers_sync_mutex);
2828  if (peer.m_headers_sync) {
2829  peer.m_headers_sync.reset(nullptr);
2830  LOCK(m_headers_presync_mutex);
2831  m_headers_presync_stats.erase(pfrom.GetId());
2832  }
2833  // A headers message with no headers cannot be an announcement, so assume
2834  // it is a response to our last getheaders request, if there is one.
2835  peer.m_last_getheaders_timestamp = {};
2836  return;
2837  }
2838 
2839  // Before we do any processing, make sure these pass basic sanity checks.
2840  // We'll rely on headers having valid proof-of-work further down, as an
2841  // anti-DoS criteria (note: this check is required before passing any
2842  // headers into HeadersSyncState).
2843  if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) {
2844  // Misbehaving() calls are handled within CheckHeadersPoW(), so we can
2845  // just return. (Note that even if a header is announced via compact
2846  // block, the header itself should be valid, so this type of error can
2847  // always be punished.)
2848  return;
2849  }
2850 
2851  const CBlockIndex *pindexLast = nullptr;
2852 
2853  // We'll set already_validated_work to true if these headers are
2854  // successfully processed as part of a low-work headers sync in progress
2855  // (either in PRESYNC or REDOWNLOAD phase).
2856  // If true, this will mean that any headers returned to us (ie during
2857  // REDOWNLOAD) can be validated without further anti-DoS checks.
2858  bool already_validated_work = false;
2859 
2860  // If we're in the middle of headers sync, let it do its magic.
2861  bool have_headers_sync = false;
2862  {
2863  LOCK(peer.m_headers_sync_mutex);
2864 
2865  already_validated_work = IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
2866 
2867  // The headers we passed in may have been:
2868  // - untouched, perhaps if no headers-sync was in progress, or some
2869  // failure occurred
2870  // - erased, such as if the headers were successfully processed and no
2871  // additional headers processing needs to take place (such as if we
2872  // are still in PRESYNC)
2873  // - replaced with headers that are now ready for validation, such as
2874  // during the REDOWNLOAD phase of a low-work headers sync.
2875  // So just check whether we still have headers that we need to process,
2876  // or not.
2877  if (headers.empty()) {
2878  return;
2879  }
2880 
2881  have_headers_sync = !!peer.m_headers_sync;
2882  }
2883 
2884  // Do these headers connect to something in our block index?
2885  const CBlockIndex *chain_start_header{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock))};
2886  bool headers_connect_blockindex{chain_start_header != nullptr};
2887 
2888  if (!headers_connect_blockindex) {
2889  // This could be a BIP 130 block announcement, use
2890  // special logic for handling headers that don't connect, as this
2891  // could be benign.
2892  HandleUnconnectingHeaders(pfrom, peer, headers);
2893  return;
2894  }
2895 
2896  // If headers connect, assume that this is in response to any outstanding getheaders
2897  // request we may have sent, and clear out the time of our last request. Non-connecting
2898  // headers cannot be a response to a getheaders request.
2899  peer.m_last_getheaders_timestamp = {};
2900 
2901  // If the headers we received are already in memory and an ancestor of
2902  // m_best_header or our tip, skip anti-DoS checks. These headers will not
2903  // use any more memory (and we are not leaking information that could be
2904  // used to fingerprint us).
2905  const CBlockIndex *last_received_header{nullptr};
2906  {
2907  LOCK(cs_main);
2908  last_received_header = m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash());
2909  if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
2910  already_validated_work = true;
2911  }
2912  }
2913 
2914  // If our peer has NetPermissionFlags::NoBan privileges, then bypass our
2915  // anti-DoS logic (this saves bandwidth when we connect to a trusted peer
2916  // on startup).
2918  already_validated_work = true;
2919  }
2920 
2921  // At this point, the headers connect to something in our block index.
2922  // Do anti-DoS checks to determine if we should process or store for later
2923  // processing.
2924  if (!already_validated_work && TryLowWorkHeadersSync(peer, pfrom,
2925  chain_start_header, headers)) {
2926  // If we successfully started a low-work headers sync, then there
2927  // should be no headers to process any further.
2928  Assume(headers.empty());
2929  return;
2930  }
2931 
2932  // At this point, we have a set of headers with sufficient work on them
2933  // which can be processed.
2934 
2935  // If we don't have the last header, then this peer will have given us
2936  // something new (if these headers are valid).
2937  bool received_new_header{last_received_header == nullptr};
2938 
2939  // Now process all the headers.
2940  BlockValidationState state;
2941  if (!m_chainman.ProcessNewBlockHeaders(headers, /*min_pow_checked=*/true, state, &pindexLast)) {
2942  if (state.IsInvalid()) {
2943  MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received");
2944  return;
2945  }
2946  }
2947  assert(pindexLast);
2948 
2949  // Consider fetching more headers if we are not using our headers-sync mechanism.
2950  if (nCount == m_opts.max_headers_result && !have_headers_sync) {
2951  // Headers message had its maximum size; the peer may have more headers.
2952  if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
2953  LogDebug(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
2954  pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
2955  }
2956  }
2957 
2958  UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == m_opts.max_headers_result);
2959 
2960  // Consider immediately downloading blocks.
2961  HeadersDirectFetchBlocks(pfrom, peer, *pindexLast);
2962 
2963  return;
2964 }
2965 
/** Handle a transaction that was rejected from the mempool.
 *
 *  Informs the tx download manager of the rejection, possibly stashes the tx
 *  for compact-block reconstruction, marks missing parents as known to the
 *  announcing peer, and possibly punishes the peer for the failure.
 *
 *  @param[in] nodeid              Peer that gave us this transaction.
 *  @param[in] ptx                 The rejected transaction.
 *  @param[in] state               Validation state describing the rejection.
 *  @param[in] first_time_failure  Whether this is the first time this tx failed
 *                                 (forwarded to the tx download manager).
 *  @return A 1-parent-1-child package to try validating, if the download
 *          manager suggested one; std::nullopt otherwise.
 */
std::optional<node::PackageToValidate> PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx, const TxValidationState& state,
                                                                         bool first_time_failure)
{
    AssertLockNotHeld(m_peer_mutex);
    AssertLockHeld(g_msgproc_mutex);
    AssertLockHeld(m_tx_download_mutex);

    // May be null if the peer already disconnected.
    PeerRef peer{GetPeerRef(nodeid)};

    LogDebug(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n",
        ptx->GetHash().ToString(),
        ptx->GetWitnessHash().ToString(),
        nodeid,
        state.ToString());

    const auto& [add_extra_compact_tx, unique_parents, package_to_validate] = m_txdownloadman.MempoolRejectedTx(ptx, state, nodeid, first_time_failure);

    // Keep a copy around for compact-block reconstruction, but only for
    // reasonably small transactions (the 100000 bound caps memory usage of
    // the extra-transactions pool; presumably chosen to match limits used
    // elsewhere -- TODO confirm).
    if (add_extra_compact_tx && RecursiveDynamicUsage(*ptx) < 100000) {
        AddToCompactExtraTransactions(ptx);
    }
    // Mark the missing parents as known to this peer, so we don't announce
    // them back when/if we acquire them.
    for (const Txid& parent_txid : unique_parents) {
        if (peer) AddKnownTx(*peer, parent_txid);
    }

    MaybePunishNodeForTx(nodeid, state);

    return package_to_validate;
}
2994 
2995 void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions)
2996 {
2997  AssertLockNotHeld(m_peer_mutex);
2998  AssertLockHeld(g_msgproc_mutex);
2999  AssertLockHeld(m_tx_download_mutex);
3000 
3001  m_txdownloadman.MempoolAcceptedTx(tx);
3002 
3003  LogDebug(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n",
3004  nodeid,
3005  tx->GetHash().ToString(),
3006  tx->GetWitnessHash().ToString(),
3007  m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000);
3008 
3009  RelayTransaction(tx->GetHash(), tx->GetWitnessHash());
3010 
3011  for (const CTransactionRef& removedTx : replaced_transactions) {
3012  AddToCompactExtraTransactions(removedTx);
3013  }
3014 }
3015 
3016 void PeerManagerImpl::ProcessPackageResult(const node::PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result)
3017 {
3018  AssertLockNotHeld(m_peer_mutex);
3019  AssertLockHeld(g_msgproc_mutex);
3020  AssertLockHeld(m_tx_download_mutex);
3021 
3022  const auto& package = package_to_validate.m_txns;
3023  const auto& senders = package_to_validate.m_senders;
3024 
3025  if (package_result.m_state.IsInvalid()) {
3026  m_txdownloadman.MempoolRejectedPackage(package);
3027  }
3028  // We currently only expect to process 1-parent-1-child packages. Remove if this changes.
3029  if (!Assume(package.size() == 2)) return;
3030 
3031  // Iterate backwards to erase in-package descendants from the orphanage before they become
3032  // relevant in AddChildrenToWorkSet.
3033  auto package_iter = package.rbegin();
3034  auto senders_iter = senders.rbegin();
3035  while (package_iter != package.rend()) {
3036  const auto& tx = *package_iter;
3037  const NodeId nodeid = *senders_iter;
3038  const auto it_result{package_result.m_tx_results.find(tx->GetWitnessHash())};
3039 
3040  // It is not guaranteed that a result exists for every transaction.
3041  if (it_result != package_result.m_tx_results.end()) {
3042  const auto& tx_result = it_result->second;
3043  switch (tx_result.m_result_type) {
3045  {
3046  ProcessValidTx(nodeid, tx, tx_result.m_replaced_transactions);
3047  break;
3048  }
3051  {
3052  // Don't add to vExtraTxnForCompact, as these transactions should have already been
3053  // added there when added to the orphanage or rejected for TX_RECONSIDERABLE.
3054  // This should be updated if package submission is ever used for transactions
3055  // that haven't already been validated before.
3056  ProcessInvalidTx(nodeid, tx, tx_result.m_state, /*first_time_failure=*/false);
3057  break;
3058  }
3060  {
3061  // AlreadyHaveTx() should be catching transactions that are already in mempool.
3062  Assume(false);
3063  break;
3064  }
3065  }
3066  }
3067  package_iter++;
3068  senders_iter++;
3069  }
3070 }
3071 
3072 bool PeerManagerImpl::ProcessOrphanTx(Peer& peer)
3073 {
3074  AssertLockHeld(g_msgproc_mutex);
3075  LOCK2(::cs_main, m_tx_download_mutex);
3076 
3077  CTransactionRef porphanTx = nullptr;
3078 
3079  while (CTransactionRef porphanTx = m_txdownloadman.GetTxToReconsider(peer.m_id)) {
3080  const MempoolAcceptResult result = m_chainman.ProcessTransaction(porphanTx);
3081  const TxValidationState& state = result.m_state;
3082  const Txid& orphanHash = porphanTx->GetHash();
3083  const Wtxid& orphan_wtxid = porphanTx->GetWitnessHash();
3084 
3085  if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
3086  LogDebug(BCLog::TXPACKAGES, " accepted orphan tx %s (wtxid=%s)\n", orphanHash.ToString(), orphan_wtxid.ToString());
3087  ProcessValidTx(peer.m_id, porphanTx, result.m_replaced_transactions);
3088  return true;
3089  } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
3090  LogDebug(BCLog::TXPACKAGES, " invalid orphan tx %s (wtxid=%s) from peer=%d. %s\n",
3091  orphanHash.ToString(),
3092  orphan_wtxid.ToString(),
3093  peer.m_id,
3094  state.ToString());
3095 
3096  if (Assume(state.IsInvalid() &&
3100  ProcessInvalidTx(peer.m_id, porphanTx, state, /*first_time_failure=*/false);
3101  }
3102  return true;
3103  }
3104  }
3105 
3106  return false;
3107 }
3108 
3109 bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer,
3110  BlockFilterType filter_type, uint32_t start_height,
3111  const uint256& stop_hash, uint32_t max_height_diff,
3112  const CBlockIndex*& stop_index,
3113  BlockFilterIndex*& filter_index)
3114 {
3115  const bool supported_filter_type =
3116  (filter_type == BlockFilterType::BASIC &&
3117  (peer.m_our_services & NODE_COMPACT_FILTERS));
3118  if (!supported_filter_type) {
3119  LogDebug(BCLog::NET, "peer requested unsupported block filter type: %d, %s\n",
3120  static_cast<uint8_t>(filter_type), node.DisconnectMsg(fLogIPs));
3121  node.fDisconnect = true;
3122  return false;
3123  }
3124 
3125  {
3126  LOCK(cs_main);
3127  stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash);
3128 
3129  // Check that the stop block exists and the peer would be allowed to fetch it.
3130  if (!stop_index || !BlockRequestAllowed(stop_index)) {
3131  LogDebug(BCLog::NET, "peer requested invalid block hash: %s, %s\n",
3132  stop_hash.ToString(), node.DisconnectMsg(fLogIPs));
3133  node.fDisconnect = true;
3134  return false;
3135  }
3136  }
3137 
3138  uint32_t stop_height = stop_index->nHeight;
3139  if (start_height > stop_height) {
3140  LogDebug(BCLog::NET, "peer sent invalid getcfilters/getcfheaders with "
3141  "start height %d and stop height %d, %s\n",
3142  start_height, stop_height, node.DisconnectMsg(fLogIPs));
3143  node.fDisconnect = true;
3144  return false;
3145  }
3146  if (stop_height - start_height >= max_height_diff) {
3147  LogDebug(BCLog::NET, "peer requested too many cfilters/cfheaders: %d / %d, %s\n",
3148  stop_height - start_height + 1, max_height_diff, node.DisconnectMsg(fLogIPs));
3149  node.fDisconnect = true;
3150  return false;
3151  }
3152 
3153  filter_index = GetBlockFilterIndex(filter_type);
3154  if (!filter_index) {
3155  LogDebug(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type));
3156  return false;
3157  }
3158 
3159  return true;
3160 }
3161 
3162 void PeerManagerImpl::ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv)
3163 {
3164  uint8_t filter_type_ser;
3165  uint32_t start_height;
3166  uint256 stop_hash;
3167 
3168  vRecv >> filter_type_ser >> start_height >> stop_hash;
3169 
3170  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3171 
3172  const CBlockIndex* stop_index;
3173  BlockFilterIndex* filter_index;
3174  if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash,
3175  MAX_GETCFILTERS_SIZE, stop_index, filter_index)) {
3176  return;
3177  }
3178 
3179  std::vector<BlockFilter> filters;
3180  if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
3181  LogDebug(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
3182  BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
3183  return;
3184  }
3185 
3186  for (const auto& filter : filters) {
3187  MakeAndPushMessage(node, NetMsgType::CFILTER, filter);
3188  }
3189 }
3190 
3191 void PeerManagerImpl::ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv)
3192 {
3193  uint8_t filter_type_ser;
3194  uint32_t start_height;
3195  uint256 stop_hash;
3196 
3197  vRecv >> filter_type_ser >> start_height >> stop_hash;
3198 
3199  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3200 
3201  const CBlockIndex* stop_index;
3202  BlockFilterIndex* filter_index;
3203  if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash,
3204  MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
3205  return;
3206  }
3207 
3208  uint256 prev_header;
3209  if (start_height > 0) {
3210  const CBlockIndex* const prev_block =
3211  stop_index->GetAncestor(static_cast<int>(start_height - 1));
3212  if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
3213  LogDebug(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
3214  BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString());
3215  return;
3216  }
3217  }
3218 
3219  std::vector<uint256> filter_hashes;
3220  if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) {
3221  LogDebug(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
3222  BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
3223  return;
3224  }
3225 
3226  MakeAndPushMessage(node, NetMsgType::CFHEADERS,
3227  filter_type_ser,
3228  stop_index->GetBlockHash(),
3229  prev_header,
3230  filter_hashes);
3231 }
3232 
3233 void PeerManagerImpl::ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv)
3234 {
3235  uint8_t filter_type_ser;
3236  uint256 stop_hash;
3237 
3238  vRecv >> filter_type_ser >> stop_hash;
3239 
3240  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3241 
3242  const CBlockIndex* stop_index;
3243  BlockFilterIndex* filter_index;
3244  if (!PrepareBlockFilterRequest(node, peer, filter_type, /*start_height=*/0, stop_hash,
3245  /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
3246  stop_index, filter_index)) {
3247  return;
3248  }
3249 
3250  std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
3251 
3252  // Populate headers.
3253  const CBlockIndex* block_index = stop_index;
3254  for (int i = headers.size() - 1; i >= 0; i--) {
3255  int height = (i + 1) * CFCHECKPT_INTERVAL;
3256  block_index = block_index->GetAncestor(height);
3257 
3258  if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
3259  LogDebug(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
3260  BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString());
3261  return;
3262  }
3263  }
3264 
3265  MakeAndPushMessage(node, NetMsgType::CFCHECKPT,
3266  filter_type_ser,
3267  stop_index->GetBlockHash(),
3268  headers);
3269 }
3270 
3271 void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked)
3272 {
3273  bool new_block{false};
3274  m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked, &new_block);
3275  if (new_block) {
3276  node.m_last_block_time = GetTime<std::chrono::seconds>();
3277  // In case this block came from a different peer than we requested
3278  // from, we can erase the block request now anyway (as we just stored
3279  // this block to disk).
3280  LOCK(cs_main);
3281  RemoveBlockRequest(block->GetHash(), std::nullopt);
3282  } else {
3283  LOCK(cs_main);
3284  mapBlockSource.erase(block->GetHash());
3285  }
3286 }
3287 
/** Handle a BIP 152 blocktxn message: fill in the missing transactions of a
 *  partially downloaded compact block and, if reconstruction succeeds, hand
 *  the completed block to validation.
 *
 *  @param[in] pfrom               Peer that sent the block transactions.
 *  @param[in] peer                Peer object for pfrom (used for misbehavior/fetch flags).
 *  @param[in] block_transactions  The requested transactions plus the block hash.
 */
void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions)
{
    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
    bool fBlockRead{false};
    {
        LOCK(cs_main);

        // All outstanding requests for this block, across peers.
        auto range_flight = mapBlocksInFlight.equal_range(block_transactions.blockhash);
        size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
        bool requested_block_from_this_peer{false};

        // Multimap ensures ordering of outstanding requests. It's either empty or first in line.
        bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());

        // Check whether we actually have a compact-block download in flight
        // from this particular peer.
        while (range_flight.first != range_flight.second) {
            auto [node_id, block_it] = range_flight.first->second;
            if (node_id == pfrom.GetId() && block_it->partialBlock) {
                requested_block_from_this_peer = true;
                break;
            }
            range_flight.first++;
        }

        if (!requested_block_from_this_peer) {
            LogDebug(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId());
            return;
        }

        // Attempt to complete the block with the transactions the peer sent.
        PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock;
        ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn);
        if (status == READ_STATUS_INVALID) {
            RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
            Misbehaving(peer, "invalid compact block/non-matching block transactions");
            return;
        } else if (status == READ_STATUS_FAILED) {
            if (first_in_flight) {
                // Might have collided, fall back to getdata now :(
                std::vector<CInv> invs;
                invs.emplace_back(MSG_BLOCK | GetFetchFlags(peer), block_transactions.blockhash);
                MakeAndPushMessage(pfrom, NetMsgType::GETDATA, invs);
            } else {
                RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId());
                LogDebug(BCLog::NET, "Peer %d sent us a compact block but it failed to reconstruct, waiting on first download to complete\n", pfrom.GetId());
                return;
            }
        } else {
            // Block is either okay, or possibly we received
            // READ_STATUS_CHECKBLOCK_FAILED.
            // Note that CheckBlock can only fail for one of a few reasons:
            // 1. bad-proof-of-work (impossible here, because we've already
            //    accepted the header)
            // 2. merkleroot doesn't match the transactions given (already
            //    caught in FillBlock with READ_STATUS_FAILED, so
            //    impossible here)
            // 3. the block is otherwise invalid (eg invalid coinbase,
            //    block is too big, too many legacy sigops, etc).
            // So if CheckBlock failed, #3 is the only possibility.
            // Under BIP 152, we don't discourage the peer unless proof of work is
            // invalid (we don't require all the stateless checks to have
            // been run). This is handled below, so just treat this as
            // though the block was successfully read, and rely on the
            // handling in ProcessNewBlock to ensure the block index is
            // updated, etc.
            RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // it is now an empty pointer
            fBlockRead = true;
            // mapBlockSource is used for potentially punishing peers and
            // updating which peers send us compact blocks, so the race
            // between here and cs_main in ProcessNewBlock is fine.
            // BIP 152 permits peers to relay compact blocks after validating
            // the header only; we should not punish peers if the block turns
            // out to be invalid.
            mapBlockSource.emplace(block_transactions.blockhash, std::make_pair(pfrom.GetId(), false));
        }
    } // Don't hold cs_main when we call into ProcessNewBlock
    if (fBlockRead) {
        // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
        // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
        // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
        // disk-space attacks), but this should be safe due to the
        // protections in the compact block handler -- see related comment
        // in compact block optimistic reconstruction handling.
        ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true);
    }
    return;
}
3373 
3374 void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, DataStream& vRecv,
3375  const std::chrono::microseconds time_received,
3376  const std::atomic<bool>& interruptMsgProc)
3377 {
3378  AssertLockHeld(g_msgproc_mutex);
3379 
3380  LogDebug(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
3381 
3382  PeerRef peer = GetPeerRef(pfrom.GetId());
3383  if (peer == nullptr) return;
3384 
3385  if (msg_type == NetMsgType::VERSION) {
3386  if (pfrom.nVersion != 0) {
3387  LogDebug(BCLog::NET, "redundant version message from peer=%d\n", pfrom.GetId());
3388  return;
3389  }
3390 
3391  int64_t nTime;
3392  CService addrMe;
3393  uint64_t nNonce = 1;
3394  ServiceFlags nServices;
3395  int nVersion;
3396  std::string cleanSubVer;
3397  int starting_height = -1;
3398  bool fRelay = true;
3399 
3400  vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
3401  if (nTime < 0) {
3402  nTime = 0;
3403  }
3404  vRecv.ignore(8); // Ignore the addrMe service bits sent by the peer
3405  vRecv >> CNetAddr::V1(addrMe);
3406  if (!pfrom.IsInboundConn())
3407  {
3408  // Overwrites potentially existing services. In contrast to this,
3409  // unvalidated services received via gossip relay in ADDR/ADDRV2
3410  // messages are only ever added but cannot replace existing ones.
3411  m_addrman.SetServices(pfrom.addr, nServices);
3412  }
3413  if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices))
3414  {
3415  LogDebug(BCLog::NET, "peer does not offer the expected services (%08x offered, %08x expected), %s\n",
3416  nServices,
3417  GetDesirableServiceFlags(nServices),
3418  pfrom.DisconnectMsg(fLogIPs));
3419  pfrom.fDisconnect = true;
3420  return;
3421  }
3422 
3423  if (nVersion < MIN_PEER_PROTO_VERSION) {
3424  // disconnect from peers older than this proto version
3425  LogDebug(BCLog::NET, "peer using obsolete version %i, %s\n", nVersion, pfrom.DisconnectMsg(fLogIPs));
3426  pfrom.fDisconnect = true;
3427  return;
3428  }
3429 
3430  if (!vRecv.empty()) {
3431  // The version message includes information about the sending node which we don't use:
3432  // - 8 bytes (service bits)
3433  // - 16 bytes (ipv6 address)
3434  // - 2 bytes (port)
3435  vRecv.ignore(26);
3436  vRecv >> nNonce;
3437  }
3438  if (!vRecv.empty()) {
3439  std::string strSubVer;
3440  vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
3441  cleanSubVer = SanitizeString(strSubVer);
3442  }
3443  if (!vRecv.empty()) {
3444  vRecv >> starting_height;
3445  }
3446  if (!vRecv.empty())
3447  vRecv >> fRelay;
3448  // Disconnect if we connected to ourself
3449  if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce))
3450  {
3451  LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToStringAddrPort());
3452  pfrom.fDisconnect = true;
3453  return;
3454  }
3455 
3456  if (pfrom.IsInboundConn() && addrMe.IsRoutable())
3457  {
3458  SeenLocal(addrMe);
3459  }
3460 
3461  // Inbound peers send us their version message when they connect.
3462  // We send our version message in response.
3463  if (pfrom.IsInboundConn()) {
3464  PushNodeVersion(pfrom, *peer);
3465  }
3466 
3467  // Change version
3468  const int greatest_common_version = std::min(nVersion, PROTOCOL_VERSION);
3469  pfrom.SetCommonVersion(greatest_common_version);
3470  pfrom.nVersion = nVersion;
3471 
3472  if (greatest_common_version >= WTXID_RELAY_VERSION) {
3473  MakeAndPushMessage(pfrom, NetMsgType::WTXIDRELAY);
3474  }
3475 
3476  // Signal ADDRv2 support (BIP155).
3477  if (greatest_common_version >= 70016) {
3478  // BIP155 defines addrv2 and sendaddrv2 for all protocol versions, but some
3479  // implementations reject messages they don't know. As a courtesy, don't send
3480  // it to nodes with a version before 70016, as no software is known to support
3481  // BIP155 that doesn't announce at least that protocol version number.
3482  MakeAndPushMessage(pfrom, NetMsgType::SENDADDRV2);
3483  }
3484 
3485  pfrom.m_has_all_wanted_services = HasAllDesirableServiceFlags(nServices);
3486  peer->m_their_services = nServices;
3487  pfrom.SetAddrLocal(addrMe);
3488  {
3489  LOCK(pfrom.m_subver_mutex);
3490  pfrom.cleanSubVer = cleanSubVer;
3491  }
3492  peer->m_starting_height = starting_height;
3493 
3494  // Only initialize the Peer::TxRelay m_relay_txs data structure if:
3495  // - this isn't an outbound block-relay-only connection, and
3496  // - this isn't an outbound feeler connection, and
3497  // - fRelay=true (the peer wishes to receive transaction announcements)
3498  // or we're offering NODE_BLOOM to this peer. NODE_BLOOM means that
3499  // the peer may turn on transaction relay later.
3500  if (!pfrom.IsBlockOnlyConn() &&
3501  !pfrom.IsFeelerConn() &&
3502  (fRelay || (peer->m_our_services & NODE_BLOOM))) {
3503  auto* const tx_relay = peer->SetTxRelay();
3504  {
3505  LOCK(tx_relay->m_bloom_filter_mutex);
3506  tx_relay->m_relay_txs = fRelay; // set to true after we get the first filter* message
3507  }
3508  if (fRelay) pfrom.m_relays_txs = true;
3509  }
3510 
3511  if (greatest_common_version >= WTXID_RELAY_VERSION && m_txreconciliation) {
3512  // Per BIP-330, we announce txreconciliation support if:
3513  // - protocol version per the peer's VERSION message supports WTXID_RELAY;
3514  // - transaction relay is supported per the peer's VERSION message
3515  // - this is not a block-relay-only connection and not a feeler
3516  // - this is not an addr fetch connection;
3517  // - we are not in -blocksonly mode.
3518  const auto* tx_relay = peer->GetTxRelay();
3519  if (tx_relay && WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs) &&
3520  !pfrom.IsAddrFetchConn() && !m_opts.ignore_incoming_txs) {
3521  const uint64_t recon_salt = m_txreconciliation->PreRegisterPeer(pfrom.GetId());
3522  MakeAndPushMessage(pfrom, NetMsgType::SENDTXRCNCL,
3523  TXRECONCILIATION_VERSION, recon_salt);
3524  }
3525  }
3526 
3527  MakeAndPushMessage(pfrom, NetMsgType::VERACK);
3528 
3529  // Potentially mark this peer as a preferred download peer.
3530  {
3531  LOCK(cs_main);
3532  CNodeState* state = State(pfrom.GetId());
3533  state->fPreferredDownload = (!pfrom.IsInboundConn() || pfrom.HasPermission(NetPermissionFlags::NoBan)) && !pfrom.IsAddrFetchConn() && CanServeBlocks(*peer);
3534  m_num_preferred_download_peers += state->fPreferredDownload;
3535  }
3536 
3537  // Attempt to initialize address relay for outbound peers and use result
3538  // to decide whether to send GETADDR, so that we don't send it to
3539  // inbound or outbound block-relay-only peers.
3540  bool send_getaddr{false};
3541  if (!pfrom.IsInboundConn()) {
3542  send_getaddr = SetupAddressRelay(pfrom, *peer);
3543  }
3544  if (send_getaddr) {
3545  // Do a one-time address fetch to help populate/update our addrman.
3546  // If we're starting up for the first time, our addrman may be pretty
3547  // empty, so this mechanism is important to help us connect to the network.
3548  // We skip this for block-relay-only peers. We want to avoid
3549  // potentially leaking addr information and we do not want to
3550  // indicate to the peer that we will participate in addr relay.
3551  MakeAndPushMessage(pfrom, NetMsgType::GETADDR);
3552  peer->m_getaddr_sent = true;
3553  // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND addresses in response
3554  // (bypassing the MAX_ADDR_PROCESSING_TOKEN_BUCKET limit).
3555  peer->m_addr_token_bucket += MAX_ADDR_TO_SEND;
3556  }
3557 
3558  if (!pfrom.IsInboundConn()) {
3559  // For non-inbound connections, we update the addrman to record
3560  // connection success so that addrman will have an up-to-date
3561  // notion of which peers are online and available.
3562  //
3563  // While we strive to not leak information about block-relay-only
3564  // connections via the addrman, not moving an address to the tried
3565  // table is also potentially detrimental because new-table entries
3566  // are subject to eviction in the event of addrman collisions. We
3567  // mitigate the information-leak by never calling
3568  // AddrMan::Connected() on block-relay-only peers; see
3569  // FinalizeNode().
3570  //
3571  // This moves an address from New to Tried table in Addrman,
3572  // resolves tried-table collisions, etc.
3573  m_addrman.Good(pfrom.addr);
3574  }
3575 
3576  const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)};
3577  LogDebug(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s%s\n",
3578  cleanSubVer, pfrom.nVersion,
3579  peer->m_starting_height, addrMe.ToStringAddrPort(), fRelay, pfrom.GetId(),
3580  pfrom.LogIP(fLogIPs), (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));
3581 
3582  peer->m_time_offset = NodeSeconds{std::chrono::seconds{nTime}} - Now<NodeSeconds>();
3583  if (!pfrom.IsInboundConn()) {
3584  // Don't use timedata samples from inbound peers to make it
3585  // harder for others to create false warnings about our clock being out of sync.
3586  m_outbound_time_offsets.Add(peer->m_time_offset);
3587  m_outbound_time_offsets.WarnIfOutOfSync();
3588  }
3589 
3590  // If the peer is old enough to have the old alert system, send it the final alert.
3591  if (greatest_common_version <= 70012) {
3592  constexpr auto finalAlert{"60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"_hex};
3593  MakeAndPushMessage(pfrom, "alert", finalAlert);
3594  }
3595 
3596  // Feeler connections exist only to verify if address is online.
3597  if (pfrom.IsFeelerConn()) {
3598  LogDebug(BCLog::NET, "feeler connection completed, %s\n", pfrom.DisconnectMsg(fLogIPs));
3599  pfrom.fDisconnect = true;
3600  }
3601  return;
3602  }
3603 
3604  if (pfrom.nVersion == 0) {
3605  // Must have a version message before anything else
3606  LogDebug(BCLog::NET, "non-version message before version handshake. Message \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
3607  return;
3608  }
3609 
3610  if (msg_type == NetMsgType::VERACK) {
3611  if (pfrom.fSuccessfullyConnected) {
3612  LogDebug(BCLog::NET, "ignoring redundant verack message from peer=%d\n", pfrom.GetId());
3613  return;
3614  }
3615 
3616  // Log successful connections unconditionally for outbound, but not for inbound as those
3617  // can be triggered by an attacker at high rate.
3619  const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)};
3620  LogPrintf("New %s %s peer connected: version: %d, blocks=%d, peer=%d%s%s\n",
3621  pfrom.ConnectionTypeAsString(),
3622  TransportTypeAsString(pfrom.m_transport->GetInfo().transport_type),
3623  pfrom.nVersion.load(), peer->m_starting_height,
3624  pfrom.GetId(), pfrom.LogIP(fLogIPs),
3625  (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));
3626  }
3627 
3628  if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) {
3629  // Tell our peer we are willing to provide version 2 cmpctblocks.
3630  // However, we do not request new block announcements using
3631  // cmpctblock messages.
3632  // We send this to non-NODE NETWORK peers as well, because
3633  // they may wish to request compact blocks from us
3634  MakeAndPushMessage(pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION);
3635  }
3636 
3637  if (m_txreconciliation) {
3638  if (!peer->m_wtxid_relay || !m_txreconciliation->IsPeerRegistered(pfrom.GetId())) {
3639  // We could have optimistically pre-registered/registered the peer. In that case,
3640  // we should forget about the reconciliation state here if this wasn't followed
3641  // by WTXIDRELAY (since WTXIDRELAY can't be announced later).
3642  m_txreconciliation->ForgetPeer(pfrom.GetId());
3643  }
3644  }
3645 
3646  if (auto tx_relay = peer->GetTxRelay()) {
3647  // `TxRelay::m_tx_inventory_to_send` must be empty before the
3648  // version handshake is completed as
3649  // `TxRelay::m_next_inv_send_time` is first initialised in
3650  // `SendMessages` after the verack is received. Any transactions
3651  // received during the version handshake would otherwise
3652  // immediately be advertised without random delay, potentially
3653  // leaking the time of arrival to a spy.
3654  Assume(WITH_LOCK(
3655  tx_relay->m_tx_inventory_mutex,
3656  return tx_relay->m_tx_inventory_to_send.empty() &&
3657  tx_relay->m_next_inv_send_time == 0s));
3658  }
3659 
3660  {
3661  LOCK2(::cs_main, m_tx_download_mutex);
3662  const CNodeState* state = State(pfrom.GetId());
3663  m_txdownloadman.ConnectedPeer(pfrom.GetId(), node::TxDownloadConnectionInfo {
3664  .m_preferred = state->fPreferredDownload,
3665  .m_relay_permissions = pfrom.HasPermission(NetPermissionFlags::Relay),
3666  .m_wtxid_relay = peer->m_wtxid_relay,
3667  });
3668  }
3669 
3670  pfrom.fSuccessfullyConnected = true;
3671  return;
3672  }
3673 
3674  if (msg_type == NetMsgType::SENDHEADERS) {
3675  peer->m_prefers_headers = true;
3676  return;
3677  }
3678 
3679  if (msg_type == NetMsgType::SENDCMPCT) {
3680  bool sendcmpct_hb{false};
3681  uint64_t sendcmpct_version{0};
3682  vRecv >> sendcmpct_hb >> sendcmpct_version;
3683 
3684  // Only support compact block relay with witnesses
3685  if (sendcmpct_version != CMPCTBLOCKS_VERSION) return;
3686 
3687  LOCK(cs_main);
3688  CNodeState* nodestate = State(pfrom.GetId());
3689  nodestate->m_provides_cmpctblocks = true;
3690  nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;
3691  // save whether peer selects us as BIP152 high-bandwidth peer
3692  // (receiving sendcmpct(1) signals high-bandwidth, sendcmpct(0) low-bandwidth)
3693  pfrom.m_bip152_highbandwidth_from = sendcmpct_hb;
3694  return;
3695  }
3696 
3697  // BIP339 defines feature negotiation of wtxidrelay, which must happen between
3698  // VERSION and VERACK to avoid relay problems from switching after a connection is up.
3699  if (msg_type == NetMsgType::WTXIDRELAY) {
3700  if (pfrom.fSuccessfullyConnected) {
3701  // Disconnect peers that send a wtxidrelay message after VERACK.
3702  LogDebug(BCLog::NET, "wtxidrelay received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs));
3703  pfrom.fDisconnect = true;
3704  return;
3705  }
3706  if (pfrom.GetCommonVersion() >= WTXID_RELAY_VERSION) {
3707  if (!peer->m_wtxid_relay) {
3708  peer->m_wtxid_relay = true;
3709  m_wtxid_relay_peers++;
3710  } else {
3711  LogDebug(BCLog::NET, "ignoring duplicate wtxidrelay from peer=%d\n", pfrom.GetId());
3712  }
3713  } else {
3714  LogDebug(BCLog::NET, "ignoring wtxidrelay due to old common version=%d from peer=%d\n", pfrom.GetCommonVersion(), pfrom.GetId());
3715  }
3716  return;
3717  }
3718 
3719  // BIP155 defines feature negotiation of addrv2 and sendaddrv2, which must happen
3720  // between VERSION and VERACK.
3721  if (msg_type == NetMsgType::SENDADDRV2) {
3722  if (pfrom.fSuccessfullyConnected) {
3723  // Disconnect peers that send a SENDADDRV2 message after VERACK.
3724  LogDebug(BCLog::NET, "sendaddrv2 received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs));
3725  pfrom.fDisconnect = true;
3726  return;
3727  }
3728  peer->m_wants_addrv2 = true;
3729  return;
3730  }
3731 
3732  // Received from a peer demonstrating readiness to announce transactions via reconciliations.
3733  // This feature negotiation must happen between VERSION and VERACK to avoid relay problems
3734  // from switching announcement protocols after the connection is up.
3735  if (msg_type == NetMsgType::SENDTXRCNCL) {
3736  if (!m_txreconciliation) {
3737  LogDebug(BCLog::NET, "sendtxrcncl from peer=%d ignored, as our node does not have txreconciliation enabled\n", pfrom.GetId());
3738  return;
3739  }
3740 
3741  if (pfrom.fSuccessfullyConnected) {
3742  LogDebug(BCLog::NET, "sendtxrcncl received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs));
3743  pfrom.fDisconnect = true;
3744  return;
3745  }
3746 
3747  // Peer must not offer us reconciliations if we specified no tx relay support in VERSION.
3748  if (RejectIncomingTxs(pfrom)) {
3749  LogDebug(BCLog::NET, "sendtxrcncl received to which we indicated no tx relay, %s\n", pfrom.DisconnectMsg(fLogIPs));
3750  pfrom.fDisconnect = true;
3751  return;
3752  }
3753 
3754  // Peer must not offer us reconciliations if they specified no tx relay support in VERSION.
3755  // This flag might also be false in other cases, but the RejectIncomingTxs check above
3756  // eliminates them, so that this flag fully represents what we are looking for.
3757  const auto* tx_relay = peer->GetTxRelay();
3758  if (!tx_relay || !WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs)) {
3759  LogDebug(BCLog::NET, "sendtxrcncl received which indicated no tx relay to us, %s\n", pfrom.DisconnectMsg(fLogIPs));
3760  pfrom.fDisconnect = true;
3761  return;
3762  }
3763 
3764  uint32_t peer_txreconcl_version;
3765  uint64_t remote_salt;
3766  vRecv >> peer_txreconcl_version >> remote_salt;
3767 
3768  const ReconciliationRegisterResult result = m_txreconciliation->RegisterPeer(pfrom.GetId(), pfrom.IsInboundConn(),
3769  peer_txreconcl_version, remote_salt);
3770  switch (result) {
3772  LogDebug(BCLog::NET, "Ignore unexpected txreconciliation signal from peer=%d\n", pfrom.GetId());
3773  break;
3775  break;
3777  LogDebug(BCLog::NET, "txreconciliation protocol violation (sendtxrcncl received from already registered peer), %s\n", pfrom.DisconnectMsg(fLogIPs));
3778  pfrom.fDisconnect = true;
3779  return;
3781  LogDebug(BCLog::NET, "txreconciliation protocol violation, %s\n", pfrom.DisconnectMsg(fLogIPs));
3782  pfrom.fDisconnect = true;
3783  return;
3784  }
3785  return;
3786  }
3787 
3788  if (!pfrom.fSuccessfullyConnected) {
3789  LogDebug(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
3790  return;
3791  }
3792 
3793  if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
3794  const auto ser_params{
3795  msg_type == NetMsgType::ADDRV2 ?
3796  // Set V2 param so that the CNetAddr and CAddress
3797  // unserialize methods know that an address in v2 format is coming.
3800  };
3801 
3802  std::vector<CAddress> vAddr;
3803 
3804  vRecv >> ser_params(vAddr);
3805 
3806  if (!SetupAddressRelay(pfrom, *peer)) {
3807  LogDebug(BCLog::NET, "ignoring %s message from %s peer=%d\n", msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId());
3808  return;
3809  }
3810 
3811  if (vAddr.size() > MAX_ADDR_TO_SEND)
3812  {
3813  Misbehaving(*peer, strprintf("%s message size = %u", msg_type, vAddr.size()));
3814  return;
3815  }
3816 
3817  // Store the new addresses
3818  std::vector<CAddress> vAddrOk;
3819  const auto current_a_time{Now<NodeSeconds>()};
3820 
3821  // Update/increment addr rate limiting bucket.
3822  const auto current_time{GetTime<std::chrono::microseconds>()};
3823  if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
3824  // Don't increment bucket if it's already full
3825  const auto time_diff = std::max(current_time - peer->m_addr_token_timestamp, 0us);
3826  const double increment = Ticks<SecondsDouble>(time_diff) * MAX_ADDR_RATE_PER_SECOND;
3827  peer->m_addr_token_bucket = std::min<double>(peer->m_addr_token_bucket + increment, MAX_ADDR_PROCESSING_TOKEN_BUCKET);
3828  }
3829  peer->m_addr_token_timestamp = current_time;
3830 
3831  const bool rate_limited = !pfrom.HasPermission(NetPermissionFlags::Addr);
3832  uint64_t num_proc = 0;
3833  uint64_t num_rate_limit = 0;
3834  std::shuffle(vAddr.begin(), vAddr.end(), m_rng);
3835  for (CAddress& addr : vAddr)
3836  {
3837  if (interruptMsgProc)
3838  return;
3839 
3840  // Apply rate limiting.
3841  if (peer->m_addr_token_bucket < 1.0) {
3842  if (rate_limited) {
3843  ++num_rate_limit;
3844  continue;
3845  }
3846  } else {
3847  peer->m_addr_token_bucket -= 1.0;
3848  }
3849  // We only bother storing full nodes, though this may include
3850  // things which we would not make an outbound connection to, in
3851  // part because we may make feeler connections to them.
3852  if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices))
3853  continue;
3854 
3855  if (addr.nTime <= NodeSeconds{100000000s} || addr.nTime > current_a_time + 10min) {
3856  addr.nTime = current_a_time - 5 * 24h;
3857  }
3858  AddAddressKnown(*peer, addr);
3859  if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
3860  // Do not process banned/discouraged addresses beyond remembering we received them
3861  continue;
3862  }
3863  ++num_proc;
3864  const bool reachable{g_reachable_nets.Contains(addr)};
3865  if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent && vAddr.size() <= 10 && addr.IsRoutable()) {
3866  // Relay to a limited number of other nodes
3867  RelayAddress(pfrom.GetId(), addr, reachable);
3868  }
3869  // Do not store addresses outside our network
3870  if (reachable) {
3871  vAddrOk.push_back(addr);
3872  }
3873  }
3874  peer->m_addr_processed += num_proc;
3875  peer->m_addr_rate_limited += num_rate_limit;
3876  LogDebug(BCLog::NET, "Received addr: %u addresses (%u processed, %u rate-limited) from peer=%d\n",
3877  vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());
3878 
3879  m_addrman.Add(vAddrOk, pfrom.addr, 2h);
3880  if (vAddr.size() < 1000) peer->m_getaddr_sent = false;
3881 
3882  // AddrFetch: Require multiple addresses to avoid disconnecting on self-announcements
3883  if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) {
3884  LogDebug(BCLog::NET, "addrfetch connection completed, %s\n", pfrom.DisconnectMsg(fLogIPs));
3885  pfrom.fDisconnect = true;
3886  }
3887  return;
3888  }
3889 
3890  if (msg_type == NetMsgType::INV) {
3891  std::vector<CInv> vInv;
3892  vRecv >> vInv;
3893  if (vInv.size() > MAX_INV_SZ)
3894  {
3895  Misbehaving(*peer, strprintf("inv message size = %u", vInv.size()));
3896  return;
3897  }
3898 
3899  const bool reject_tx_invs{RejectIncomingTxs(pfrom)};
3900 
3901  LOCK2(cs_main, m_tx_download_mutex);
3902 
3903  const auto current_time{GetTime<std::chrono::microseconds>()};
3904  uint256* best_block{nullptr};
3905 
3906  for (CInv& inv : vInv) {
3907  if (interruptMsgProc) return;
3908 
3909  // Ignore INVs that don't match wtxidrelay setting.
3910  // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting.
3911  // This is fine as no INV messages are involved in that process.
3912  if (peer->m_wtxid_relay) {
3913  if (inv.IsMsgTx()) continue;
3914  } else {
3915  if (inv.IsMsgWtx()) continue;
3916  }
3917 
3918  if (inv.IsMsgBlk()) {
3919  const bool fAlreadyHave = AlreadyHaveBlock(inv.hash);
3920  LogDebug(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
3921 
3922  UpdateBlockAvailability(pfrom.GetId(), inv.hash);
3923  if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() && !IsBlockRequested(inv.hash)) {
3924  // Headers-first is the primary method of announcement on
3925  // the network. If a node fell back to sending blocks by
3926  // inv, it may be for a re-org, or because we haven't
3927  // completed initial headers sync. The final block hash
3928  // provided should be the highest, so send a getheaders and
3929  // then fetch the blocks we need to catch up.
3930  best_block = &inv.hash;
3931  }
3932  } else if (inv.IsGenTxMsg()) {
3933  if (reject_tx_invs) {
3934  LogDebug(BCLog::NET, "transaction (%s) inv sent in violation of protocol, %s\n", inv.hash.ToString(), pfrom.DisconnectMsg(fLogIPs));
3935  pfrom.fDisconnect = true;
3936  return;
3937  }
3938  const GenTxid gtxid = ToGenTxid(inv);
3939  AddKnownTx(*peer, inv.hash);
3940 
3941  if (!m_chainman.IsInitialBlockDownload()) {
3942  const bool fAlreadyHave{m_txdownloadman.AddTxAnnouncement(pfrom.GetId(), gtxid, current_time)};
3943  LogDebug(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
3944  }
3945  } else {
3946  LogDebug(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId());
3947  }
3948  }
3949 
3950  if (best_block != nullptr) {
3951  // If we haven't started initial headers-sync with this peer, then
3952  // consider sending a getheaders now. On initial startup, there's a
3953  // reliability vs bandwidth tradeoff, where we are only trying to do
3954  // initial headers sync with one peer at a time, with a long
3955  // timeout (at which point, if the sync hasn't completed, we will
3956  // disconnect the peer and then choose another). In the meantime,
3957  // as new blocks are found, we are willing to add one new peer per
3958  // block to sync with as well, to sync quicker in the case where
3959  // our initial peer is unresponsive (but less bandwidth than we'd
3960  // use if we turned on sync with all peers).
3961  CNodeState& state{*Assert(State(pfrom.GetId()))};
3962  if (state.fSyncStarted || (!peer->m_inv_triggered_getheaders_before_sync && *best_block != m_last_block_inv_triggering_headers_sync)) {
3963  if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer)) {
3964  LogDebug(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
3965  m_chainman.m_best_header->nHeight, best_block->ToString(),
3966  pfrom.GetId());
3967  }
3968  if (!state.fSyncStarted) {
3969  peer->m_inv_triggered_getheaders_before_sync = true;
3970  // Update the last block hash that triggered a new headers
3971  // sync, so that we don't turn on headers sync with more
3972  // than 1 new peer every new block.
3973  m_last_block_inv_triggering_headers_sync = *best_block;
3974  }
3975  }
3976  }
3977 
3978  return;
3979  }
3980 
3981  if (msg_type == NetMsgType::GETDATA) {
3982  std::vector<CInv> vInv;
3983  vRecv >> vInv;
3984  if (vInv.size() > MAX_INV_SZ)
3985  {
3986  Misbehaving(*peer, strprintf("getdata message size = %u", vInv.size()));
3987  return;
3988  }
3989 
3990  LogDebug(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId());
3991 
3992  if (vInv.size() > 0) {
3993  LogDebug(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId());
3994  }
3995 
3996  {
3997  LOCK(peer->m_getdata_requests_mutex);
3998  peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end());
3999  ProcessGetData(pfrom, *peer, interruptMsgProc);
4000  }
4001 
4002  return;
4003  }
4004 
4005  if (msg_type == NetMsgType::GETBLOCKS) {
4006  CBlockLocator locator;
4007  uint256 hashStop;
4008  vRecv >> locator >> hashStop;
4009 
4010  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
4011  LogDebug(BCLog::NET, "getblocks locator size %lld > %d, %s\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.DisconnectMsg(fLogIPs));
4012  pfrom.fDisconnect = true;
4013  return;
4014  }
4015 
4016  // We might have announced the currently-being-connected tip using a
4017  // compact block, which resulted in the peer sending a getblocks
4018  // request, which we would otherwise respond to without the new block.
4019  // To avoid this situation we simply verify that we are on our best
4020  // known chain now. This is super overkill, but we handle it better
4021  // for getheaders requests, and there are no known nodes which support
4022  // compact blocks but still use getblocks to request blocks.
4023  {
4024  std::shared_ptr<const CBlock> a_recent_block;
4025  {
4026  LOCK(m_most_recent_block_mutex);
4027  a_recent_block = m_most_recent_block;
4028  }
4029  BlockValidationState state;
4030  if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
4031  LogDebug(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
4032  }
4033  }
4034 
4035  LOCK(cs_main);
4036 
4037  // Find the last block the caller has in the main chain
4038  const CBlockIndex* pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
4039 
4040  // Send the rest of the chain
4041  if (pindex)
4042  pindex = m_chainman.ActiveChain().Next(pindex);
4043  int nLimit = 500;
4044  LogDebug(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId());
4045  for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
4046  {
4047  if (pindex->GetBlockHash() == hashStop)
4048  {
4049  LogDebug(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4050  break;
4051  }
4052  // If pruning, don't inv blocks unless we have on disk and are likely to still have
4053  // for some reasonable time window (1 hour) that block relay might require.
4054  const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
4055  if (m_chainman.m_blockman.IsPruneMode() && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave)) {
4056  LogDebug(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4057  break;
4058  }
4059  WITH_LOCK(peer->m_block_inv_mutex, peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash()));
4060  if (--nLimit <= 0) {
4061  // When this block is requested, we'll send an inv that'll
4062  // trigger the peer to getblocks the next batch of inventory.
4063  LogDebug(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4064  WITH_LOCK(peer->m_block_inv_mutex, {peer->m_continuation_block = pindex->GetBlockHash();});
4065  break;
4066  }
4067  }
4068  return;
4069  }
4070 
4071  if (msg_type == NetMsgType::GETBLOCKTXN) {
4073  vRecv >> req;
4074 
4075  std::shared_ptr<const CBlock> recent_block;
4076  {
4077  LOCK(m_most_recent_block_mutex);
4078  if (m_most_recent_block_hash == req.blockhash)
4079  recent_block = m_most_recent_block;
4080  // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion
4081  }
4082  if (recent_block) {
4083  SendBlockTransactions(pfrom, *peer, *recent_block, req);
4084  return;
4085  }
4086 
4087  FlatFilePos block_pos{};
4088  {
4089  LOCK(cs_main);
4090 
4091  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(req.blockhash);
4092  if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) {
4093  LogDebug(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId());
4094  return;
4095  }
4096 
4097  if (pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) {
4098  block_pos = pindex->GetBlockPos();
4099  }
4100  }
4101 
4102  if (!block_pos.IsNull()) {
4103  CBlock block;
4104  const bool ret{m_chainman.m_blockman.ReadBlock(block, block_pos)};
4105  // If height is above MAX_BLOCKTXN_DEPTH then this block cannot get
4106  // pruned after we release cs_main above, so this read should never fail.
4107  assert(ret);
4108 
4109  SendBlockTransactions(pfrom, *peer, block, req);
4110  return;
4111  }
4112 
4113  // If an older block is requested (should never happen in practice,
4114  // but can happen in tests) send a block response instead of a
4115  // blocktxn response. Sending a full block response instead of a
4116  // small blocktxn response is preferable in the case where a peer
4117  // might maliciously send lots of getblocktxn requests to trigger
4118  // expensive disk reads, because it will require the peer to
4119  // actually receive all the data read from disk over the network.
4120  LogDebug(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
4121  CInv inv{MSG_WITNESS_BLOCK, req.blockhash};
4122  WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv));
4123  // The message processing loop will go around again (without pausing) and we'll respond then
4124  return;
4125  }
4126 
4127  if (msg_type == NetMsgType::GETHEADERS) {
4128  CBlockLocator locator;
4129  uint256 hashStop;
4130  vRecv >> locator >> hashStop;
4131 
4132  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
4133  LogDebug(BCLog::NET, "getheaders locator size %lld > %d, %s\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.DisconnectMsg(fLogIPs));
4134  pfrom.fDisconnect = true;
4135  return;
4136  }
4137 
4138  if (m_chainman.m_blockman.LoadingBlocks()) {
4139  LogDebug(BCLog::NET, "Ignoring getheaders from peer=%d while importing/reindexing\n", pfrom.GetId());
4140  return;
4141  }
4142 
4143  LOCK(cs_main);
4144 
4145  // Note that if we were to be on a chain that forks from the checkpointed
4146  // chain, then serving those headers to a peer that has seen the
4147  // checkpointed chain would cause that peer to disconnect us. Requiring
4148  // that our chainwork exceed the minimum chain work is a protection against
4149  // being fed a bogus chain when we started up for the first time and
4150  // getting partitioned off the honest network for serving that chain to
4151  // others.
4152  if (m_chainman.ActiveTip() == nullptr ||
4153  (m_chainman.ActiveTip()->nChainWork < m_chainman.MinimumChainWork() && !pfrom.HasPermission(NetPermissionFlags::Download))) {
4154  LogDebug(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work; sending empty response\n", pfrom.GetId());
4155  // Just respond with an empty headers message, to tell the peer to
4156  // go away but not treat us as unresponsive.
4157  MakeAndPushMessage(pfrom, NetMsgType::HEADERS, std::vector<CBlockHeader>());
4158  return;
4159  }
4160 
4161  CNodeState *nodestate = State(pfrom.GetId());
4162  const CBlockIndex* pindex = nullptr;
4163  if (locator.IsNull())
4164  {
4165  // If locator is null, return the hashStop block
4166  pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop);
4167  if (!pindex) {
4168  return;
4169  }
4170 
4171  if (!BlockRequestAllowed(pindex)) {
4172  LogDebug(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom.GetId());
4173  return;
4174  }
4175  }
4176  else
4177  {
4178  // Find the last block the caller has in the main chain
4179  pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
4180  if (pindex)
4181  pindex = m_chainman.ActiveChain().Next(pindex);
4182  }
4183 
4184  // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
4185  std::vector<CBlock> vHeaders;
4186  int nLimit = m_opts.max_headers_result;
4187  LogDebug(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId());
4188  for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
4189  {
4190  vHeaders.emplace_back(pindex->GetBlockHeader());
4191  if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
4192  break;
4193  }
4194  // pindex can be nullptr either if we sent m_chainman.ActiveChain().Tip() OR
4195  // if our peer has m_chainman.ActiveChain().Tip() (and thus we are sending an empty
4196  // headers message). In both cases it's safe to update
4197  // pindexBestHeaderSent to be our tip.
4198  //
4199  // It is important that we simply reset the BestHeaderSent value here,
4200  // and not max(BestHeaderSent, newHeaderSent). We might have announced
4201  // the currently-being-connected tip using a compact block, which
4202  // resulted in the peer sending a headers request, which we respond to
4203  // without the new block. By resetting the BestHeaderSent, we ensure we
4204  // will re-announce the new block via headers (or compact blocks again)
4205  // in the SendMessages logic.
4206  nodestate->pindexBestHeaderSent = pindex ? pindex : m_chainman.ActiveChain().Tip();
4207  MakeAndPushMessage(pfrom, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders));
4208  return;
4209  }
4210 
4211  if (msg_type == NetMsgType::TX) {
4212  if (RejectIncomingTxs(pfrom)) {
4213  LogDebug(BCLog::NET, "transaction sent in violation of protocol, %s", pfrom.DisconnectMsg(fLogIPs));
4214  pfrom.fDisconnect = true;
4215  return;
4216  }
4217 
4218  // Stop processing the transaction early if we are still in IBD since we don't
4219  // have enough information to validate it yet. Sending unsolicited transactions
4220  // is not considered a protocol violation, so don't punish the peer.
4221  if (m_chainman.IsInitialBlockDownload()) return;
4222 
4223  CTransactionRef ptx;
4224  vRecv >> TX_WITH_WITNESS(ptx);
4225  const CTransaction& tx = *ptx;
4226 
4227  const uint256& txid = ptx->GetHash();
4228  const uint256& wtxid = ptx->GetWitnessHash();
4229 
4230  const uint256& hash = peer->m_wtxid_relay ? wtxid : txid;
4231  AddKnownTx(*peer, hash);
4232 
4233  LOCK2(cs_main, m_tx_download_mutex);
4234 
4235  const auto& [should_validate, package_to_validate] = m_txdownloadman.ReceivedTx(pfrom.GetId(), ptx);
4236  if (!should_validate) {
4238  // Always relay transactions received from peers with forcerelay
4239  // permission, even if they were already in the mempool, allowing
4240  // the node to function as a gateway for nodes hidden behind it.
4241  if (!m_mempool.exists(GenTxid::Txid(tx.GetHash()))) {
4242  LogPrintf("Not relaying non-mempool transaction %s (wtxid=%s) from forcerelay peer=%d\n",
4243  tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), pfrom.GetId());
4244  } else {
4245  LogPrintf("Force relaying tx %s (wtxid=%s) from peer=%d\n",
4246  tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), pfrom.GetId());
4247  RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
4248  }
4249  }
4250 
4251  if (package_to_validate) {
4252  const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)};
4253  LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(),
4254  package_result.m_state.IsValid() ? "package accepted" : "package rejected");
4255  ProcessPackageResult(package_to_validate.value(), package_result);
4256  }
4257  return;
4258  }
4259 
4260  // ReceivedTx should not be telling us to validate the tx and a package.
4261  Assume(!package_to_validate.has_value());
4262 
4263  const MempoolAcceptResult result = m_chainman.ProcessTransaction(ptx);
4264  const TxValidationState& state = result.m_state;
4265 
4266  if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
4267  ProcessValidTx(pfrom.GetId(), ptx, result.m_replaced_transactions);
4268  pfrom.m_last_tx_time = GetTime<std::chrono::seconds>();
4269  }
4270  if (state.IsInvalid()) {
4271  if (auto package_to_validate{ProcessInvalidTx(pfrom.GetId(), ptx, state, /*first_time_failure=*/true)}) {
4272  const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)};
4273  LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(),
4274  package_result.m_state.IsValid() ? "package accepted" : "package rejected");
4275  ProcessPackageResult(package_to_validate.value(), package_result);
4276  }
4277  }
4278 
4279  return;
4280  }
4281 
4282  if (msg_type == NetMsgType::CMPCTBLOCK)
4283  {
4284  // Ignore cmpctblock received while importing
4285  if (m_chainman.m_blockman.LoadingBlocks()) {
4286  LogDebug(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId());
4287  return;
4288  }
4289 
4290  CBlockHeaderAndShortTxIDs cmpctblock;
4291  vRecv >> cmpctblock;
4292 
4293  bool received_new_header = false;
4294  const auto blockhash = cmpctblock.header.GetHash();
4295 
4296  {
4297  LOCK(cs_main);
4298 
4299  const CBlockIndex* prev_block = m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock);
4300  if (!prev_block) {
4301  // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
4302  if (!m_chainman.IsInitialBlockDownload()) {
4303  MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer);
4304  }
4305  return;
4306  } else if (prev_block->nChainWork + CalculateClaimedHeadersWork({{cmpctblock.header}}) < GetAntiDoSWorkThreshold()) {
4307  // If we get a low-work header in a compact block, we can ignore it.
4308  LogDebug(BCLog::NET, "Ignoring low-work compact block from peer %d\n", pfrom.GetId());
4309  return;
4310  }
4311 
4312  if (!m_chainman.m_blockman.LookupBlockIndex(blockhash)) {
4313  received_new_header = true;
4314  }
4315  }
4316 
4317  const CBlockIndex *pindex = nullptr;
4318  BlockValidationState state;
4319  if (!m_chainman.ProcessNewBlockHeaders({{cmpctblock.header}}, /*min_pow_checked=*/true, state, &pindex)) {
4320  if (state.IsInvalid()) {
4321  MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block=*/true, "invalid header via cmpctblock");
4322  return;
4323  }
4324  }
4325 
4326  if (received_new_header) {
4327  LogInfo("Saw new cmpctblock header hash=%s peer=%d\n",
4328  blockhash.ToString(), pfrom.GetId());
4329  }
4330 
4331  bool fProcessBLOCKTXN = false;
4332 
4333  // If we end up treating this as a plain headers message, call that as well
4334  // without cs_main.
4335  bool fRevertToHeaderProcessing = false;
4336 
4337  // Keep a CBlock for "optimistic" compactblock reconstructions (see
4338  // below)
4339  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4340  bool fBlockReconstructed = false;
4341 
4342  {
4343  LOCK(cs_main);
4344  // If AcceptBlockHeader returned true, it set pindex
4345  assert(pindex);
4346  UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
4347 
4348  CNodeState *nodestate = State(pfrom.GetId());
4349 
4350  // If this was a new header with more work than our tip, update the
4351  // peer's last block announcement time
4352  if (received_new_header && pindex->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
4353  nodestate->m_last_block_announcement = GetTime();
4354  }
4355 
4356  if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
4357  return;
4358 
4359  auto range_flight = mapBlocksInFlight.equal_range(pindex->GetBlockHash());
4360  size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
4361  bool requested_block_from_this_peer{false};
4362 
4363  // Multimap ensures ordering of outstanding requests. It's either empty or first in line.
4364  bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());
4365 
4366  while (range_flight.first != range_flight.second) {
4367  if (range_flight.first->second.first == pfrom.GetId()) {
4368  requested_block_from_this_peer = true;
4369  break;
4370  }
4371  range_flight.first++;
4372  }
4373 
4374  if (pindex->nChainWork <= m_chainman.ActiveChain().Tip()->nChainWork || // We know something better
4375  pindex->nTx != 0) { // We had this block at some point, but pruned it
4376  if (requested_block_from_this_peer) {
4377  // We requested this block for some reason, but our mempool will probably be useless
4378  // so we just grab the block via normal getdata
4379  std::vector<CInv> vInv(1);
4380  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
4381  MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv);
4382  }
4383  return;
4384  }
4385 
4386  // If we're not close to tip yet, give up and let parallel block fetch work its magic
4387  if (!already_in_flight && !CanDirectFetch()) {
4388  return;
4389  }
4390 
4391  // We want to be a bit conservative just to be extra careful about DoS
4392  // possibilities in compact block processing...
4393  if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) {
4394  if ((already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK && nodestate->vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
4395  requested_block_from_this_peer) {
4396  std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
4397  if (!BlockRequested(pfrom.GetId(), *pindex, &queuedBlockIt)) {
4398  if (!(*queuedBlockIt)->partialBlock)
4399  (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool));
4400  else {
4401  // The block was already in flight using compact blocks from the same peer
4402  LogDebug(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
4403  return;
4404  }
4405  }
4406 
4407  PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
4408  ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
4409  if (status == READ_STATUS_INVALID) {
4410  RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
4411  Misbehaving(*peer, "invalid compact block");
4412  return;
4413  } else if (status == READ_STATUS_FAILED) {
4414  if (first_in_flight) {
4415  // Duplicate txindexes, the block is now in-flight, so just request it
4416  std::vector<CInv> vInv(1);
4417  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
4418  MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv);
4419  } else {
4420  // Give up for this peer and wait for other peer(s)
4421  RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId());
4422  }
4423  return;
4424  }
4425 
4427  for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
4428  if (!partialBlock.IsTxAvailable(i))
4429  req.indexes.push_back(i);
4430  }
4431  if (req.indexes.empty()) {
4432  fProcessBLOCKTXN = true;
4433  } else if (first_in_flight) {
4434  // We will try to round-trip any compact blocks we get on failure,
4435  // as long as it's first...
4436  req.blockhash = pindex->GetBlockHash();
4437  MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req);
4438  } else if (pfrom.m_bip152_highbandwidth_to &&
4439  (!pfrom.IsInboundConn() ||
4440  IsBlockRequestedFromOutbound(blockhash) ||
4441  already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK - 1)) {
4442  // ... or it's a hb relay peer and:
4443  // - peer is outbound, or
4444  // - we already have an outbound attempt in flight(so we'll take what we can get), or
4445  // - it's not the final parallel download slot (which we may reserve for first outbound)
4446  req.blockhash = pindex->GetBlockHash();
4447  MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req);
4448  } else {
4449  // Give up for this peer and wait for other peer(s)
4450  RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId());
4451  }
4452  } else {
4453  // This block is either already in flight from a different
4454  // peer, or this peer has too many blocks outstanding to
4455  // download from.
4456  // Optimistically try to reconstruct anyway since we might be
4457  // able to without any round trips.
4458  PartiallyDownloadedBlock tempBlock(&m_mempool);
4459  ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
4460  if (status != READ_STATUS_OK) {
4461  // TODO: don't ignore failures
4462  return;
4463  }
4464  std::vector<CTransactionRef> dummy;
4465  status = tempBlock.FillBlock(*pblock, dummy);
4466  if (status == READ_STATUS_OK) {
4467  fBlockReconstructed = true;
4468  }
4469  }
4470  } else {
4471  if (requested_block_from_this_peer) {
4472  // We requested this block, but its far into the future, so our
4473  // mempool will probably be useless - request the block normally
4474  std::vector<CInv> vInv(1);
4475  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
4476  MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv);
4477  return;
4478  } else {
4479  // If this was an announce-cmpctblock, we want the same treatment as a header message
4480  fRevertToHeaderProcessing = true;
4481  }
4482  }
4483  } // cs_main
4484 
4485  if (fProcessBLOCKTXN) {
4486  BlockTransactions txn;
4487  txn.blockhash = blockhash;
4488  return ProcessCompactBlockTxns(pfrom, *peer, txn);
4489  }
4490 
4491  if (fRevertToHeaderProcessing) {
4492  // Headers received from HB compact block peers are permitted to be
4493  // relayed before full validation (see BIP 152), so we don't want to disconnect
4494  // the peer if the header turns out to be for an invalid block.
4495  // Note that if a peer tries to build on an invalid chain, that
4496  // will be detected and the peer will be disconnected/discouraged.
4497  return ProcessHeadersMessage(pfrom, *peer, {cmpctblock.header}, /*via_compact_block=*/true);
4498  }
4499 
4500  if (fBlockReconstructed) {
4501  // If we got here, we were able to optimistically reconstruct a
4502  // block that is in flight from some other peer.
4503  {
4504  LOCK(cs_main);
4505  mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false));
4506  }
4507  // Setting force_processing to true means that we bypass some of
4508  // our anti-DoS protections in AcceptBlock, which filters
4509  // unrequested blocks that might be trying to waste our resources
4510  // (eg disk space). Because we only try to reconstruct blocks when
4511  // we're close to caught up (via the CanDirectFetch() requirement
4512  // above, combined with the behavior of not requesting blocks until
4513  // we have a chain with at least the minimum chain work), and we ignore
4514  // compact blocks with less work than our tip, it is safe to treat
4515  // reconstructed compact blocks as having been requested.
4516  ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true);
4517  LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
4518  if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
4519  // Clear download state for this block, which is in
4520  // process from some other peer. We do this after calling
4521  // ProcessNewBlock so that a malleated cmpctblock announcement
4522  // can't be used to interfere with block relay.
4523  RemoveBlockRequest(pblock->GetHash(), std::nullopt);
4524  }
4525  }
4526  return;
4527  }
4528 
4529  if (msg_type == NetMsgType::BLOCKTXN)
4530  {
4531  // Ignore blocktxn received while importing
4532  if (m_chainman.m_blockman.LoadingBlocks()) {
4533  LogDebug(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId());
4534  return;
4535  }
4536 
4537  BlockTransactions resp;
4538  vRecv >> resp;
4539 
4540  return ProcessCompactBlockTxns(pfrom, *peer, resp);
4541  }
4542 
4543  if (msg_type == NetMsgType::HEADERS)
4544  {
4545  // Ignore headers received while importing
4546  if (m_chainman.m_blockman.LoadingBlocks()) {
4547  LogDebug(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId());
4548  return;
4549  }
4550 
4551  std::vector<CBlockHeader> headers;
4552 
4553  // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
4554  unsigned int nCount = ReadCompactSize(vRecv);
4555  if (nCount > m_opts.max_headers_result) {
4556  Misbehaving(*peer, strprintf("headers message size = %u", nCount));
4557  return;
4558  }
4559  headers.resize(nCount);
4560  for (unsigned int n = 0; n < nCount; n++) {
4561  vRecv >> headers[n];
4562  ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
4563  }
4564 
4565  ProcessHeadersMessage(pfrom, *peer, std::move(headers), /*via_compact_block=*/false);
4566 
4567  // Check if the headers presync progress needs to be reported to validation.
4568  // This needs to be done without holding the m_headers_presync_mutex lock.
4569  if (m_headers_presync_should_signal.exchange(false)) {
4570  HeadersPresyncStats stats;
4571  {
4572  LOCK(m_headers_presync_mutex);
4573  auto it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
4574  if (it != m_headers_presync_stats.end()) stats = it->second;
4575  }
4576  if (stats.second) {
4577  m_chainman.ReportHeadersPresync(stats.first, stats.second->first, stats.second->second);
4578  }
4579  }
4580 
4581  return;
4582  }
4583 
4584  if (msg_type == NetMsgType::BLOCK)
4585  {
4586  // Ignore block received while importing
4587  if (m_chainman.m_blockman.LoadingBlocks()) {
4588  LogDebug(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId());
4589  return;
4590  }
4591 
4592  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4593  vRecv >> TX_WITH_WITNESS(*pblock);
4594 
4595  LogDebug(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId());
4596 
4597  const CBlockIndex* prev_block{WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock))};
4598 
4599  // Check for possible mutation if it connects to something we know so we can check for DEPLOYMENT_SEGWIT being active
4600  if (prev_block && IsBlockMutated(/*block=*/*pblock,
4601  /*check_witness_root=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT))) {
4602  LogDebug(BCLog::NET, "Received mutated block from peer=%d\n", peer->m_id);
4603  Misbehaving(*peer, "mutated block");
4604  WITH_LOCK(cs_main, RemoveBlockRequest(pblock->GetHash(), peer->m_id));
4605  return;
4606  }
4607 
4608  bool forceProcessing = false;
4609  const uint256 hash(pblock->GetHash());
4610  bool min_pow_checked = false;
4611  {
4612  LOCK(cs_main);
4613  // Always process the block if we requested it, since we may
4614  // need it even when it's not a candidate for a new best tip.
4615  forceProcessing = IsBlockRequested(hash);
4616  RemoveBlockRequest(hash, pfrom.GetId());
4617  // mapBlockSource is only used for punishing peers and setting
4618  // which peers send us compact blocks, so the race between here and
4619  // cs_main in ProcessNewBlock is fine.
4620  mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
4621 
4622  // Check claimed work on this block against our anti-dos thresholds.
4623  if (prev_block && prev_block->nChainWork + CalculateClaimedHeadersWork({{pblock->GetBlockHeader()}}) >= GetAntiDoSWorkThreshold()) {
4624  min_pow_checked = true;
4625  }
4626  }
4627  ProcessBlock(pfrom, pblock, forceProcessing, min_pow_checked);
4628  return;
4629  }
4630 
4631  if (msg_type == NetMsgType::GETADDR) {
4632  // This asymmetric behavior for inbound and outbound connections was introduced
4633  // to prevent a fingerprinting attack: an attacker can send specific fake addresses
4634  // to users' AddrMan and later request them by sending getaddr messages.
4635  // Making nodes which are behind NAT and can only make outgoing connections ignore
4636  // the getaddr message mitigates the attack.
4637  if (!pfrom.IsInboundConn()) {
4638  LogDebug(BCLog::NET, "Ignoring \"getaddr\" from %s connection. peer=%d\n", pfrom.ConnectionTypeAsString(), pfrom.GetId());
4639  return;
4640  }
4641 
4642  // Since this must be an inbound connection, SetupAddressRelay will
4643  // never fail.
4644  Assume(SetupAddressRelay(pfrom, *peer));
4645 
4646  // Only send one GetAddr response per connection to reduce resource waste
4647  // and discourage addr stamping of INV announcements.
4648  if (peer->m_getaddr_recvd) {
4649  LogDebug(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId());
4650  return;
4651  }
4652  peer->m_getaddr_recvd = true;
4653 
4654  peer->m_addrs_to_send.clear();
4655  std::vector<CAddress> vAddr;
4657  vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND, /*network=*/std::nullopt);
4658  } else {
4659  vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
4660  }
4661  for (const CAddress &addr : vAddr) {
4662  PushAddress(*peer, addr);
4663  }
4664  return;
4665  }
4666 
4667  if (msg_type == NetMsgType::MEMPOOL) {
4668  // Only process received mempool messages if we advertise NODE_BLOOM
4669  // or if the peer has mempool permissions.
4670  if (!(peer->m_our_services & NODE_BLOOM) && !pfrom.HasPermission(NetPermissionFlags::Mempool))
4671  {
4673  {
4674  LogDebug(BCLog::NET, "mempool request with bloom filters disabled, %s\n", pfrom.DisconnectMsg(fLogIPs));
4675  pfrom.fDisconnect = true;
4676  }
4677  return;
4678  }
4679 
4680  if (m_connman.OutboundTargetReached(false) && !pfrom.HasPermission(NetPermissionFlags::Mempool))
4681  {
4683  {
4684  LogDebug(BCLog::NET, "mempool request with bandwidth limit reached, %s\n", pfrom.DisconnectMsg(fLogIPs));
4685  pfrom.fDisconnect = true;
4686  }
4687  return;
4688  }
4689 
4690  if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4691  LOCK(tx_relay->m_tx_inventory_mutex);
4692  tx_relay->m_send_mempool = true;
4693  }
4694  return;
4695  }
4696 
4697  if (msg_type == NetMsgType::PING) {
4698  if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
4699  uint64_t nonce = 0;
4700  vRecv >> nonce;
4701  // Echo the message back with the nonce. This allows for two useful features:
4702  //
4703  // 1) A remote node can quickly check if the connection is operational
4704  // 2) Remote nodes can measure the latency of the network thread. If this node
4705  // is overloaded it won't respond to pings quickly and the remote node can
4706  // avoid sending us more work, like chain download requests.
4707  //
4708  // The nonce stops the remote getting confused between different pings: without
4709  // it, if the remote node sends a ping once per second and this node takes 5
4710  // seconds to respond to each, the 5th ping the remote sends would appear to
4711  // return very quickly.
4712  MakeAndPushMessage(pfrom, NetMsgType::PONG, nonce);
4713  }
4714  return;
4715  }
4716 
4717  if (msg_type == NetMsgType::PONG) {
4718  const auto ping_end = time_received;
4719  uint64_t nonce = 0;
4720  size_t nAvail = vRecv.in_avail();
4721  bool bPingFinished = false;
4722  std::string sProblem;
4723 
4724  if (nAvail >= sizeof(nonce)) {
4725  vRecv >> nonce;
4726 
4727  // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
4728  if (peer->m_ping_nonce_sent != 0) {
4729  if (nonce == peer->m_ping_nonce_sent) {
4730  // Matching pong received, this ping is no longer outstanding
4731  bPingFinished = true;
4732  const auto ping_time = ping_end - peer->m_ping_start.load();
4733  if (ping_time.count() >= 0) {
4734  // Let connman know about this successful ping-pong
4735  pfrom.PongReceived(ping_time);
4736  } else {
4737  // This should never happen
4738  sProblem = "Timing mishap";
4739  }
4740  } else {
4741  // Nonce mismatches are normal when pings are overlapping
4742  sProblem = "Nonce mismatch";
4743  if (nonce == 0) {
4744  // This is most likely a bug in another implementation somewhere; cancel this ping
4745  bPingFinished = true;
4746  sProblem = "Nonce zero";
4747  }
4748  }
4749  } else {
4750  sProblem = "Unsolicited pong without ping";
4751  }
4752  } else {
4753  // This is most likely a bug in another implementation somewhere; cancel this ping
4754  bPingFinished = true;
4755  sProblem = "Short payload";
4756  }
4757 
4758  if (!(sProblem.empty())) {
4759  LogDebug(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
4760  pfrom.GetId(),
4761  sProblem,
4762  peer->m_ping_nonce_sent,
4763  nonce,
4764  nAvail);
4765  }
4766  if (bPingFinished) {
4767  peer->m_ping_nonce_sent = 0;
4768  }
4769  return;
4770  }
4771 
4772  if (msg_type == NetMsgType::FILTERLOAD) {
4773  if (!(peer->m_our_services & NODE_BLOOM)) {
4774  LogDebug(BCLog::NET, "filterload received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs));
4775  pfrom.fDisconnect = true;
4776  return;
4777  }
4778  CBloomFilter filter;
4779  vRecv >> filter;
4780 
4781  if (!filter.IsWithinSizeConstraints())
4782  {
4783  // There is no excuse for sending a too-large filter
4784  Misbehaving(*peer, "too-large bloom filter");
4785  } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4786  {
4787  LOCK(tx_relay->m_bloom_filter_mutex);
4788  tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
4789  tx_relay->m_relay_txs = true;
4790  }
4791  pfrom.m_bloom_filter_loaded = true;
4792  pfrom.m_relays_txs = true;
4793  }
4794  return;
4795  }
4796 
4797  if (msg_type == NetMsgType::FILTERADD) {
4798  if (!(peer->m_our_services & NODE_BLOOM)) {
4799  LogDebug(BCLog::NET, "filteradd received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs));
4800  pfrom.fDisconnect = true;
4801  return;
4802  }
4803  std::vector<unsigned char> vData;
4804  vRecv >> vData;
4805 
4806  // Nodes must NEVER send a data item > MAX_SCRIPT_ELEMENT_SIZE bytes (the max size for a script data object,
4807  // and thus, the maximum size any matched object can have) in a filteradd message
4808  bool bad = false;
4809  if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
4810  bad = true;
4811  } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4812  LOCK(tx_relay->m_bloom_filter_mutex);
4813  if (tx_relay->m_bloom_filter) {
4814  tx_relay->m_bloom_filter->insert(vData);
4815  } else {
4816  bad = true;
4817  }
4818  }
4819  if (bad) {
4820  Misbehaving(*peer, "bad filteradd message");
4821  }
4822  return;
4823  }
4824 
4825  if (msg_type == NetMsgType::FILTERCLEAR) {
4826  if (!(peer->m_our_services & NODE_BLOOM)) {
4827  LogDebug(BCLog::NET, "filterclear received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs));
4828  pfrom.fDisconnect = true;
4829  return;
4830  }
4831  auto tx_relay = peer->GetTxRelay();
4832  if (!tx_relay) return;
4833 
4834  {
4835  LOCK(tx_relay->m_bloom_filter_mutex);
4836  tx_relay->m_bloom_filter = nullptr;
4837  tx_relay->m_relay_txs = true;
4838  }
4839  pfrom.m_bloom_filter_loaded = false;
4840  pfrom.m_relays_txs = true;
4841  return;
4842  }
4843 
4844  if (msg_type == NetMsgType::FEEFILTER) {
4845  CAmount newFeeFilter = 0;
4846  vRecv >> newFeeFilter;
4847  if (MoneyRange(newFeeFilter)) {
4848  if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4849  tx_relay->m_fee_filter_received = newFeeFilter;
4850  }
4851  LogDebug(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
4852  }
4853  return;
4854  }
4855 
4856  if (msg_type == NetMsgType::GETCFILTERS) {
4857  ProcessGetCFilters(pfrom, *peer, vRecv);
4858  return;
4859  }
4860 
4861  if (msg_type == NetMsgType::GETCFHEADERS) {
4862  ProcessGetCFHeaders(pfrom, *peer, vRecv);
4863  return;
4864  }
4865 
4866  if (msg_type == NetMsgType::GETCFCHECKPT) {
4867  ProcessGetCFCheckPt(pfrom, *peer, vRecv);
4868  return;
4869  }
4870 
4871  if (msg_type == NetMsgType::NOTFOUND) {
4872  std::vector<CInv> vInv;
4873  vRecv >> vInv;
4874  std::vector<uint256> tx_invs;
4876  for (CInv &inv : vInv) {
4877  if (inv.IsGenTxMsg()) {
4878  tx_invs.emplace_back(inv.hash);
4879  }
4880  }
4881  }
4882  LOCK(m_tx_download_mutex);
4883  m_txdownloadman.ReceivedNotFound(pfrom.GetId(), tx_invs);
4884  return;
4885  }
4886 
4887  // Ignore unknown commands for extensibility
4888  LogDebug(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
4889  return;
4890 }
4891 
4892 bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer)
4893 {
4894  {
4895  LOCK(peer.m_misbehavior_mutex);
4896 
4897  // There's nothing to do if the m_should_discourage flag isn't set
4898  if (!peer.m_should_discourage) return false;
4899 
4900  peer.m_should_discourage = false;
4901  } // peer.m_misbehavior_mutex
4902 
4904  // We never disconnect or discourage peers for bad behavior if they have NetPermissionFlags::NoBan permission
4905  LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
4906  return false;
4907  }
4908 
4909  if (pnode.IsManualConn()) {
4910  // We never disconnect or discourage manual peers for bad behavior
4911  LogPrintf("Warning: not punishing manually connected peer %d!\n", peer.m_id);
4912  return false;
4913  }
4914 
4915  if (pnode.addr.IsLocal()) {
4916  // We disconnect local peers for bad behavior but don't discourage (since that would discourage
4917  // all peers on the same local address)
4918  LogDebug(BCLog::NET, "Warning: disconnecting but not discouraging %s peer %d!\n",
4919  pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id);
4920  pnode.fDisconnect = true;
4921  return true;
4922  }
4923 
4924  // Normal case: Disconnect the peer and discourage all nodes sharing the address
4925  LogDebug(BCLog::NET, "Disconnecting and discouraging peer %d!\n", peer.m_id);
4926  if (m_banman) m_banman->Discourage(pnode.addr);
4927  m_connman.DisconnectNode(pnode.addr);
4928  return true;
4929 }
4930 
/** Process at most one queued network message from this peer (after first
 *  draining any pending getdata responses and reconsiderable orphans).
 *
 *  @return true if more work remains for this peer (i.e. the caller should
 *          schedule another ProcessMessages pass), false otherwise.
 */
bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
{
    // Lock-order discipline: m_tx_download_mutex must not already be held,
    // and the caller must hold the message-processing mutex.
    AssertLockNotHeld(m_tx_download_mutex);
    AssertLockHeld(g_msgproc_mutex);

    PeerRef peer = GetPeerRef(pfrom->GetId());
    if (peer == nullptr) return false;

    // For outbound connections, ensure that the initial VERSION message
    // has been sent first before processing any incoming messages
    if (!pfrom->IsInboundConn() && !peer->m_outbound_version_message_sent) return false;

    // Serve any outstanding getdata requests before pulling a new message.
    {
        LOCK(peer->m_getdata_requests_mutex);
        if (!peer->m_getdata_requests.empty()) {
            ProcessGetData(*pfrom, *peer, interruptMsgProc);
        }
    }

    const bool processed_orphan = ProcessOrphanTx(*peer);

    if (pfrom->fDisconnect)
        return false;

    // If an orphan was processed, report more work so orphan handling
    // continues before any new message is consumed.
    if (processed_orphan) return true;

    // this maintains the order of responses
    // and prevents m_getdata_requests to grow unbounded
    {
        LOCK(peer->m_getdata_requests_mutex);
        if (!peer->m_getdata_requests.empty()) return true;
    }

    // Don't bother if send buffer is too full to respond anyway
    if (pfrom->fPauseSend) return false;

    auto poll_result{pfrom->PollMessage()};
    if (!poll_result) {
        // No message to process
        return false;
    }

    CNetMessage& msg{poll_result->first};
    bool fMoreWork = poll_result->second;

    // Emit the inbound-message tracepoint for observability tooling.
    TRACEPOINT(net, inbound_message,
        pfrom->GetId(),
        pfrom->m_addr_name.c_str(),
        pfrom->ConnectionTypeAsString().c_str(),
        msg.m_type.c_str(),
        msg.m_recv.size(),
        msg.m_recv.data()
    );

    if (m_opts.capture_messages) {
        CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true);
    }

    try {
        ProcessMessage(*pfrom, msg.m_type, msg.m_recv, msg.m_time, interruptMsgProc);
        if (interruptMsgProc) return false;
        {
            LOCK(peer->m_getdata_requests_mutex);
            if (!peer->m_getdata_requests.empty()) fMoreWork = true;
        }
        // Does this peer have an orphan ready to reconsider?
        // (Note: we may have provided a parent for an orphan provided
        // by another peer that was already processed; in that case,
        // the extra work may not be noticed, possibly resulting in an
        // unnecessary 100ms delay)
        LOCK(m_tx_download_mutex);
        if (m_txdownloadman.HaveMoreWork(peer->m_id)) fMoreWork = true;
    } catch (const std::exception& e) {
        // Malformed messages must never take down the node; log and move on.
        LogDebug(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name());
    } catch (...) {
        LogDebug(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size);
    }

    return fMoreWork;
}
5011 
5012 void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds)
5013 {
5015 
5016  CNodeState &state = *State(pto.GetId());
5017 
5018  if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) {
5019  // This is an outbound peer subject to disconnection if they don't
5020  // announce a block with as much work as the current tip within
5021  // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
5022  // their chain has more work than ours, we should sync to it,
5023  // unless it's invalid, in which case we should find that out and
5024  // disconnect from them elsewhere).
5025  if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork) {
5026  // The outbound peer has sent us a block with at least as much work as our current tip, so reset the timeout if it was set
5027  if (state.m_chain_sync.m_timeout != 0s) {
5028  state.m_chain_sync.m_timeout = 0s;
5029  state.m_chain_sync.m_work_header = nullptr;
5030  state.m_chain_sync.m_sent_getheaders = false;
5031  }
5032  } else if (state.m_chain_sync.m_timeout == 0s || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
5033  // At this point we know that the outbound peer has either never sent us a block/header or they have, but its tip is behind ours
5034  // AND
5035  // we are noticing this for the first time (m_timeout is 0)
5036  // OR we noticed this at some point within the last CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds and set a timeout
5037  // for them, they caught up to our tip at the time of setting the timer but not to our current one (we've also advanced).
5038  // Either way, set a new timeout based on our current tip.
5039  state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
5040  state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
5041  state.m_chain_sync.m_sent_getheaders = false;
5042  } else if (state.m_chain_sync.m_timeout > 0s && time_in_seconds > state.m_chain_sync.m_timeout) {
5043  // No evidence yet that our peer has synced to a chain with work equal to that
5044  // of our tip, when we first detected it was behind. Send a single getheaders
5045  // message to give the peer a chance to update us.
5046  if (state.m_chain_sync.m_sent_getheaders) {
5047  // They've run out of time to catch up!
5048  LogInfo("Outbound peer has old chain, best known block = %s, %s\n", state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", pto.DisconnectMsg(fLogIPs));
5049  pto.fDisconnect = true;
5050  } else {
5051  assert(state.m_chain_sync.m_work_header);
5052  // Here, we assume that the getheaders message goes out,
5053  // because it'll either go out or be skipped because of a
5054  // getheaders in-flight already, in which case the peer should
5055  // still respond to us with a sufficiently high work chain tip.
5056  MaybeSendGetHeaders(pto,
5057  GetLocator(state.m_chain_sync.m_work_header->pprev),
5058  peer);
5059  LogDebug(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
5060  state.m_chain_sync.m_sent_getheaders = true;
5061  // Bump the timeout to allow a response, which could clear the timeout
5062  // (if the response shows the peer has synced), reset the timeout (if
5063  // the peer syncs to the required work but not to our tip), or result
5064  // in disconnect (if we advance to the timeout and pindexBestKnownBlock
5065  // has not sufficiently progressed)
5066  state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
5067  }
5068  }
5069  }
5070 }
5071 
5072 void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now)
5073 {
5074  // If we have any extra block-relay-only peers, disconnect the youngest unless
5075  // it's given us a block -- in which case, compare with the second-youngest, and
5076  // out of those two, disconnect the peer who least recently gave us a block.
5077  // The youngest block-relay-only peer would be the extra peer we connected
5078  // to temporarily in order to sync our tip; see net.cpp.
5079  // Note that we use higher nodeid as a measure for most recent connection.
5080  if (m_connman.GetExtraBlockRelayCount() > 0) {
5081  std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0}, next_youngest_peer{-1, 0};
5082 
5083  m_connman.ForEachNode([&](CNode* pnode) {
5084  if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) return;
5085  if (pnode->GetId() > youngest_peer.first) {
5086  next_youngest_peer = youngest_peer;
5087  youngest_peer.first = pnode->GetId();
5088  youngest_peer.second = pnode->m_last_block_time;
5089  }
5090  });
5091  NodeId to_disconnect = youngest_peer.first;
5092  if (youngest_peer.second > next_youngest_peer.second) {
5093  // Our newest block-relay-only peer gave us a block more recently;
5094  // disconnect our second youngest.
5095  to_disconnect = next_youngest_peer.first;
5096  }
5097  m_connman.ForNode(to_disconnect, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
5099  // Make sure we're not getting a block right now, and that
5100  // we've been connected long enough for this eviction to happen
5101  // at all.
5102  // Note that we only request blocks from a peer if we learn of a
5103  // valid headers chain with at least as much work as our tip.
5104  CNodeState *node_state = State(pnode->GetId());
5105  if (node_state == nullptr ||
5106  (now - pnode->m_connected >= MINIMUM_CONNECT_TIME && node_state->vBlocksInFlight.empty())) {
5107  pnode->fDisconnect = true;
5108  LogDebug(BCLog::NET, "disconnecting extra block-relay-only peer=%d (last block received at time %d)\n",
5109  pnode->GetId(), count_seconds(pnode->m_last_block_time));
5110  return true;
5111  } else {
5112  LogDebug(BCLog::NET, "keeping block-relay-only peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
5113  pnode->GetId(), count_seconds(pnode->m_connected), node_state->vBlocksInFlight.size());
5114  }
5115  return false;
5116  });
5117  }
5118 
5119  // Check whether we have too many outbound-full-relay peers
5120  if (m_connman.GetExtraFullOutboundCount() > 0) {
5121  // If we have more outbound-full-relay peers than we target, disconnect one.
5122  // Pick the outbound-full-relay peer that least recently announced
5123  // us a new block, with ties broken by choosing the more recent
5124  // connection (higher node id)
5125  // Protect peers from eviction if we don't have another connection
5126  // to their network, counting both outbound-full-relay and manual peers.
5127  NodeId worst_peer = -1;
5128  int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
5129 
5130  m_connman.ForEachNode([&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_connman.GetNodesMutex()) {
5132 
5133  // Only consider outbound-full-relay peers that are not already
5134  // marked for disconnection
5135  if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) return;
5136  CNodeState *state = State(pnode->GetId());
5137  if (state == nullptr) return; // shouldn't be possible, but just in case
5138  // Don't evict our protected peers
5139  if (state->m_chain_sync.m_protect) return;
5140  // If this is the only connection on a particular network that is
5141  // OUTBOUND_FULL_RELAY or MANUAL, protect it.
5142  if (!m_connman.MultipleManualOrFullOutboundConns(pnode->addr.GetNetwork())) return;
5143  if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
5144  worst_peer = pnode->GetId();
5145  oldest_block_announcement = state->m_last_block_announcement;
5146  }
5147  });
5148  if (worst_peer != -1) {
5149  bool disconnected = m_connman.ForNode(worst_peer, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
5151 
5152  // Only disconnect a peer that has been connected to us for
5153  // some reasonable fraction of our check-frequency, to give
5154  // it time for new information to have arrived.
5155  // Also don't disconnect any peer we're trying to download a
5156  // block from.
5157  CNodeState &state = *State(pnode->GetId());
5158  if (now - pnode->m_connected > MINIMUM_CONNECT_TIME && state.vBlocksInFlight.empty()) {
5159  LogDebug(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
5160  pnode->fDisconnect = true;
5161  return true;
5162  } else {
5163  LogDebug(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
5164  pnode->GetId(), count_seconds(pnode->m_connected), state.vBlocksInFlight.size());
5165  return false;
5166  }
5167  });
5168  if (disconnected) {
5169  // If we disconnected an extra peer, that means we successfully
5170  // connected to at least one peer after the last time we
5171  // detected a stale tip. Don't try any more extra peers until
5172  // we next detect a stale tip, to limit the load we put on the
5173  // network from these extra connections.
5174  m_connman.SetTryNewOutboundPeer(false);
5175  }
5176  }
5177  }
5178 }
5179 
5180 void PeerManagerImpl::CheckForStaleTipAndEvictPeers()
5181 {
5182  LOCK(cs_main);
5183 
5184  auto now{GetTime<std::chrono::seconds>()};
5185 
5186  EvictExtraOutboundPeers(now);
5187 
5188  if (now > m_stale_tip_check_time) {
5189  // Check whether our tip is stale, and if so, allow using an extra
5190  // outbound peer
5191  if (!m_chainman.m_blockman.LoadingBlocks() && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) {
5192  LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n",
5193  count_seconds(now - m_last_tip_update.load()));
5194  m_connman.SetTryNewOutboundPeer(true);
5195  } else if (m_connman.GetTryNewOutboundPeer()) {
5196  m_connman.SetTryNewOutboundPeer(false);
5197  }
5198  m_stale_tip_check_time = now + STALE_CHECK_INTERVAL;
5199  }
5200 
5201  if (!m_initial_sync_finished && CanDirectFetch()) {
5202  m_connman.StartExtraBlockRelayPeers();
5203  m_initial_sync_finished = true;
5204  }
5205 }
5206 
5207 void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now)
5208 {
5209  if (m_connman.ShouldRunInactivityChecks(node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
5210  peer.m_ping_nonce_sent &&
5211  now > peer.m_ping_start.load() + TIMEOUT_INTERVAL)
5212  {
5213  // The ping timeout is using mocktime. To disable the check during
5214  // testing, increase -peertimeout.
5215  LogDebug(BCLog::NET, "ping timeout: %fs, %s", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), node_to.DisconnectMsg(fLogIPs));
5216  node_to.fDisconnect = true;
5217  return;
5218  }
5219 
5220  bool pingSend = false;
5221 
5222  if (peer.m_ping_queued) {
5223  // RPC ping request by user
5224  pingSend = true;
5225  }
5226 
5227  if (peer.m_ping_nonce_sent == 0 && now > peer.m_ping_start.load() + PING_INTERVAL) {
5228  // Ping automatically sent as a latency probe & keepalive.
5229  pingSend = true;
5230  }
5231 
5232  if (pingSend) {
5233  uint64_t nonce;
5234  do {
5236  } while (nonce == 0);
5237  peer.m_ping_queued = false;
5238  peer.m_ping_start = now;
5239  if (node_to.GetCommonVersion() > BIP0031_VERSION) {
5240  peer.m_ping_nonce_sent = nonce;
5241  MakeAndPushMessage(node_to, NetMsgType::PING, nonce);
5242  } else {
5243  // Peer is too old to support ping command with nonce, pong will never arrive.
5244  peer.m_ping_nonce_sent = 0;
5245  MakeAndPushMessage(node_to, NetMsgType::PING);
5246  }
5247  }
5248 }
5249 
5250 void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time)
5251 {
5252  // Nothing to do for non-address-relay peers
5253  if (!peer.m_addr_relay_enabled) return;
5254 
5255  LOCK(peer.m_addr_send_times_mutex);
5256  // Periodically advertise our local address to the peer.
5257  if (fListen && !m_chainman.IsInitialBlockDownload() &&
5258  peer.m_next_local_addr_send < current_time) {
5259  // If we've sent before, clear the bloom filter for the peer, so that our
5260  // self-announcement will actually go out.
5261  // This might be unnecessary if the bloom filter has already rolled
5262  // over since our last self-announcement, but there is only a small
5263  // bandwidth cost that we can incur by doing this (which happens
5264  // once a day on average).
5265  if (peer.m_next_local_addr_send != 0us) {
5266  peer.m_addr_known->reset();
5267  }
5268  if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) {
5269  CAddress local_addr{*local_service, peer.m_our_services, Now<NodeSeconds>()};
5270  PushAddress(peer, local_addr);
5271  }
5272  peer.m_next_local_addr_send = current_time + m_rng.rand_exp_duration(AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
5273  }
5274 
5275  // We sent an `addr` message to this peer recently. Nothing more to do.
5276  if (current_time <= peer.m_next_addr_send) return;
5277 
5278  peer.m_next_addr_send = current_time + m_rng.rand_exp_duration(AVG_ADDRESS_BROADCAST_INTERVAL);
5279 
5280  if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) {
5281  // Should be impossible since we always check size before adding to
5282  // m_addrs_to_send. Recover by trimming the vector.
5283  peer.m_addrs_to_send.resize(MAX_ADDR_TO_SEND);
5284  }
5285 
5286  // Remove addr records that the peer already knows about, and add new
5287  // addrs to the m_addr_known filter on the same pass.
5288  auto addr_already_known = [&peer](const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) {
5289  bool ret = peer.m_addr_known->contains(addr.GetKey());
5290  if (!ret) peer.m_addr_known->insert(addr.GetKey());
5291  return ret;
5292  };
5293  peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(), peer.m_addrs_to_send.end(), addr_already_known),
5294  peer.m_addrs_to_send.end());
5295 
5296  // No addr messages to send
5297  if (peer.m_addrs_to_send.empty()) return;
5298 
5299  if (peer.m_wants_addrv2) {
5300  MakeAndPushMessage(node, NetMsgType::ADDRV2, CAddress::V2_NETWORK(peer.m_addrs_to_send));
5301  } else {
5302  MakeAndPushMessage(node, NetMsgType::ADDR, CAddress::V1_NETWORK(peer.m_addrs_to_send));
5303  }
5304  peer.m_addrs_to_send.clear();
5305 
5306  // we only send the big addr message once
5307  if (peer.m_addrs_to_send.capacity() > 40) {
5308  peer.m_addrs_to_send.shrink_to_fit();
5309  }
5310 }
5311 
5312 void PeerManagerImpl::MaybeSendSendHeaders(CNode& node, Peer& peer)
5313 {
5314  // Delay sending SENDHEADERS (BIP 130) until we're done with an
5315  // initial-headers-sync with this peer. Receiving headers announcements for
5316  // new blocks while trying to sync their headers chain is problematic,
5317  // because of the state tracking done.
5318  if (!peer.m_sent_sendheaders && node.GetCommonVersion() >= SENDHEADERS_VERSION) {
5319  LOCK(cs_main);
5320  CNodeState &state = *State(node.GetId());
5321  if (state.pindexBestKnownBlock != nullptr &&
5322  state.pindexBestKnownBlock->nChainWork > m_chainman.MinimumChainWork()) {
5323  // Tell our peer we prefer to receive headers rather than inv's
5324  // We send this to non-NODE NETWORK peers as well, because even
5325  // non-NODE NETWORK peers can announce blocks (such as pruning
5326  // nodes)
5327  MakeAndPushMessage(node, NetMsgType::SENDHEADERS);
5328  peer.m_sent_sendheaders = true;
5329  }
5330  }
5331 }
5332 
5333 void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::microseconds current_time)
5334 {
5335  if (m_opts.ignore_incoming_txs) return;
5336  if (pto.GetCommonVersion() < FEEFILTER_VERSION) return;
5337  // peers with the forcerelay permission should not filter txs to us
5339  // Don't send feefilter messages to outbound block-relay-only peers since they should never announce
5340  // transactions to us, regardless of feefilter state.
5341  if (pto.IsBlockOnlyConn()) return;
5342 
5343  CAmount currentFilter = m_mempool.GetMinFee().GetFeePerK();
5344 
5345  if (m_chainman.IsInitialBlockDownload()) {
5346  // Received tx-inv messages are discarded when the active
5347  // chainstate is in IBD, so tell the peer to not send them.
5348  currentFilter = MAX_MONEY;
5349  } else {
5350  static const CAmount MAX_FILTER{m_fee_filter_rounder.round(MAX_MONEY)};
5351  if (peer.m_fee_filter_sent == MAX_FILTER) {
5352  // Send the current filter if we sent MAX_FILTER previously
5353  // and made it out of IBD.
5354  peer.m_next_send_feefilter = 0us;
5355  }
5356  }
5357  if (current_time > peer.m_next_send_feefilter) {
5358  CAmount filterToSend = m_fee_filter_rounder.round(currentFilter);
5359  // We always have a fee filter of at least the min relay fee
5360  filterToSend = std::max(filterToSend, m_mempool.m_opts.min_relay_feerate.GetFeePerK());
5361  if (filterToSend != peer.m_fee_filter_sent) {
5362  MakeAndPushMessage(pto, NetMsgType::FEEFILTER, filterToSend);
5363  peer.m_fee_filter_sent = filterToSend;
5364  }
5365  peer.m_next_send_feefilter = current_time + m_rng.rand_exp_duration(AVG_FEEFILTER_BROADCAST_INTERVAL);
5366  }
5367  // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
5368  // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
5369  else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < peer.m_next_send_feefilter &&
5370  (currentFilter < 3 * peer.m_fee_filter_sent / 4 || currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
5371  peer.m_next_send_feefilter = current_time + m_rng.randrange<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY);
5372  }
5373 }
5374 
5375 namespace {
5376 class CompareInvMempoolOrder
5377 {
5378  CTxMemPool* mp;
5379  bool m_wtxid_relay;
5380 public:
5381  explicit CompareInvMempoolOrder(CTxMemPool *_mempool, bool use_wtxid)
5382  {
5383  mp = _mempool;
5384  m_wtxid_relay = use_wtxid;
5385  }
5386 
5387  bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
5388  {
5389  /* As std::make_heap produces a max-heap, we want the entries with the
5390  * fewest ancestors/highest fee to sort later. */
5391  return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay);
5392  }
5393 };
5394 } // namespace
5395 
5396 bool PeerManagerImpl::RejectIncomingTxs(const CNode& peer) const
5397 {
5398  // block-relay-only peers may never send txs to us
5399  if (peer.IsBlockOnlyConn()) return true;
5400  if (peer.IsFeelerConn()) return true;
5401  // In -blocksonly mode, peers need the 'relay' permission to send txs to us
5402  if (m_opts.ignore_incoming_txs && !peer.HasPermission(NetPermissionFlags::Relay)) return true;
5403  return false;
5404 }
5405 
5406 bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer)
5407 {
5408  // We don't participate in addr relay with outbound block-relay-only
5409  // connections to prevent providing adversaries with the additional
5410  // information of addr traffic to infer the link.
5411  if (node.IsBlockOnlyConn()) return false;
5412 
5413  if (!peer.m_addr_relay_enabled.exchange(true)) {
5414  // During version message processing (non-block-relay-only outbound peers)
5415  // or on first addr-related message we have received (inbound peers), initialize
5416  // m_addr_known.
5417  peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
5418  }
5419 
5420  return true;
5421 }
5422 
5423 bool PeerManagerImpl::SendMessages(CNode* pto)
5424 {
5425  AssertLockNotHeld(m_tx_download_mutex);
5426  AssertLockHeld(g_msgproc_mutex);
5427 
5428  PeerRef peer = GetPeerRef(pto->GetId());
5429  if (!peer) return false;
5430  const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
5431 
5432  // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
5433  // disconnect misbehaving peers even before the version handshake is complete.
5434  if (MaybeDiscourageAndDisconnect(*pto, *peer)) return true;
5435 
5436  // Initiate version handshake for outbound connections
5437  if (!pto->IsInboundConn() && !peer->m_outbound_version_message_sent) {
5438  PushNodeVersion(*pto, *peer);
5439  peer->m_outbound_version_message_sent = true;
5440  }
5441 
5442  // Don't send anything until the version handshake is complete
5443  if (!pto->fSuccessfullyConnected || pto->fDisconnect)
5444  return true;
5445 
5446  const auto current_time{GetTime<std::chrono::microseconds>()};
5447 
5448  if (pto->IsAddrFetchConn() && current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
5449  LogDebug(BCLog::NET, "addrfetch connection timeout, %s\n", pto->DisconnectMsg(fLogIPs));
5450  pto->fDisconnect = true;
5451  return true;
5452  }
5453 
5454  MaybeSendPing(*pto, *peer, current_time);
5455 
5456  // MaybeSendPing may have marked peer for disconnection
5457  if (pto->fDisconnect) return true;
5458 
5459  MaybeSendAddr(*pto, *peer, current_time);
5460 
5461  MaybeSendSendHeaders(*pto, *peer);
5462 
5463  {
5464  LOCK(cs_main);
5465 
5466  CNodeState &state = *State(pto->GetId());
5467 
5468  // Start block sync
5469  if (m_chainman.m_best_header == nullptr) {
5470  m_chainman.m_best_header = m_chainman.ActiveChain().Tip();
5471  }
5472 
5473  // Determine whether we might try initial headers sync or parallel
5474  // block download from this peer -- this mostly affects behavior while
5475  // in IBD (once out of IBD, we sync from all peers).
5476  bool sync_blocks_and_headers_from_peer = false;
5477  if (state.fPreferredDownload) {
5478  sync_blocks_and_headers_from_peer = true;
5479  } else if (CanServeBlocks(*peer) && !pto->IsAddrFetchConn()) {
5480  // Typically this is an inbound peer. If we don't have any outbound
5481  // peers, or if we aren't downloading any blocks from such peers,
5482  // then allow block downloads from this peer, too.
5483  // We prefer downloading blocks from outbound peers to avoid
5484  // putting undue load on (say) some home user who is just making
5485  // outbound connections to the network, but if our only source of
5486  // the latest blocks is from an inbound peer, we have to be sure to
5487  // eventually download it (and not just wait indefinitely for an
5488  // outbound peer to have it).
5489  if (m_num_preferred_download_peers == 0 || mapBlocksInFlight.empty()) {
5490  sync_blocks_and_headers_from_peer = true;
5491  }
5492  }
5493 
5494  if (!state.fSyncStarted && CanServeBlocks(*peer) && !m_chainman.m_blockman.LoadingBlocks()) {
5495  // Only actively request headers from a single peer, unless we're close to today.
5496  if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > NodeClock::now() - 24h) {
5497  const CBlockIndex* pindexStart = m_chainman.m_best_header;
5498  /* If possible, start at the block preceding the currently
5499  best known header. This ensures that we always get a
5500  non-empty list of headers back as long as the peer
5501  is up-to-date. With a non-empty response, we can initialise
5502  the peer's known best block. This wouldn't be possible
5503  if we requested starting at m_chainman.m_best_header and
5504  got back an empty response. */
5505  if (pindexStart->pprev)
5506  pindexStart = pindexStart->pprev;
5507  if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) {
5508  LogDebug(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height);
5509 
5510  state.fSyncStarted = true;
5511  peer->m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
5512  (
5513  // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
5514  // to maintain precision
5515  std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
5516  Ticks<std::chrono::seconds>(NodeClock::now() - m_chainman.m_best_header->Time()) / consensusParams.nPowTargetSpacing
5517  );
5518  nSyncStarted++;
5519  }
5520  }
5521  }
5522 
5523  //
5524  // Try sending block announcements via headers
5525  //
5526  {
5527  // If we have no more than MAX_BLOCKS_TO_ANNOUNCE in our
5528  // list of block hashes we're relaying, and our peer wants
5529  // headers announcements, then find the first header
5530  // not yet known to our peer but would connect, and send.
5531  // If no header would connect, or if we have too many
5532  // blocks, or if the peer doesn't want headers, just
5533  // add all to the inv queue.
5534  LOCK(peer->m_block_inv_mutex);
5535  std::vector<CBlock> vHeaders;
5536  bool fRevertToInv = ((!peer->m_prefers_headers &&
5537  (!state.m_requested_hb_cmpctblocks || peer->m_blocks_for_headers_relay.size() > 1)) ||
5538  peer->m_blocks_for_headers_relay.size() > MAX_BLOCKS_TO_ANNOUNCE);
5539  const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
5540  ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date
5541 
5542  if (!fRevertToInv) {
5543  bool fFoundStartingHeader = false;
5544  // Try to find first header that our peer doesn't have, and
5545  // then send all headers past that one. If we come across any
5546  // headers that aren't on m_chainman.ActiveChain(), give up.
5547  for (const uint256& hash : peer->m_blocks_for_headers_relay) {
5548  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
5549  assert(pindex);
5550  if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
5551  // Bail out if we reorged away from this block
5552  fRevertToInv = true;
5553  break;
5554  }
5555  if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
5556  // This means that the list of blocks to announce don't
5557  // connect to each other.
5558  // This shouldn't really be possible to hit during
5559  // regular operation (because reorgs should take us to
5560  // a chain that has some block not on the prior chain,
5561  // which should be caught by the prior check), but one
5562  // way this could happen is by using invalidateblock /
5563  // reconsiderblock repeatedly on the tip, causing it to
5564  // be added multiple times to m_blocks_for_headers_relay.
5565  // Robustly deal with this rare situation by reverting
5566  // to an inv.
5567  fRevertToInv = true;
5568  break;
5569  }
5570  pBestIndex = pindex;
5571  if (fFoundStartingHeader) {
5572  // add this to the headers message
5573  vHeaders.emplace_back(pindex->GetBlockHeader());
5574  } else if (PeerHasHeader(&state, pindex)) {
5575  continue; // keep looking for the first new block
5576  } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) {
5577  // Peer doesn't have this header but they do have the prior one.
5578  // Start sending headers.
5579  fFoundStartingHeader = true;
5580  vHeaders.emplace_back(pindex->GetBlockHeader());
5581  } else {
5582  // Peer doesn't have this header or the prior one -- nothing will
5583  // connect, so bail out.
5584  fRevertToInv = true;
5585  break;
5586  }
5587  }
5588  }
5589  if (!fRevertToInv && !vHeaders.empty()) {
5590  if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
5591  // We only send up to 1 block as header-and-ids, as otherwise
5592  // probably means we're doing an initial-ish-sync or they're slow
5593  LogDebug(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
5594  vHeaders.front().GetHash().ToString(), pto->GetId());
5595 
5596  std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
5597  {
5598  LOCK(m_most_recent_block_mutex);
5599  if (m_most_recent_block_hash == pBestIndex->GetBlockHash()) {
5600  cached_cmpctblock_msg = NetMsg::Make(NetMsgType::CMPCTBLOCK, *m_most_recent_compact_block);
5601  }
5602  }
5603  if (cached_cmpctblock_msg.has_value()) {
5604  PushMessage(*pto, std::move(cached_cmpctblock_msg.value()));
5605  } else {
5606  CBlock block;
5607  const bool ret{m_chainman.m_blockman.ReadBlock(block, *pBestIndex)};
5608  assert(ret);
5609  CBlockHeaderAndShortTxIDs cmpctblock{block, m_rng.rand64()};
5610  MakeAndPushMessage(*pto, NetMsgType::CMPCTBLOCK, cmpctblock);
5611  }
5612  state.pindexBestHeaderSent = pBestIndex;
5613  } else if (peer->m_prefers_headers) {
5614  if (vHeaders.size() > 1) {
5615  LogDebug(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
5616  vHeaders.size(),
5617  vHeaders.front().GetHash().ToString(),
5618  vHeaders.back().GetHash().ToString(), pto->GetId());
5619  } else {
5620  LogDebug(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
5621  vHeaders.front().GetHash().ToString(), pto->GetId());
5622  }
5623  MakeAndPushMessage(*pto, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders));
5624  state.pindexBestHeaderSent = pBestIndex;
5625  } else
5626  fRevertToInv = true;
5627  }
5628  if (fRevertToInv) {
5629  // If falling back to using an inv, just try to inv the tip.
5630  // The last entry in m_blocks_for_headers_relay was our tip at some point
5631  // in the past.
5632  if (!peer->m_blocks_for_headers_relay.empty()) {
5633  const uint256& hashToAnnounce = peer->m_blocks_for_headers_relay.back();
5634  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce);
5635  assert(pindex);
5636 
5637  // Warn if we're announcing a block that is not on the main chain.
5638  // This should be very rare and could be optimized out.
5639  // Just log for now.
5640  if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
5641  LogDebug(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
5642  hashToAnnounce.ToString(), m_chainman.ActiveChain().Tip()->GetBlockHash().ToString());
5643  }
5644 
5645  // If the peer's chain has this block, don't inv it back.
5646  if (!PeerHasHeader(&state, pindex)) {
5647  peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
5648  LogDebug(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
5649  pto->GetId(), hashToAnnounce.ToString());
5650  }
5651  }
5652  }
5653  peer->m_blocks_for_headers_relay.clear();
5654  }
5655 
5656  //
5657  // Message: inventory
5658  //
5659  std::vector<CInv> vInv;
5660  {
5661  LOCK(peer->m_block_inv_mutex);
5662  vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(), INVENTORY_BROADCAST_TARGET));
5663 
5664  // Add blocks
5665  for (const uint256& hash : peer->m_blocks_for_inv_relay) {
5666  vInv.emplace_back(MSG_BLOCK, hash);
5667  if (vInv.size() == MAX_INV_SZ) {
5668  MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
5669  vInv.clear();
5670  }
5671  }
5672  peer->m_blocks_for_inv_relay.clear();
5673  }
5674 
5675  if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
5676  LOCK(tx_relay->m_tx_inventory_mutex);
5677  // Check whether periodic sends should happen
5678  bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan);
5679  if (tx_relay->m_next_inv_send_time < current_time) {
5680  fSendTrickle = true;
5681  if (pto->IsInboundConn()) {
5682  tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
5683  } else {
5684  tx_relay->m_next_inv_send_time = current_time + m_rng.rand_exp_duration(OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
5685  }
5686  }
5687 
5688  // Time to send but the peer has requested we not relay transactions.
5689  if (fSendTrickle) {
5690  LOCK(tx_relay->m_bloom_filter_mutex);
5691  if (!tx_relay->m_relay_txs) tx_relay->m_tx_inventory_to_send.clear();
5692  }
5693 
5694  // Respond to BIP35 mempool requests
5695  if (fSendTrickle && tx_relay->m_send_mempool) {
5696  auto vtxinfo = m_mempool.infoAll();
5697  tx_relay->m_send_mempool = false;
5698  const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()};
5699 
5700  LOCK(tx_relay->m_bloom_filter_mutex);
5701 
5702  for (const auto& txinfo : vtxinfo) {
5703  CInv inv{
5704  peer->m_wtxid_relay ? MSG_WTX : MSG_TX,
5705  peer->m_wtxid_relay ?
5706  txinfo.tx->GetWitnessHash().ToUint256() :
5707  txinfo.tx->GetHash().ToUint256(),
5708  };
5709  tx_relay->m_tx_inventory_to_send.erase(inv.hash);
5710 
5711  // Don't send transactions that peers will not put into their mempool
5712  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
5713  continue;
5714  }
5715  if (tx_relay->m_bloom_filter) {
5716  if (!tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
5717  }
5718  tx_relay->m_tx_inventory_known_filter.insert(inv.hash);
5719  vInv.push_back(inv);
5720  if (vInv.size() == MAX_INV_SZ) {
5721  MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
5722  vInv.clear();
5723  }
5724  }
5725  }
5726 
5727  // Determine transactions to relay
5728  if (fSendTrickle) {
5729  // Produce a vector with all candidates for sending
5730  std::vector<std::set<uint256>::iterator> vInvTx;
5731  vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
5732  for (std::set<uint256>::iterator it = tx_relay->m_tx_inventory_to_send.begin(); it != tx_relay->m_tx_inventory_to_send.end(); it++) {
5733  vInvTx.push_back(it);
5734  }
5735  const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()};
5736  // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
5737  // A heap is used so that not all items need sorting if only a few are being sent.
5738  CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, peer->m_wtxid_relay);
5739  std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
5740  // No reason to drain out at many times the network's capacity,
5741  // especially since we have many peers and some will draw much shorter delays.
5742  unsigned int nRelayedTransactions = 0;
5743  LOCK(tx_relay->m_bloom_filter_mutex);
5744  size_t broadcast_max{INVENTORY_BROADCAST_TARGET + (tx_relay->m_tx_inventory_to_send.size()/1000)*5};
5745  broadcast_max = std::min<size_t>(INVENTORY_BROADCAST_MAX, broadcast_max);
5746  while (!vInvTx.empty() && nRelayedTransactions < broadcast_max) {
5747  // Fetch the top element from the heap
5748  std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
5749  std::set<uint256>::iterator it = vInvTx.back();
5750  vInvTx.pop_back();
5751  uint256 hash = *it;
5752  CInv inv(peer->m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
5753  // Remove it from the to-be-sent set
5754  tx_relay->m_tx_inventory_to_send.erase(it);
5755  // Check if not in the filter already
5756  if (tx_relay->m_tx_inventory_known_filter.contains(hash)) {
5757  continue;
5758  }
5759  // Not in the mempool anymore? don't bother sending it.
5760  auto txinfo = m_mempool.info(ToGenTxid(inv));
5761  if (!txinfo.tx) {
5762  continue;
5763  }
5764  // Peer told you to not send transactions at that feerate? Don't bother sending it.
5765  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
5766  continue;
5767  }
5768  if (tx_relay->m_bloom_filter && !tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
5769  // Send
5770  vInv.push_back(inv);
5771  nRelayedTransactions++;
5772  if (vInv.size() == MAX_INV_SZ) {
5773  MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
5774  vInv.clear();
5775  }
5776  tx_relay->m_tx_inventory_known_filter.insert(hash);
5777  }
5778 
5779  // Ensure we'll respond to GETDATA requests for anything we've just announced
5780  LOCK(m_mempool.cs);
5781  tx_relay->m_last_inv_sequence = m_mempool.GetSequence();
5782  }
5783  }
5784  if (!vInv.empty())
5785  MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
5786 
5787  // Detect whether we're stalling
5788  auto stalling_timeout = m_block_stalling_timeout.load();
5789  if (state.m_stalling_since.count() && state.m_stalling_since < current_time - stalling_timeout) {
5790  // Stalling only triggers when the block download window cannot move. During normal steady state,
5791  // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
5792  // should only happen during initial block download.
5793  LogInfo("Peer is stalling block download, %s\n", pto->DisconnectMsg(fLogIPs));
5794  pto->fDisconnect = true;
5795  // Increase timeout for the next peer so that we don't disconnect multiple peers if our own
5796  // bandwidth is insufficient.
5797  const auto new_timeout = std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX);
5798  if (stalling_timeout != new_timeout && m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
5799  LogDebug(BCLog::NET, "Increased stalling timeout temporarily to %d seconds\n", count_seconds(new_timeout));
5800  }
5801  return true;
5802  }
5803  // In case there is a block that has been in flight from this peer for block_interval * (1 + 0.5 * N)
5804  // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
5805  // We compensate for other peers to prevent killing off peers due to our own downstream link
5806  // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
5807  // to unreasonably increase our timeout.
5808  if (state.vBlocksInFlight.size() > 0) {
5809  QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
5810  int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1;
5811  if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
5812  LogInfo("Timeout downloading block %s, %s\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->DisconnectMsg(fLogIPs));
5813  pto->fDisconnect = true;
5814  return true;
5815  }
5816  }
5817  // Check for headers sync timeouts
5818  if (state.fSyncStarted && peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
5819  // Detect whether this is a stalling initial-headers-sync peer
5820  if (m_chainman.m_best_header->Time() <= NodeClock::now() - 24h) {
5821  if (current_time > peer->m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) {
5822  // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer,
5823  // and we have others we could be using instead.
5824  // Note: If all our peers are inbound, then we won't
5825  // disconnect our sync peer for stalling; we have bigger
5826  // problems if we can't get any outbound peers.
5828  LogInfo("Timeout downloading headers, %s\n", pto->DisconnectMsg(fLogIPs));
5829  pto->fDisconnect = true;
5830  return true;
5831  } else {
5832  LogInfo("Timeout downloading headers from noban peer, not %s\n", pto->DisconnectMsg(fLogIPs));
5833  // Reset the headers sync state so that we have a
5834  // chance to try downloading from a different peer.
5835  // Note: this will also result in at least one more
5836  // getheaders message to be sent to
5837  // this peer (eventually).
5838  state.fSyncStarted = false;
5839  nSyncStarted--;
5840  peer->m_headers_sync_timeout = 0us;
5841  }
5842  }
5843  } else {
5844  // After we've caught up once, reset the timeout so we can't trigger
5845  // disconnect later.
5846  peer->m_headers_sync_timeout = std::chrono::microseconds::max();
5847  }
5848  }
5849 
5850  // Check that outbound peers have reasonable chains
5851  // GetTime() is used by this anti-DoS logic so we can test this using mocktime
5852  ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
5853 
5854  //
5855  // Message: getdata (blocks)
5856  //
5857  std::vector<CInv> vGetData;
5858  if (CanServeBlocks(*peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.IsInitialBlockDownload()) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
5859  std::vector<const CBlockIndex*> vToDownload;
5860  NodeId staller = -1;
5861  auto get_inflight_budget = [&state]() {
5862  return std::max(0, MAX_BLOCKS_IN_TRANSIT_PER_PEER - static_cast<int>(state.vBlocksInFlight.size()));
5863  };
5864 
5865  // If a snapshot chainstate is in use, we want to find its next blocks
5866  // before the background chainstate to prioritize getting to network tip.
5867  FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload, staller);
5868  if (m_chainman.BackgroundSyncInProgress() && !IsLimitedPeer(*peer)) {
5869  // If the background tip is not an ancestor of the snapshot block,
5870  // we need to start requesting blocks from their last common ancestor.
5871  const CBlockIndex *from_tip = LastCommonAncestor(m_chainman.GetBackgroundSyncTip(), m_chainman.GetSnapshotBaseBlock());
5872  TryDownloadingHistoricalBlocks(
5873  *peer,
5874  get_inflight_budget(),
5875  vToDownload, from_tip,
5876  Assert(m_chainman.GetSnapshotBaseBlock()));
5877  }
5878  for (const CBlockIndex *pindex : vToDownload) {
5879  uint32_t nFetchFlags = GetFetchFlags(*peer);
5880  vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash());
5881  BlockRequested(pto->GetId(), *pindex);
5882  LogDebug(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
5883  pindex->nHeight, pto->GetId());
5884  }
5885  if (state.vBlocksInFlight.empty() && staller != -1) {
5886  if (State(staller)->m_stalling_since == 0us) {
5887  State(staller)->m_stalling_since = current_time;
5888  LogDebug(BCLog::NET, "Stall started peer=%d\n", staller);
5889  }
5890  }
5891  }
5892 
5893  //
5894  // Message: getdata (transactions)
5895  //
5896  {
5897  LOCK(m_tx_download_mutex);
5898  for (const GenTxid& gtxid : m_txdownloadman.GetRequestsToSend(pto->GetId(), current_time)) {
5899  vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*peer)), gtxid.GetHash());
5900  if (vGetData.size() >= MAX_GETDATA_SZ) {
5901  MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData);
5902  vGetData.clear();
5903  }
5904  }
5905  }
5906 
5907  if (!vGetData.empty())
5908  MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData);
5909  } // release cs_main
5910  MaybeSendFeefilter(*pto, *peer, current_time);
5911  return true;
5912 }
std::shared_ptr< const CTransaction > CTransactionRef
Definition: transaction.h:423
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND
Maximum rate of inventory items to send per second.
static Mutex g_msgproc_mutex
Mutex for anything that is only accessed via the msg processing thread.
Definition: net.h:1011
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
Definition: chain.h:165
bool IsMsgWtx() const
Definition: protocol.h:509
enum ReadStatus_t ReadStatus
node::BlockManager m_blockman
A single BlockManager instance is shared across each constructed chainstate to avoid duplicating bloc...
Definition: validation.h:1007
constexpr const char * SENDTXRCNCL
Contains a 4-byte version number and an 8-byte salt.
Definition: protocol.h:266
BlockFilterIndex is used to store and retrieve block filters, hashes, and headers for a range of bloc...
constexpr const char * CFHEADERS
cfheaders is a response to a getcfheaders request containing a filter header and a vector of filter h...
Definition: protocol.h:242
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for inbound peers.
int ret
std::atomic_bool fPauseSend
Definition: net.h:737
invalid by consensus rules
bool HaveNumChainTxs() const
Check whether this block and all previous blocks back to the genesis block or an assumeutxo snapshot ...
Definition: chain.h:259
std::chrono::time_point< NodeClock > time_point
Definition: time.h:19
AssertLockHeld(pool.cs)
bool IsMsgTx() const
Definition: protocol.h:507
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
static constexpr unsigned int INVENTORY_BROADCAST_MAX
Maximum number of inventory items to send per transmission.
bool ProcessNewBlockHeaders(std::span< const CBlockHeader > headers, bool min_pow_checked, BlockValidationState &state, const CBlockIndex **ppindex=nullptr) LOCKS_EXCLUDED(cs_main)
Process incoming block headers.
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
Definition: banman.h:58
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing)
ServiceFlags
nServices flags
Definition: protocol.h:309
bool ReadRawBlock(std::vector< uint8_t > &block, const FlatFilePos &pos) const
static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET
The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND based inc...
bool IsPruneMode() const
Whether running in -prune mode.
Definition: blockstorage.h:349
bool IsLocal() const
Definition: netaddress.cpp:402
std::optional< std::pair< CNetMessage, bool > > PollMessage() EXCLUSIVE_LOCKS_REQUIRED(!m_msg_process_queue_mutex)
Poll the next message from the processing queue of this connection.
Definition: net.cpp:3851
int64_t GetBlockTime() const
Definition: chain.h:266
assert(!tx.IsCoinBase())
NodeSeconds Time() const
Definition: chain.h:261
Describes a place in the block chain to another node such that if the other node doesn&#39;t have the sam...
Definition: block.h:123
CBlockIndex * pprev
pointer to the index of the predecessor of this block
Definition: chain.h:147
std::vector< TxMempoolInfo > infoAll() const
Definition: txmempool.cpp:863
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data It is treated as if this was the little-endian interpretation of ...
Definition: siphash.cpp:28
uint64_t m_addr_rate_limited
unsigned int nonce
Definition: miner_tests.cpp:75
bool OutboundTargetReached(bool historicalBlockServingLimit) const EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex)
check if the outbound target is reached if param historicalBlockServingLimit is set true...
Definition: net.cpp:3736
Valid, transaction was already in the mempool.
std::string ToString() const
Definition: protocol.cpp:77
RecursiveMutex & GetNodesMutex() const LOCK_RETURNED(m_nodes_mutex)
bool exists(const GenTxid &gtxid) const
Definition: txmempool.h:647
Definition: block.h:68
ReconciliationRegisterResult
We don't have the previous block the checked one is built on.
uint64_t ReadCompactSize(Stream &is, bool range_check=true)
Decode a CompactSize-encoded variable-length integer.
Definition: serialize.h:339
std::atomic_bool m_has_all_wanted_services
Whether this peer provides all services that we want.
Definition: net.h:851
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
Definition: validation.h:865
std::vector< uint16_t > indexes
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
Definition: tinyformat.h:1172
bool IsOutboundOrBlockRelayConn() const
Definition: net.h:760
static const int WTXID_RELAY_VERSION
"wtxidrelay" command for wtxid-based relay starts with this version
bool IsMsgFilteredBlk() const
Definition: protocol.h:510
An in-memory indexed chain of blocks.
Definition: chain.h:416
const std::chrono::seconds m_connected
Unix epoch time at peer connection.
Definition: net.h:706
std::string LogIP(bool log_ip) const
Helper function to optionally log the IP address.
Definition: net.cpp:710
Manages warning messages within a node.
Definition: warnings.h:39
size_t DynamicMemoryUsage() const
Definition: txmempool.cpp:1063
void Discourage(const CNetAddr &net_addr) EXCLUSIVE_LOCKS_REQUIRED(!m_banned_mutex)
Definition: banman.cpp:124
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL
Average delay between feefilter broadcasts in seconds.
transaction was not validated because package failed
inv message data
Definition: protocol.h:493
invalid proof of work or time too old
static std::unique_ptr< PeerManager > make(CConnman &connman, AddrMan &addrman, BanMan *banman, ChainstateManager &chainman, CTxMemPool &pool, node::Warnings &warnings, Options opts)
constexpr const char * SENDHEADERS
Indicates that a node prefers to receive new block announcements via a "headers" message rather than ...
Definition: protocol.h:186
BlockFilterIndex * GetBlockFilterIndex(BlockFilterType filter_type)
Get a block filter index by type.
static constexpr int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of ActiveChain().Tip() will not be pr...
Definition: validation.h:68
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
Definition: chain.cpp:165
transaction was missing some of its inputs
bool IsMsgCmpctBlk() const
Definition: protocol.h:511
std::vector< CAddress > GetAddresses(size_t max_addresses, size_t max_pct, std::optional< Network > network, const bool filtered=true) const
Return all or many randomly selected addresses, optionally by network.
Definition: net.cpp:3503
bool IsFeelerConn() const
Definition: net.h:803
CChain & ActiveChain() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
Definition: validation.h:1111
int in_avail() const
Definition: streams.h:216
All parent headers found, difficulty matches, timestamp >= median previous, checkpoint.
Definition: chain.h:97
bool MoneyRange(const CAmount &nValue)
Definition: amount.h:27
CBlockHeader GetBlockHeader() const
Definition: chain.h:230
static constexpr auto HEADERS_RESPONSE_TIME
How long to wait for a peer to respond to a getheaders request.
int Height() const
Return the maximal height in the chain.
Definition: chain.h:462
constexpr const char * NOTFOUND
The notfound message is a reply to a getdata message which requested an object the receiving node doe...
Definition: protocol.h:156
static constexpr unsigned int INVENTORY_BROADCAST_TARGET
Target number of tx inventory items to send per transmission.
BloomFilter is a probabilistic filter which SPV clients provide so that we can filter the transaction...
Definition: bloom.h:44
bool DeploymentActiveAfter(const CBlockIndex *pindexPrev, const Consensus::Params &params, Consensus::BuriedDeployment dep, [[maybe_unused]] VersionBitsCache &versionbitscache)
Determine if a deployment is active for the next block.
static constexpr SerParams V2_NETWORK
Definition: protocol.h:409
static constexpr unsigned int DEFAULT_MIN_RELAY_TX_FEE
Default for -minrelaytxfee, minimum relay fee for transactions.
Definition: policy.h:66
unsigned long size() const
Definition: txmempool.h:629
Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid...
Definition: chain.h:107
void SetCommonVersion(int greatest_common_version)
Definition: net.h:919
We're done syncing with this peer and can discard any remaining state.
uint64_t GetSequence() const EXCLUSIVE_LOCKS_REQUIRED(cs)
Definition: txmempool.h:705
constexpr const char * WTXIDRELAY
Indicates that a node prefers to relay transactions via wtxid, rather than txid.
Definition: protocol.h:260
bool ReadBlock(CBlock &block, const FlatFilePos &pos) const
Functions for disk access for blocks.
constexpr const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected...
Definition: protocol.h:144
Defined in BIP152.
Definition: protocol.h:484
constexpr const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
Definition: protocol.h:123
RollingBloomFilter is a probabilistic "keep track of most recently inserted" set. ...
Definition: bloom.h:108
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL
Average delay between peer address broadcasts.
bool Contains(Network net) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Definition: netbase.h:124
static constexpr size_t MAX_PCT_ADDR_TO_SEND
the maximum percentage of addresses from our addrman to return in response to a getaddr message...
CChainParams defines various tweakable parameters of a given instance of the Bitcoin system...
Definition: chainparams.h:80
violated mempool's fee/size/descendant/RBF/etc limits
the block header may be on a too-little-work chain
Mutex m_subver_mutex
Definition: net.h:717
bool IsNull() const
Definition: block.h:152
inputs (covered by txid) failed policy rules
void SetTryNewOutboundPeer(bool flag)
Definition: net.cpp:2420
void ignore(size_t num_ignore)
Definition: streams.h:236
const uint32_t MSG_WITNESS_FLAG
getdata message type flags
Definition: protocol.h:470
constexpr const char * ADDRV2
The addrv2 message relays connection information for peers on the network just like the addr message...
Definition: protocol.h:81
uint64_t GetLocalNonce() const
Definition: net.h:898
bool SeenLocal(const CService &addr)
vote for a local address
Definition: net.cpp:318
constexpr const char * GETCFILTERS
getcfilters requests compact filters for a range of blocks.
Definition: protocol.h:224
transaction spends a coinbase too early, or violates locktime/sequence locks
std::map< Wtxid, MempoolAcceptResult > m_tx_results
Map from wtxid to finished MempoolAcceptResults.
Definition: validation.h:237
static const unsigned int NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS
Window, in blocks, for connecting to NODE_NETWORK_LIMITED peers.
bool DeploymentActiveAt(const CBlockIndex &index, const Consensus::Params &params, Consensus::BuriedDeployment dep, [[maybe_unused]] VersionBitsCache &versionbitscache)
Determine if a deployment is active for this block.
static constexpr auto STALE_CHECK_INTERVAL
How frequently to check for stale tips.
bool MultipleManualOrFullOutboundConns(Network net) const EXCLUSIVE_LOCKS_REQUIRED(m_nodes_mutex)
Definition: net.cpp:2492
State
The various states a (txhash,peer) pair can be in.
Definition: txrequest.cpp:42
static constexpr std::chrono::minutes TIMEOUT_INTERVAL
Time after which to disconnect, after waiting for a ping response (or inactivity).
Definition: net.h:57
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
Additional block download timeout per parallel downloading peer (i.e.
static const int MIN_PEER_PROTO_VERSION
disconnect from peers older than this proto version
initial value. Tx has not yet been rejected
bool GetTryNewOutboundPeer() const
Definition: net.cpp:2415
static constexpr uint32_t MAX_GETCFILTERS_SIZE
Maximum number of compact filters that may be requested with one getcfilters.
enum Network GetNetwork() const
Definition: netaddress.cpp:500
bool empty() const
Definition: streams.h:182
std::string ToStringAddrPort() const
Definition: netaddress.cpp:907
PRESYNC means the peer has not yet demonstrated their chain has sufficient work and we're only buildi...
bool IsRelayable() const
Whether this address should be relayed to other peers even if we can't reach it ourselves.
Definition: netaddress.h:218
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT
Protect at least this many outbound peers from disconnection due to slow/ behind headers chain...
CSerializedNetMsg Make(std::string msg_type, Args &&... args)
constexpr const char * GETCFCHECKPT
getcfcheckpt requests evenly spaced compact filter headers, enabling parallelized download and valida...
Definition: protocol.h:249
static bool HasFlag(NetPermissionFlags flags, NetPermissionFlags f)
Stochastic address manager.
Definition: addrman.h:88
constexpr const char * FILTERADD
The filteradd message tells the receiving peer to add a single element to a previously-set bloom filt...
Definition: protocol.h:172
bool IsBanned(const CNetAddr &net_addr) EXCLUSIVE_LOCKS_REQUIRED(!m_banned_mutex)
Return whether net_addr is banned.
Definition: banman.cpp:89
Transaction validation functions.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE
Block download timeout base, expressed in multiples of the block interval (i.e.
bool ForNode(NodeId id, std::function< bool(CNode *pnode)> func)
Definition: net.cpp:3917
static const int SENDHEADERS_VERSION
"sendheaders" command and announcing blocks with headers starts with this version ...
Functions to serialize / deserialize common bitcoin types.
Definition: common-types.h:57
bool ProcessNewBlock(const std::shared_ptr< const CBlock > &block, bool force_processing, bool min_pow_checked, bool *new_block) LOCKS_EXCLUDED(cs_main)
Process an incoming block.
bool IsValid() const
Definition: netaddress.cpp:428
bool DisconnectNode(const std::string &node)
Definition: net.cpp:3636
std::function< void(const CAddress &addr, const std::string &msg_type, Span< const unsigned char > data, bool is_incoming)> CaptureMessage
Defaults to CaptureMessageToFile(), but can be overridden by unit tests.
Definition: net.cpp:4018
int GetExtraBlockRelayCount() const
Definition: net.cpp:2465
int64_t CAmount
Amount in satoshis (Can be negative)
Definition: amount.h:12
std::chrono::time_point< NodeClock, std::chrono::seconds > NodeSeconds
Definition: time.h:25
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS
Maximum number of transactions to consider for requesting, per peer.
Definition: txdownloadman.h:30
std::string TransportTypeAsString(TransportProtocolType transport_type)
Convert TransportProtocolType enum to a string value.
uint256 GetBlockHash() const
Definition: chain.h:243
constexpr const char * GETBLOCKS
The getblocks message requests an inv message that provides block header hashes starting from a parti...
Definition: protocol.h:107
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX
Maximum timeout for stalling block download.
std::chrono::seconds median_outbound_time_offset
std::string SanitizeString(std::string_view str, int rule)
Remove unsafe chars.
bool IsValid() const
Definition: validation.h:106
static constexpr auto PING_INTERVAL
Time between pings automatically sent out for latency probing and keepalive.
BlockFilterType
Definition: blockfilter.h:92
bool IsBlockMutated(const CBlock &block, bool check_witness_root)
Check if a block has been mutated (with respect to its merkle root and witness commitments).
GenTxid ToGenTxid(const CInv &inv)
Convert a TX/WITNESS_TX/WTX CInv to a GenTxid.
Definition: protocol.cpp:121
#define LOCK2(cs1, cs2)
Definition: sync.h:258
initial value. Block has not yet been rejected
bool IsGenBlkMsg() const
Definition: protocol.h:519
Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends...
Definition: chain.h:111
void SetAddrLocal(const CService &addrLocalIn) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_local_mutex)
May not be called more than once.
Definition: net.cpp:600
bool HasValidProofOfWork(const std::vector< CBlockHeader > &headers, const Consensus::Params &consensusParams)
Check with the proof of work on each blockheader matches the value in nBits.
static constexpr auto EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect.
Used to relay blocks as header + vector<merkle branch> to filtered nodes.
Definition: merkleblock.h:125
CAmount m_fee_filter_received
constexpr const char * VERSION
The version message provides information about the transmitting node to the receiving node at the beg...
Definition: protocol.h:65
std::optional< CService > GetLocalAddrForPeer(CNode &node)
Returns a local address that we should advertise to this peer.
Definition: net.cpp:246
bool BackgroundSyncInProgress() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
The state of a background sync (for net processing)
Definition: validation.h:1116
bool Add(const std::vector< CAddress > &vAddr, const CNetAddr &source, std::chrono::seconds time_penalty=0s)
Attempt to add one or more addresses to addrman&#39;s new table.
Definition: addrman.cpp:1302
static const int INVALID_CB_NO_BAN_VERSION
not banning for invalid compact blocks starts with this version
uint64_t m_addr_processed
std::atomic_bool m_bloom_filter_loaded
Whether this peer has loaded a bloom filter.
Definition: net.h:859
ArgsManager & args
Definition: bitcoind.cpp:277
static TxMempoolInfo GetInfo(CTxMemPool::indexed_transaction_set::const_iterator it)
Definition: txmempool.cpp:847
Scripts & signatures ok.
Definition: chain.h:115
Transaction might have a witness prior to SegWit activation, or witness may have been malleated (whic...
const std::unique_ptr< Transport > m_transport
Transport serializer/deserializer.
Definition: net.h:677
constexpr const char * GETBLOCKTXN
Contains a BlockTransactionsRequest Peer should respond with "blocktxn" message.
Definition: protocol.h:212
Class responsible for deciding what transactions to request and, once downloaded, whether and how to ...
constexpr const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
Definition: protocol.h:113
const bool m_preferred
Whether this peer is preferred for transaction download.
Definition: txdownloadman.h:51
constexpr const char * SENDADDRV2
The sendaddrv2 message signals support for receiving ADDRV2 messages (BIP155).
Definition: protocol.h:87
constexpr const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
Definition: protocol.h:102
ChainstateRole
This enum describes the various roles a specific Chainstate instance can take.
Definition: chain.h:25
std::vector< CTransactionRef > txn
this block was cached as being invalid and we didn't store the reason why
std::atomic_bool m_relays_txs
Whether we should relay transactions to this peer.
Definition: net.h:855
Validation result for package mempool acceptance.
Definition: validation.h:228
bool IsDiscouraged(const CNetAddr &net_addr) EXCLUSIVE_LOCKS_REQUIRED(!m_banned_mutex)
Return whether net_addr is discouraged.
Definition: banman.cpp:83
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY
Maximum feefilter broadcast delay after significant change.
static const unsigned int MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK
Maximum number of outstanding CMPCTBLOCK requests for the same block.
#define LOCK(cs)
Definition: sync.h:257
void StartExtraBlockRelayPeers()
Definition: net.cpp:2426
const char * name
Definition: rest.cpp:49
Double ended buffer combining vector and stream-like interfaces.
Definition: streams.h:146
constexpr const char * FILTERCLEAR
The filterclear message tells the receiving peer to remove a previously-set bloom filter...
Definition: protocol.h:180
std::string ToString() const
Definition: validation.h:112
the block failed to meet one of our checkpoints
bool LookupFilterHeader(const CBlockIndex *block_index, uint256 &header_out) EXCLUSIVE_LOCKS_REQUIRED(!m_cs_headers_cache)
Get a single filter header by block.
constexpr const char * VERACK
The verack message acknowledges a previously-received version message, informing the connecting node ...
Definition: protocol.h:70
constexpr const char * CFILTER
cfilter is a response to a getcfilters request containing a single compact filter.
Definition: protocol.h:229
bool Contains(const CBlockIndex *pindex) const
Efficiently check whether a block is present in this chain.
Definition: chain.h:447
A combination of a network address (CNetAddr) and a (TCP) port.
Definition: netaddress.h:530
Fast randomness source.
Definition: random.h:376
Transport protocol agnostic message container.
Definition: net.h:230
int64_t nPowTargetSpacing
Definition: params.h:117
CBlockIndex * Next(const CBlockIndex *pindex) const
Find the successor of a block in this chain, or nullptr if the given index is not found or is the tip...
Definition: chain.h:453
constexpr int64_t count_microseconds(std::chrono::microseconds t)
Definition: time.h:83
static constexpr SerParams V1
Definition: netaddress.h:231
bool IsProxy(const CNetAddr &addr)
Definition: netbase.cpp:719
bool IsGenTxMsg() const
Definition: protocol.h:515
bool LoadingBlocks() const
Definition: blockstorage.h:355
bool IsManualConn() const
Definition: net.h:779
int GetExtraFullOutboundCount() const
Definition: net.cpp:2451
A CService with information about it as peer.
Definition: protocol.h:366
std::string ToString() const
static constexpr size_t MAX_ADDR_TO_SEND
The maximum number of address records permitted in an ADDR message.
void Connected(const CService &addr, NodeSeconds time=Now< NodeSeconds >())
We have successfully connected to this peer.
Definition: addrman.cpp:1342
static constexpr uint64_t CMPCTBLOCKS_VERSION
The compactblocks version we support.
std::vector< unsigned char > GetKey() const
Definition: netaddress.cpp:899
static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for outbound peers.
arith_uint256 CalculateClaimedHeadersWork(std::span< const CBlockHeader > headers)
Return the sum of the claimed work on a given set of headers.
uint256 hash
Definition: protocol.h:525
CBlockIndex * ActiveTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
Definition: validation.h:1113
static const int PROTOCOL_VERSION
network protocol versioning
Result GetResult() const
Definition: validation.h:109
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY
SHA256("main address relay")[0:8].
constexpr const char * GETCFHEADERS
getcfheaders requests a compact filter header and the filter hashes for a range of blocks...
Definition: protocol.h:237
static constexpr uint32_t MAX_GETCFHEADERS_SIZE
Maximum number of cf hashes that may be requested with one getcfheaders.
bool LookupFilterHashRange(int start_height, const CBlockIndex *stop_index, std::vector< uint256 > &hashes_out) const
Get a range of filter hashes between two heights on a chain.
bool ExpectServicesFromConn() const
Definition: net.h:815
int64_t presync_height
int64_t NodeId
Definition: net.h:97
Definition: net.h:1051
Defined in BIP144.
Definition: protocol.h:485
bool GetNetworkActive() const
Definition: net.h:1136
CFeeRate min_relay_feerate
A fee rate smaller than this is considered zero fee (for relaying, mining and transaction creation) ...
uint64_t Finalize() const
Compute the 64-bit SipHash-2-4 of the data written so far.
Definition: siphash.cpp:77
""_hex is a compile-time user-defined literal returning a std::array<std::byte>, equivalent to ParseH...
Definition: strencodings.h:427
std::string ToString() const
Definition: uint256.cpp:47
constexpr const char * BLOCK
The block message transmits a single serialized block.
Definition: protocol.h:127
std::atomic< bool > m_bip152_highbandwidth_to
Definition: net.h:846
constexpr const char * GETDATA
The getdata message requests one or more data objects from another node.
Definition: protocol.h:96
std::vector< uint256 > vHave
Definition: block.h:134
RecursiveMutex & GetMutex() const LOCK_RETURNED(
Alias for cs_main.
Definition: validation.h:1001
NodeId GetId() const
Definition: net.h:894
TRACEPOINT_SEMAPHORE(net, inbound_message)
NodeSeconds nTime
Always included in serialization. The behavior is unspecified if the value is not representable as ui...
Definition: protocol.h:457
CSipHasher GetDeterministicRandomizer(uint64_t id) const
Get a unique deterministic randomizer.
Definition: net.cpp:3930
const bool m_inbound_onion
Whether this peer is an inbound onion, i.e. connected via our Tor onion service.
Definition: net.h:715
Parameters that influence chain consensus.
Definition: params.h:74
static const int MAX_CMPCTBLOCK_DEPTH
Maximum depth of blocks we're willing to serve as compact blocks to peers when requested.
void PongReceived(std::chrono::microseconds ping_time)
A ping-pong round trip has completed successfully.
Definition: net.h:967
CBlockIndex * LookupBlockIndex(const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
std::atomic_bool fDisconnect
Definition: net.h:731
std::string strSubVersion
Subversion as sent to the P2P network in version messages.
Definition: net.cpp:126
CFeeRate GetMinFee(size_t sizelimit) const
Definition: txmempool.cpp:1126
std::atomic< std::chrono::seconds > m_last_tx_time
UNIX epoch time of the last transaction received from this peer that we had not yet seen (e...
Definition: net.h:872
constexpr bool IsNull() const
Definition: uint256.h:48
bool IsMsgWitnessBlk() const
Definition: protocol.h:512
fails some policy, but might be acceptable if submitted in a (different) package
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.
Validation result for a transaction evaluated by MemPoolAccept (single or package).
Definition: validation.h:123
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
Definition: sync.h:301
bool IsRoutable() const
Definition: netaddress.cpp:466
#define Assume(val)
Assume is the identity function.
Definition: check.h:97
256-bit unsigned big integer.
bool IsWtxid() const
Definition: transaction.h:436
static bool MayHaveUsefulAddressDB(ServiceFlags services)
Checks if a peer with the given service flags may be capable of having a robust address-storage DB...
Definition: protocol.h:360
ReadStatus InitData(const CBlockHeaderAndShortTxIDs &cmpctblock, const std::vector< CTransactionRef > &extra_txn)
std::chrono::seconds PowTargetSpacing() const
Definition: params.h:119
constexpr int64_t count_seconds(std::chrono::seconds t)
Definition: time.h:81
std::chrono::microseconds m_ping_wait
TxMempoolInfo info_for_relay(const GenTxid &gtxid, uint64_t last_sequence) const
Returns info for a transaction if its entry_sequence < last_sequence.
Definition: txmempool.cpp:902
constexpr const char * ADDR
The addr (IP address) message relays connection information for peers on the network.
Definition: protocol.h:75
bool CheckIncomingNonce(uint64_t nonce)
Definition: net.cpp:373
static const int SHORT_IDS_BLOCKS_VERSION
short-id-based block download starts with this version
Definition: messages.h:20
const CAddress addr
Definition: net.h:708
constexpr const char * MEMPOOL
The mempool message requests the TXIDs of transactions that the receiving node has verified as valid ...
Definition: protocol.h:139
static constexpr int CFCHECKPT_INTERVAL
Interval between compact filter checkpoints.
bool IsBlockOnlyConn() const
Definition: net.h:799
#define LogInfo(...)
Definition: logging.h:356
Transaction is missing a witness.
size_type size() const
Definition: streams.h:181
if(!SetupNetworking())
constexpr const char * BLOCKTXN
Contains a BlockTransactions.
Definition: protocol.h:218
bool IsValid(enum BlockStatus nUpTo=BLOCK_VALID_TRANSACTIONS) const EXCLUSIVE_LOCKS_REQUIRED(
Check whether this block index entry is valid up to the passed validity level.
Definition: chain.h:295
bool IsMsgBlk() const
Definition: protocol.h:508
uint256 GetHash() const
Definition: block.cpp:11
constexpr const char * CMPCTBLOCK
Contains a CBlockHeaderAndShortTxIDs object - providing a header and list of "short txids"...
Definition: protocol.h:206
PackageValidationState m_state
Definition: validation.h:230
256-bit opaque blob.
Definition: uint256.h:201
void scheduleFromNow(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Call f once after the delta has passed.
Definition: scheduler.h:53
ServiceFlags their_services
invalid by consensus rules (excluding any below reasons)
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL
Delay between rotating the peers we relay a particular address to.
static time_point now() noexcept
Return current system time or mocked time, if set.
Definition: time.cpp:26
static const unsigned int MAX_GETDATA_SZ
Limit to avoid sending big packets.
const arith_uint256 & MinimumChainWork() const
Definition: validation.h:979
ServiceFlags nServices
Serialized as uint64_t in V1, and as CompactSize in V2.
Definition: protocol.h:459
#define EXCLUSIVE_LOCKS_REQUIRED(...)
Definition: threadsafety.h:49
std::vector< CTransactionRef > vtx
Definition: block.h:72
std::chrono::seconds time_offset
constexpr const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
Definition: protocol.h:150
constexpr const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
Definition: protocol.h:164
the block's data didn't match the data committed to by the PoW
void WakeMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc)
Definition: net.cpp:2236
CTxMemPool stores valid-according-to-the-current-best-chain transactions that may be included in the ...
Definition: txmempool.h:303
auto result
Definition: common-types.h:74
#define LogDebug(category,...)
Definition: logging.h:381
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT
Default time during which a peer must stall block download progress before being disconnected.
#define LOCKS_EXCLUDED(...)
Definition: threadsafety.h:48
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout.
std::set< uint256 > GetUnbroadcastTxs() const
Returns transactions in unbroadcast set.
Definition: txmempool.h:687
uint32_t GetMappedAS(const CNetAddr &addr) const
Definition: net.cpp:3619
std::vector< std::pair< unsigned int, uint256 > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
Definition: merkleblock.h:138
The block chain is a tree shaped structure starting with the genesis block at the root...
Definition: chain.h:140
const CChainParams & Params()
Return the currently selected parameters.
constexpr const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmi...
Definition: protocol.h:92
constexpr const char * CFCHECKPT
cfcheckpt is a response to a getcfcheckpt request containing a vector of evenly spaced filter headers...
Definition: protocol.h:254
#define LogError(...)
Definition: logging.h:358
static const unsigned int BLOCK_DOWNLOAD_WINDOW
Size of the "block download window": how far ahead of our current height do we fetch? Larger windows tolerate larger download speed differences between peer, but increase the potential degree of disordering of blocks on disk (which make reindexing and pruning harder).
static const unsigned int MAX_INV_SZ
The maximum number of entries in an 'inv' protocol message.
#define TRACEPOINT(context,...)
Definition: trace.h:49
bool IsTxAvailable(size_t index) const
A block this one builds on is invalid.
TxMempoolInfo info(const GenTxid &gtxid) const
Definition: txmempool.cpp:893
void SetServices(const CService &addr, ServiceFlags nServices)
Update an entry's service bits.
Definition: addrman.cpp:1347
bool fLogIPs
Definition: logging.cpp:47
#define ACQUIRED_BEFORE(...)
Definition: threadsafety.h:41
bool IsInitialBlockDownload() const
Check whether we are doing an initial block download (synchronizing from disk or network) ...
PackageMempoolAcceptResult ProcessNewPackage(Chainstate &active_chainstate, CTxMemPool &pool, const Package &package, bool test_accept, const std::optional< CFeeRate > &client_maxfeerate)
Validate (and maybe submit) a package to the mempool.
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
Definition: script.h:28
CBlockIndex * Tip() const
Returns the index entry for the tip of this chain, or nullptr if none.
Definition: chain.h:433
#define LIMITED_STRING(obj, n)
Definition: serialize.h:502
bool fListen
Definition: net.cpp:123
Fee rate in satoshis per kilovirtualbyte: CAmount / kvB.
Definition: feerate.h:32
constexpr auto MakeUCharSpan(V &&v) -> decltype(UCharSpanCast(Span
Like the Span constructor, but for (const) unsigned char member types only.
Definition: span.h:296
static constexpr auto CHAIN_SYNC_TIMEOUT
Timeout for (unprotected) outbound peers to sync to our chainwork.
std::atomic_bool fSuccessfullyConnected
fSuccessfullyConnected is set to true on receiving VERACK from the peer.
Definition: net.h:728
SipHash-2-4.
Definition: siphash.h:14
I randrange(I range) noexcept
Generate a random integer in the range [0..range), with range > 0.
Definition: random.h:254
static constexpr CAmount MAX_MONEY
No amount larger than this (in satoshi) is valid.
Definition: amount.h:26
#define AssertLockNotHeld(cs)
Definition: sync.h:147
bool IsInvalid() const
Definition: validation.h:107
static const unsigned int MAX_SUBVERSION_LENGTH
Maximum length of the user agent string in version message.
Definition: net.h:65
static int count
std::vector< NodeId > m_senders
Definition: txdownloadman.h:59
std::atomic< int > nVersion
Definition: net.h:716
#define GUARDED_BY(x)
Definition: threadsafety.h:38
const CBlockIndex * GetBackgroundSyncTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
The tip of the background sync chain.
Definition: validation.h:1121
std::string ConnectionTypeAsString() const
Definition: net.h:948
static size_t RecursiveDynamicUsage(const CScript &script)
Definition: core_memusage.h:12
bool ShouldRunInactivityChecks(const CNode &node, std::chrono::seconds now) const
Return true if we should disconnect the peer for failing an inactivity check.
Definition: net.cpp:1994
std::string DisconnectMsg(bool log_ip) const
Helper function to log disconnects.
Definition: net.cpp:715
this node does not have a mempool so can't validate the transaction
CTransactionRef get(const uint256 &hash) const
Definition: txmempool.cpp:884
static bool LogAcceptCategory(BCLog::LogFlags category, BCLog::Level level)
Return true if log accepts specified category, at the specified level.
Definition: logging.h:328
block timestamp was > 2 hours in the future (or our clock is bad)
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE
Maximum number of headers to announce when relaying blocks with headers message.
std::atomic< bool > m_bip152_highbandwidth_from
Definition: net.h:848
constexpr const char * TX
The tx message transmits a single transaction.
Definition: protocol.h:117
CBlockLocator GetLocator(const CBlockIndex *index)
Get a locator for a block index entry.
Definition: chain.cpp:50
void RemoveUnbroadcastTx(const uint256 &txid, const bool unchecked=false)
Removes a transaction from the unbroadcast set.
Definition: txmempool.cpp:1069
bool IsAddrFetchConn() const
Definition: net.h:807
HeadersSyncState:
Definition: headerssync.h:101
arith_uint256 GetBlockProof(const CBlockIndex &block)
Definition: chain.cpp:131
A Span is an object that can refer to a contiguous sequence of objects.
Definition: span.h:97
static const unsigned int MAX_LOCATOR_SZ
The maximum number of entries in a locator.
The basic transaction that is broadcasted on the network and contained in blocks. ...
Definition: transaction.h:295
int nHeight
height of the entry in the chain. The genesis block has height 0
Definition: chain.h:153
FlatFilePos GetBlockPos() const EXCLUSIVE_LOCKS_REQUIRED(
Definition: chain.h:208
Information about a peer.
Definition: net.h:672
const Consensus::Params & GetConsensus() const
Definition: chainparams.h:93
static constexpr auto MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict...
std::vector< int > vHeightInFlight
std::atomic< std::chrono::seconds > m_last_block_time
UNIX epoch time of the last block received from this peer that we had not yet seen (e...
Definition: net.h:866
void ForEachNode(const NodeFn &func)
Definition: net.h:1151
Simple class for background tasks that should be run periodically or once "after a while"...
Definition: scheduler.h:39
CBlockIndex * GetAncestor(int height)
Efficiently find an ancestor of this block.
Definition: chain.cpp:120
full block available in blk*.dat
Definition: chain.h:121
bool Good(const CService &addr, NodeSeconds time=Now< NodeSeconds >())
Mark an address record as accessible and attempt to move it to addrman's tried table.
Definition: addrman.cpp:1307
static constexpr SerParams V1_NETWORK
Definition: protocol.h:408
#define LogPrintf(...)
Definition: logging.h:361
int64_t GetTime()
DEPRECATED, see GetTime.
Definition: time.cpp:76
Defined in BIP 339.
Definition: protocol.h:481
int GetCommonVersion() const
Definition: net.h:924
const std::string m_addr_name
Definition: net.h:711
uint64_t rand64() noexcept
Generate a random 64-bit integer.
Definition: random.h:395
bool IsAddrV1Compatible() const
Check if the current object can be serialized in pre-ADDRv2/BIP155 format.
Definition: netaddress.cpp:481
bool HasPermission(NetPermissionFlags permission) const
Definition: net.h:724
std::string ToString() const
Definition: txdownloadman.h:81
bool IsInboundConn() const
Definition: net.h:811
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate...
Definition: cs_main.cpp:8
static constexpr int HISTORICAL_BLOCK_AGE
Age after which a block is considered historical for purposes of rate limiting block relay...
bool LookupFilterRange(int start_height, const CBlockIndex *stop_index, std::vector< BlockFilter > &filters_out) const
Get a range of filters between two heights on a chain.
static constexpr double MAX_ADDR_RATE_PER_SECOND
The maximum rate of address records we&#39;re willing to process on average.
Tx already in mempool or conflicts with a tx in the chain (if it conflicts with another tx in mempool...
void ReportHeadersPresync(const arith_uint256 &work, int64_t height, int64_t timestamp)
This is used by net_processing to report pre-synchronization progress of headers, as headers are not ...
otherwise didn&#39;t meet our local policy rules
static const int FEEFILTER_VERSION
"feefilter" tells peers to filter invs to you by fee starts with this version
void scheduleEvery(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Repeat f until the scheduler is stopped.
Definition: scheduler.cpp:108
A generic txid reference (txid or wtxid).
Definition: transaction.h:427
CAmount GetFeePerK() const
Return the fee in satoshis for a vsize of 1000 vbytes.
Definition: feerate.h:60
constexpr const char * FEEFILTER
The feefilter message tells the receiving peer not to inv us any txs which do not meet the specified ...
Definition: protocol.h:192
bool GetUseAddrmanOutgoing() const
Definition: net.h:1137
constexpr const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
Definition: protocol.h:132
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL
Average delay between local address broadcasts.
unsigned int nTx
Number of transactions in this block.
Definition: chain.h:170
SnapshotCompletionResult MaybeCompleteSnapshotValidation() EXCLUSIVE_LOCKS_REQUIRED(const CBlockIndex *GetSnapshotBaseBlock() const EXCLUSIVE_LOCKS_REQUIRED(Chainstate ActiveChainstate)() const
Once the background validation chainstate has reached the height which is the base of the UTXO snapsh...
Definition: validation.h:1110
constexpr const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
Definition: protocol.h:200
Nodes collect new transactions into a block, hash them into a hash tree, and scan through nonce value...
Definition: block.h:21
static GenTxid Txid(const uint256 &hash)
Definition: transaction.h:434
const Wtxid & GetWitnessHash() const LIFETIMEBOUND
Definition: transaction.h:344
std::string ToString(const T &t)
Locale-independent version of std::to_string.
Definition: string.h:233
const uint256 & GetHash() const LIFETIMEBOUND
Definition: transaction.h:437
RecursiveMutex cs
This mutex needs to be locked when accessing mapTx or other members that are guarded by it...
Definition: txmempool.h:390
bool IsFullOutboundConn() const
Definition: net.h:775
#define Assert(val)
Identity function.
Definition: check.h:85
const Txid & GetHash() const LIFETIMEBOUND
Definition: transaction.h:343
int64_t GetBlockProofEquivalentTime(const CBlockIndex &to, const CBlockIndex &from, const CBlockIndex &tip, const Consensus::Params &params)
Return the time it would take to redo the work difference between from and to, assuming the current h...
Definition: chain.cpp:146
static constexpr TransactionSerParams TX_WITH_WITNESS
Definition: transaction.h:195
static constexpr TransactionSerParams TX_NO_WITNESS
Definition: transaction.h:196
ReachableNets g_reachable_nets
Definition: netbase.cpp:43
#define PT_GUARDED_BY(x)
Definition: threadsafety.h:39
const std::string & BlockFilterTypeName(BlockFilterType filter_type)
Get the human-readable name for a filter type.
const Options m_opts
Definition: txmempool.h:439
static constexpr uint32_t TXRECONCILIATION_VERSION
Supported transaction reconciliation protocol version.
MempoolAcceptResult ProcessTransaction(const CTransactionRef &tx, bool test_accept=false) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Try to add a transaction to the memory pool.