Bitcoin Core  27.1.0
P2P Digital Currency
net_processing.cpp
Go to the documentation of this file.
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2022 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 
6 #include <net_processing.h>
7 
8 #include <addrman.h>
9 #include <banman.h>
10 #include <blockencodings.h>
11 #include <blockfilter.h>
12 #include <chainparams.h>
13 #include <consensus/amount.h>
14 #include <consensus/validation.h>
15 #include <deploymentstatus.h>
16 #include <hash.h>
17 #include <headerssync.h>
18 #include <index/blockfilterindex.h>
19 #include <kernel/chain.h>
20 #include <kernel/mempool_entry.h>
21 #include <logging.h>
22 #include <merkleblock.h>
23 #include <netbase.h>
24 #include <netmessagemaker.h>
25 #include <node/blockstorage.h>
26 #include <node/txreconciliation.h>
27 #include <policy/fees.h>
28 #include <policy/policy.h>
29 #include <policy/settings.h>
30 #include <primitives/block.h>
31 #include <primitives/transaction.h>
32 #include <random.h>
33 #include <reverse_iterator.h>
34 #include <scheduler.h>
35 #include <streams.h>
36 #include <sync.h>
37 #include <timedata.h>
38 #include <tinyformat.h>
39 #include <txmempool.h>
40 #include <txorphanage.h>
41 #include <txrequest.h>
42 #include <util/check.h>
43 #include <util/strencodings.h>
44 #include <util/time.h>
45 #include <util/trace.h>
46 #include <validation.h>
47 
48 #include <algorithm>
49 #include <atomic>
50 #include <future>
51 #include <memory>
52 #include <optional>
53 #include <typeinfo>
54 #include <utility>
55 
/** Base headers-download timeout; a per-header allowance (below) is added on top. */
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
/** Additional headers-download timeout granted per expected header. */
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
/** How long to wait for a peer to respond to a getheaders request. */
static constexpr auto HEADERS_RESPONSE_TIME{2min};
/** Protect up to this many outbound peers from disconnection due to a slow/behind chain. */
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
/** Timeout for (unprotected) outbound peers to catch up to our chain work (see ConsiderEviction). */
static constexpr auto CHAIN_SYNC_TIMEOUT{20min};
/** How frequently to check whether our tip looks stale. */
static constexpr auto STALE_CHECK_INTERVAL{10min};
/** How frequently to check for (and possibly evict) extra outbound peers. */
static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s};
/** Minimum connection age before a peer is considered for eviction. */
static constexpr auto MINIMUM_CONNECT_TIME{30s};
/** Fixed salt for address-relay hashing (arbitrary but stable constant). */
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
/** 30 days expressed in seconds; age limit for relaying old ("stale") blocks. */
static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
/** 7 days expressed in seconds; blocks older than this count as "historical". */
static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
/** How frequently we ping peers (see MaybeSendPing). */
static constexpr auto PING_INTERVAL{2min};
/** Maximum number of entries accepted in a block locator. */
static const unsigned int MAX_LOCATOR_SZ = 101;
/** Maximum number of entries accepted in an inv message. */
static const unsigned int MAX_INV_SZ = 50000;
/** Maximum number of in-flight transaction requests to a single peer. */
static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT = 100;
/** Maximum number of transaction announcements tracked per peer. */
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 5000;
/** Extra delay before requesting a tx announced only by txid. */
static constexpr auto TXID_RELAY_DELAY{2s};
/** Extra delay before requesting a tx from a non-preferred peer. */
static constexpr auto NONPREF_PEER_TX_DELAY{2s};
/** Extra delay before requesting a tx from a peer with many outstanding requests. */
static constexpr auto OVERLOADED_PEER_TX_DELAY{2s};
/** How long to wait before expiring an outstanding getdata(tx) request. */
static constexpr auto GETDATA_TX_INTERVAL{60s};
/** Maximum number of items accepted in a getdata message. */
static const unsigned int MAX_GETDATA_SZ = 1000;
/** Maximum number of blocks simultaneously in flight from a single peer. */
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
/** Initial block-stalling timeout; adaptive, bounded by the max below (see m_block_stalling_timeout). */
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s};
/** Upper bound for the adaptive block-stalling timeout. */
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s};
/** Maximum number of headers sent in a single headers message. */
static const unsigned int MAX_HEADERS_RESULTS = 2000;
/** Maximum depth from the tip at which compact block announcements are used. */
static const int MAX_CMPCTBLOCK_DEPTH = 5;
/** Maximum depth from the tip at which blocktxn requests are still served. */
static const int MAX_BLOCKTXN_DEPTH = 10;
/** Size of the moving window of blocks downloaded ahead of validation. */
static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
/** Block download timeout base — presumably in units of the expected block interval; confirm against usage. */
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
/** Additional block download timeout per parallel downloading peer (same units as the base). */
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
/** Maximum number of block hashes announced in a single headers-style announcement. */
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
/** Maximum number of unconnecting headers messages tolerated before penalizing the peer. */
static const int MAX_NUM_UNCONNECTING_HEADERS_MSGS = 10;
/** Minimum number of recent blocks a NODE_NETWORK_LIMITED peer must serve. */
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
/** Safety margin (in blocks) applied when serving NODE_NETWORK_LIMITED connections. */
static const unsigned int NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS = 144;
/** Average interval between re-advertisements of our own local address to a peer. */
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h};
/** Average interval between flushes of queued addresses to a peer. */
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s};
/** How often to rotate the destination peers chosen for relaying a given address. */
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h};
/** Average interval between tx inv batches to inbound peers (shared timer; see NextInvToInbounds). */
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s};
/** Average interval between tx inv batches to outbound peers (per-peer timer). */
static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL{2s};
/** Target rate of transaction announcements per second, per peer. */
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
/** Hard cap on inventory items announced in one batch.
 *  NOTE(review): INVENTORY_BROADCAST_TARGET (used in the assert below) is declared
 *  in a nearby line not shown in this view. */
static constexpr unsigned int INVENTORY_BROADCAST_MAX = 1000;
static_assert(INVENTORY_BROADCAST_MAX >= INVENTORY_BROADCAST_TARGET, "INVENTORY_BROADCAST_MAX too low");
static_assert(INVENTORY_BROADCAST_MAX <= MAX_PEER_TX_ANNOUNCEMENTS, "INVENTORY_BROADCAST_MAX too high");
/** Average interval between feefilter broadcasts. */
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min};
/** Maximum delay applied when announcing a feefilter change. */
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min};
/** Maximum number of compact filters served per getcfilters request. */
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
/** Maximum number of cfheaders served per getcfheaders request. */
static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
/** Maximum percentage of known addresses returned for a getaddr. */
static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
/** Maximum number of addresses queued for sending to one peer. */
static constexpr size_t MAX_ADDR_TO_SEND{1000};
/** Long-term rate limit (addresses/second) accepted from a single peer. */
static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1};
/** Compact block protocol version we signal in sendcmpct (2 = segwit; see BIP152). */
static constexpr uint64_t CMPCTBLOCKS_VERSION{2};
181 
182 // Internal stuff
183 namespace {
/** Bookkeeping for one block requested from a peer. */
struct QueuedBlock {
    /** Block index entry of the requested block. */
    const CBlockIndex* pindex;
    /** Non-null only when the caller of BlockRequested() asked for access via
     *  its pit parameter, i.e. when the block is being filled in piecewise
     *  (compact block reconstruction). */
    std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
};
191 
/** Per-peer state managed by net processing. Members are individually
 *  synchronized: each is atomic, immutable after construction, or guarded by
 *  the mutex named in its GUARDED_BY annotation. */
struct Peer {
    /** Peer id (immutable). */
    const NodeId m_id{0};

    /** Services we offered to this peer (immutable, set at construction). */
    const ServiceFlags m_our_services;
    /** Services this peer offers us. */
    std::atomic<ServiceFlags> m_their_services{NODE_NONE};

    /** Protects the misbehavior members below. */
    Mutex m_misbehavior_mutex;
    /** Accumulated misbehavior score for this peer. */
    int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
    /** Whether this peer should be disconnected and marked as discouraged. */
    bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};

    /** Protects block inventory members below. */
    Mutex m_block_inv_mutex;
    /** Block hashes queued to announce to this peer via inv messages. */
    std::vector<uint256> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
    /** Block hashes queued to announce to this peer via headers messages. */
    std::vector<uint256> m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);
    /** Continuation marker for paged block announcements — presumably the
     *  getblocks "continue from here" hash; confirm against GETBLOCKS handling. */
    uint256 m_continuation_block GUARDED_BY(m_block_inv_mutex) {};

    /** Chain height the peer reported at connection time (-1 until known). */
    std::atomic<int> m_starting_height{-1};

    /** Nonce of the most recently sent ping (0 when none outstanding). */
    std::atomic<uint64_t> m_ping_nonce_sent{0};
    /** Time the most recent ping was sent. */
    std::atomic<std::chrono::microseconds> m_ping_start{0us};
    /** Whether a ping has been requested but not yet sent. */
    std::atomic<bool> m_ping_queued{false};

    /** Whether this peer relays transactions by wtxid (BIP 339). */
    std::atomic<bool> m_wtxid_relay{false};
    /** Earliest time at which the next feefilter message may be sent. */
    std::chrono::microseconds m_next_send_feefilter GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};

    /** Transaction relay state; allocated at most once per peer via SetTxRelay(). */
    struct TxRelay {
        mutable RecursiveMutex m_bloom_filter_mutex;
        /** Whether we relay transactions to this peer. */
        bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
        /** Bloom filter installed by the peer (BIP 37), if any. */
        std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr};

        mutable RecursiveMutex m_tx_inventory_mutex;
        /** Rolling filter of tx hashes this peer is already known to have
         *  (populated via AddKnownTx), so we avoid re-announcing them. */
        CRollingBloomFilter m_tx_inventory_known_filter GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
        /** Tx hashes queued for announcement to this peer. */
        std::set<uint256> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
        /** Whether the peer requested our mempool contents (MEMPOOL message). */
        bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
        /** Next scheduled time to send an inv batch to this peer. */
        std::chrono::microseconds m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0};
        /** Sequence number of the last inv sent (starts at 1). */
        uint64_t m_last_inv_sequence GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1};

        /** Minimum fee rate received from this peer's feefilter message. */
        std::atomic<CAmount> m_fee_filter_received{0};
    };

    /* Initializes a TxRelay struct for this peer. Can be called at most once for a peer. */
    TxRelay* SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex)
    {
        LOCK(m_tx_relay_mutex);
        Assume(!m_tx_relay);
        m_tx_relay = std::make_unique<Peer::TxRelay>();
        return m_tx_relay.get();
    };

    /** Returns the TxRelay state, or nullptr if tx relay was never set up for this peer. */
    TxRelay* GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex)
    {
        return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
    };

    /** Addresses queued to send to this peer. */
    std::vector<CAddress> m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
    /** Rolling filter of addresses this peer is already known to have; only
     *  allocated when address relay is enabled (see AddAddressKnown/PushAddress). */
    std::unique_ptr<CRollingBloomFilter> m_addr_known GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
    /** Whether address relay has been set up with this peer. */
    std::atomic_bool m_addr_relay_enabled{false};
    /** Whether we have sent this peer a getaddr. */
    bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
    /** Protects the next-send timestamps below. */
    mutable Mutex m_addr_send_times_mutex;
    /** Next time we may send an addr message to this peer. */
    std::chrono::microseconds m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    /** Next time we may advertise our own local address to this peer. */
    std::chrono::microseconds m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    /** Whether the peer asked for addrv2 (BIP 155) format messages. */
    std::atomic_bool m_wants_addrv2{false};
    /** Whether this peer has already sent us a getaddr. */
    bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
    /** Token bucket used to rate-limit addresses accepted from this peer. */
    double m_addr_token_bucket GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1.0};
    /** Timestamp of the last token-bucket refill. */
    std::chrono::microseconds m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){GetTime<std::chrono::microseconds>()};
    /** Count of addresses from this peer dropped due to rate limiting. */
    std::atomic<uint64_t> m_addr_rate_limited{0};
    /** Count of addresses from this peer that were processed. */
    std::atomic<uint64_t> m_addr_processed{0};

    /** Whether an inv from this peer already triggered a getheaders while our headers sync was incomplete. */
    bool m_inv_triggered_getheaders_before_sync GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};

    /** Protects m_getdata_requests. */
    Mutex m_getdata_requests_mutex;
    /** Work queue of getdata items received from this peer. */
    std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);


    /** Protects m_headers_sync. */
    Mutex m_headers_sync_mutex;
    /** State for the low-work headers pre-synchronization with this peer, if active. */
    std::unique_ptr<HeadersSyncState> m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex) GUARDED_BY(m_headers_sync_mutex) {};

    /** Whether we have already sent this peer a sendheaders message. */
    std::atomic<bool> m_sent_sendheaders{false};

    /** Number of unconnecting headers messages received from this peer. */
    int m_num_unconnecting_headers_msgs GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};

    /** Deadline for the current headers sync with this peer (0 when inactive). */
    std::chrono::microseconds m_headers_sync_timeout GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us};

    /** Whether the peer prefers block announcements via headers messages. */
    bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};

    explicit Peer(NodeId id, ServiceFlags our_services)
        : m_id{id}
        , m_our_services{our_services}
    {}

private:
    /** Protects m_tx_relay. */
    mutable Mutex m_tx_relay_mutex;

    /** Transaction relay state; nullptr until SetTxRelay() is called. */
    std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);
};
404 
/** Shared-ownership handle to a Peer; handed out by GetPeerRef()/RemovePeer(). */
using PeerRef = std::shared_ptr<Peer>;
406 
/** Validation-relevant state for a connected peer. Instances live in
 *  m_node_states, which is GUARDED_BY(cs_main), so all access requires cs_main. */
struct CNodeState {
    /** The best block this peer is known to have announced. */
    const CBlockIndex* pindexBestKnownBlock{nullptr};
    /** Hash of the last announced block we have no index entry for yet. */
    uint256 hashLastUnknownBlock{};
    /** The last block we both have; starting point for further download. */
    const CBlockIndex* pindexLastCommonBlock{nullptr};
    /** The best header we have sent to this peer. */
    const CBlockIndex* pindexBestHeaderSent{nullptr};
    /** Whether we have started headers synchronization with this peer. */
    bool fSyncStarted{false};
    /** When this peer started stalling block download (0 if not stalling). */
    std::chrono::microseconds m_stalling_since{0us};
    /** Blocks currently in flight from this peer, in request order. */
    std::list<QueuedBlock> vBlocksInFlight;
    /** When the current block download batch from this peer started. */
    std::chrono::microseconds m_downloading_since{0us};
    /** Whether this peer is preferred for block download. */
    bool fPreferredDownload{false};
    /** Whether we requested high-bandwidth compact blocks from this peer. */
    bool m_requested_hb_cmpctblocks{false};
    /** Whether this peer signalled compact block support (sendcmpct). */
    bool m_provides_cmpctblocks{false};

    /** Bookkeeping for the outbound chain-sync eviction logic (see ConsiderEviction). */
    struct ChainSyncTimeoutState {
        /** Deadline for the peer to demonstrate sufficient chain work (0 when unset). */
        std::chrono::seconds m_timeout{0s};
        /** Our tip's header at the time the timeout was set. */
        const CBlockIndex* m_work_header{nullptr};
        /** Whether a final getheaders has been sent before giving up. */
        bool m_sent_getheaders{false};
        /** Whether this peer is protected from chain-sync disconnection. */
        bool m_protect{false};
    };

    ChainSyncTimeoutState m_chain_sync;

    /** Time of the last new-block announcement from this peer (unix seconds). */
    int64_t m_last_block_announcement{0};

    /** Whether this peer connected to us (immutable). */
    const bool m_is_inbound;

    CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
};
481 
/** Concrete PeerManager implementation; also receives validation and net events.
 *  Negative EXCLUSIVE_LOCKS_REQUIRED(!mutex) annotations mean the caller must
 *  NOT already hold that mutex. */
class PeerManagerImpl final : public PeerManager
{
public:
    PeerManagerImpl(CConnman& connman, AddrMan& addrman,
                    BanMan* banman, ChainstateManager& chainman,
                    CTxMemPool& pool, Options opts);

    /** Overridden from CValidationInterface. */
    void BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
    void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
    void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void BlockChecked(const CBlock& block, const BlockValidationState& state) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);

    /** Implement NetEventsInterface. */
    void InitializeNode(CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex);
    bool HasAllDesirableServiceFlags(ServiceFlags services) const override;
    bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
    bool SendMessages(CNode* pto) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex, g_msgproc_mutex);

    /** Implement PeerManager. */
    void StartScheduledTasks(CScheduler& scheduler) override;
    void CheckForStaleTipAndEvictPeers() override;
    std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    bool IgnoresIncomingTxs() override { return m_opts.ignore_incoming_txs; }
    void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void RelayTransaction(const uint256& txid, const uint256& wtxid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void SetBestBlock(int height, std::chrono::seconds time) override
    {
        m_best_height = height;
        m_best_block_time = time;
    };
    void UnitTestMisbehaving(NodeId peer_id, int howmuch) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) { Misbehaving(*Assert(GetPeerRef(peer_id)), howmuch, ""); };
    void ProcessMessage(CNode& pfrom, const std::string& msg_type, DataStream& vRecv,
                        const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
    void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) override;
    ServiceFlags GetDesirableServiceFlags(ServiceFlags services) const override;

private:
    /** Consider evicting an outbound peer that has not kept up with our chain work. */
    void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex);

    /** If we have extra outbound peers, try to evict the one least recently useful. */
    void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Scheduler task to retry the initial broadcast of our own transactions. */
    void ReattemptInitialBroadcast(CScheduler& scheduler) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    /** Get a shared pointer to the Peer for this id, or nullptr if not found. */
    PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    /** Remove the Peer for this id from the map and return it. */
    PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    /** Increment the peer's misbehavior score; may mark it for disconnection. */
    void Misbehaving(Peer& peer, int howmuch, const std::string& message);

    /** Potentially punish a peer for a consensus-invalid block; returns whether punishment was applied. */
    bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
                                 bool via_compact_block, const std::string& message = "")
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    /** Potentially punish a peer for an invalid transaction; returns whether punishment was applied. */
    bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    /** Disconnect (and possibly discourage) a misbehaving peer; returns true if disconnected. */
    bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer);

    /** Process a previously-orphaned transaction whose parents have arrived. */
    bool ProcessOrphanTx(Peer& peer)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);

    /** Process a batch of headers received from a peer (headers or cmpctblock path). */
    void ProcessHeadersMessage(CNode& pfrom, Peer& peer,
                               std::vector<CBlockHeader>&& headers,
                               bool via_compact_block)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
    /** Helpers for ProcessHeadersMessage. */
    bool CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer);
    arith_uint256 GetAntiDoSWorkThreshold();
    void HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
    bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const;
    bool IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom,
                                            std::vector<CBlockHeader>& headers)
        EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
    bool TryLowWorkHeadersSync(Peer& peer, CNode& pfrom,
                               const CBlockIndex* chain_start_header,
                               std::vector<CBlockHeader>& headers)
        EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);

    /** Whether the given header is an ancestor of our best header or active tip. */
    bool IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Send a getheaders unless one is already outstanding; returns whether it was sent. */
    bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
    /** Potentially fetch announced blocks directly after processing headers. */
    void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header);
    void UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    /** Respond to a getblocktxn request with the transactions from the given block. */
    void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req);

    void AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)

    /** Send a message to the peer (pre-serialized or built via NetMsg::Make). */
    void PushMessage(CNode& node, CSerializedNetMsg&& msg) const { m_connman.PushMessage(&node, std::move(msg)); }
    template <typename... Args>
    void MakeAndPushMessage(CNode& node, std::string msg_type, Args&&... args) const
    {
        m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...));
    }

    /** Send our version message to this peer. */
    void PushNodeVersion(CNode& pnode, const Peer& peer);

    /** Send a ping if the interval has elapsed or one was queued. */
    void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now);

    /** Send queued addresses if the send timer has expired. */
    void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    /** Send a sendheaders message once the peer qualifies for headers announcements. */
    void MaybeSendSendHeaders(CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    /** Relay an address to a limited number of peers, excluding the originator. */
    void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);

    /** Send a feefilter update if our filter changed sufficiently. */
    void MaybeSendFeefilter(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);


    const CChainParams& m_chainparams;
    CConnman& m_connman;
    AddrMan& m_addrman;
    /** May be nullptr when ban list management is disabled. */
    BanMan* const m_banman;
    ChainstateManager& m_chainman;
    CTxMemPool& m_mempool;
    TxRequestTracker m_txrequest GUARDED_BY(::cs_main);
    std::unique_ptr<TxReconciliationTracker> m_txreconciliation;

    /** Height and time of our best known block, set via SetBestBlock(). */
    std::atomic<int> m_best_height{-1};
    std::atomic<std::chrono::seconds> m_best_block_time{0s};

    /** Next time to check for a stale tip. */
    std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s};

    const Options m_opts;

    bool RejectIncomingTxs(const CNode& peer) const;

    /** Whether initial block download has finished (latched once true). */
    bool m_initial_sync_finished GUARDED_BY(cs_main){false};

    /** Protects m_peer_map. */
    mutable Mutex m_peer_mutex;
    /** Map of all Peer objects, keyed by peer id. */
    std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);

    /** Map of validation-relevant per-peer state, keyed by peer id. */
    std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main);

    /** Look up a peer's CNodeState; nullptr if absent. */
    const CNodeState* State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    CNodeState* State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    uint32_t GetFetchFlags(const Peer& peer) const;

    /** Shared next-send time for tx invs to all inbound peers (see NextInvToInbounds). */
    std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};

    /** Number of peers we have started headers sync with. */
    int nSyncStarted GUARDED_BY(cs_main) = 0;

    /** Hash of the block inv that most recently triggered a headers sync. */
    uint256 m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){};

    /** Source peer for each block being processed, and whether it should be punished for invalidity. */
    std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);

    /** Number of peers with wtxid relay enabled. */
    std::atomic<int> m_wtxid_relay_peers{0};

    /** Number of outbound peers protected from chain-sync disconnection. */
    int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;

    /** Number of preferred block-download peers. */
    int m_num_preferred_download_peers GUARDED_BY(cs_main){0};

    /** Adaptive stalling timeout for block download (see BLOCK_STALLING_TIMEOUT_*). */
    std::atomic<std::chrono::seconds> m_block_stalling_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT};

    bool AlreadyHaveTx(const GenTxid& gtxid)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_recent_confirmed_transactions_mutex);

    /** Filter of recently rejected transactions; reset when the chain tip changes. */
    CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000, 0.000'001};
    uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);

    /*
     * Filter for transactions that have been recently confirmed.
     * We use this to avoid requesting transactions that have already been
     * confirmed.
     *
     * Blocks don't typically have more than 4000 transactions, so this should
     * be at least six blocks (~1 hr) worth of transactions that we can store,
     * inserting both a txid and wtxid for every observed transaction.
     * If the number of transactions appearing in a block goes up, or if we are
     * seeing getdata requests more than an hour after initial announcement, we
     * can increase this number.
     * The false positive rate of 1/1M should come out to less than 1
     * transaction per day that would be inadvertently ignored (which is the
     * same probability that we have in the reject filter).
     */
    Mutex m_recent_confirmed_transactions_mutex;
    CRollingBloomFilter m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex){48'000, 0.000'001};

    /** Compute the shared next-send time for tx invs to inbound peers. */
    std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now,
                                                std::chrono::seconds average_interval);


    // All of the following cache a recent block, and are protected by m_most_recent_block_mutex
    Mutex m_most_recent_block_mutex;
    std::shared_ptr<const CBlock> m_most_recent_block GUARDED_BY(m_most_recent_block_mutex);
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex);
    uint256 m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex);
    std::unique_ptr<const std::map<uint256, CTransactionRef>> m_most_recent_block_txs GUARDED_BY(m_most_recent_block_mutex);

    // Data about the low-work headers synchronization, aggregated from all peers' HeadersSyncStates.
    Mutex m_headers_presync_mutex;
    /** (total work, optional (height, timestamp) of the last header) per presyncing peer. */
    using HeadersPresyncStats = std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
    std::map<NodeId, HeadersPresyncStats> m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex) {};
    /** Peer with the most-work headers presync (-1 when none). */
    NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex) {-1};
    std::atomic_bool m_headers_presync_should_signal{false};

    /** Height of the highest block fast-announced via compact block. */
    int m_highest_fast_announce GUARDED_BY(::cs_main){0};

    /** Whether the given block hash is in flight from some peer. */
    bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Whether the given block hash is in flight from an outbound peer. */
    bool IsBlockRequestedFromOutbound(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Remove the in-flight entry for this block (from one peer, or from all if from_peer is nullopt). */
    void RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /* Mark a block as in flight
     * Returns false, still setting pit, if the block was already in flight from the same peer
     * pit will only be valid as long as the same cs_main lock is being held
     */
    bool BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Select blocks to request from this peer, and report a staller if download is blocked. */
    void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    void TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex* from_tip, const CBlockIndex* target_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    void FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain=nullptr, NodeId* nodeStaller=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /* Multimap used to preserve insertion order */
    typedef std::multimap<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator>> BlockDownloadMap;
    BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main);

    /** When our tip was last updated. */
    std::atomic<std::chrono::seconds> m_last_tip_update{0s};

    CTransactionRef FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid)

    void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex, NetEventsInterface::g_msgproc_mutex)

    /** Submit a block for validation, attributing it to the given peer. */
    void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked);

    /** Handle a blocktxn response completing a compact block reconstruction. */
    void ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex);

    /** Potentially enable high-bandwidth compact block announcements from this peer. */
    void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Peers selected to announce blocks via compact block (headerandids); bounded — see BIP152. */
    std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);

    /** Number of peers from which we're currently downloading blocks. */
    int m_peers_downloading_from GUARDED_BY(cs_main) = 0;

    /** Storage for orphan transactions (missing parents). */
    TxOrphanage m_orphanage;

    void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    /** Circular buffer of extra transactions kept for compact block reconstruction. */
    std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex);
    /** Insertion position into the circular buffer above. */
    size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0;

    /** Fold the peer's hashLastUnknownBlock into pindexBestKnownBlock once the header is known. */
    void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    /** Record that a peer announced the given block hash. */
    void UpdateBlockAvailability(NodeId nodeid, const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    int64_t ApproximateBestBlockDepth() const;

    /** Whether we may serve the given block to peers (anti-fingerprinting etc.). */
    bool BlockRequestAllowed(const CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    bool AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv)
        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);

    /** Validate a BIP157 filter request and resolve its stop index and filter index. */
    bool PrepareBlockFilterRequest(CNode& node, Peer& peer,
                                   BlockFilterType filter_type, uint32_t start_height,
                                   const uint256& stop_hash, uint32_t max_height_diff,
                                   const CBlockIndex*& stop_index,
                                   BlockFilterIndex*& filter_index);

    /** Handle a getcfilters request (BIP157). */
    void ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv);

    /** Handle a getcfheaders request (BIP157). */
    void ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv);

    /** Handle a getcfcheckpt request (BIP157). */
    void ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv);

    /** Enable address relay with this peer if appropriate; returns whether it is enabled. */
    bool SetupAddressRelay(const CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    void AddAddressKnown(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
    void PushAddress(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
};
1087 
1088 const CNodeState* PeerManagerImpl::State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1089 {
1090  std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
1091  if (it == m_node_states.end())
1092  return nullptr;
1093  return &it->second;
1094 }
1095 
{
    // Delegate to the const overload and cast the result back to mutable,
    // so the lookup logic lives in one place.
    return const_cast<CNodeState*>(std::as_const(*this).State(pnode));
}
1100 
1106 static bool IsAddrCompatible(const Peer& peer, const CAddress& addr)
1107 {
1108  return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
1109 }
1110 
1111 void PeerManagerImpl::AddAddressKnown(Peer& peer, const CAddress& addr)
1112 {
1113  assert(peer.m_addr_known);
1114  peer.m_addr_known->insert(addr.GetKey());
1115 }
1116 
1117 void PeerManagerImpl::PushAddress(Peer& peer, const CAddress& addr)
1118 {
1119  // Known checking here is only to save space from duplicates.
1120  // Before sending, we'll filter it again for known addresses that were
1121  // added after addresses were pushed.
1122  assert(peer.m_addr_known);
1123  if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) && IsAddrCompatible(peer, addr)) {
1124  if (peer.m_addrs_to_send.size() >= MAX_ADDR_TO_SEND) {
1125  peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] = addr;
1126  } else {
1127  peer.m_addrs_to_send.push_back(addr);
1128  }
1129  }
1130 }
1131 
1132 static void AddKnownTx(Peer& peer, const uint256& hash)
1133 {
1134  auto tx_relay = peer.GetTxRelay();
1135  if (!tx_relay) return;
1136 
1137  LOCK(tx_relay->m_tx_inventory_mutex);
1138  tx_relay->m_tx_inventory_known_filter.insert(hash);
1139 }
1140 
1142 static bool CanServeBlocks(const Peer& peer)
1143 {
1144  return peer.m_their_services & (NODE_NETWORK|NODE_NETWORK_LIMITED);
1145 }
1146 
1149 static bool IsLimitedPeer(const Peer& peer)
1150 {
1151  return (!(peer.m_their_services & NODE_NETWORK) &&
1152  (peer.m_their_services & NODE_NETWORK_LIMITED));
1153 }
1154 
1156 static bool CanServeWitnesses(const Peer& peer)
1157 {
1158  return peer.m_their_services & NODE_WITNESS;
1159 }
1160 
/** Return the shared next-send time for tx invs to inbound peers, refreshing it
 *  with a new exponentially distributed deadline once it has expired. A single
 *  timer is shared across all inbound peers to reduce timing information leaks. */
std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
                                                             std::chrono::seconds average_interval)
{
    if (m_next_inv_to_inbounds.load() < now) {
        // If this function were called from multiple threads simultaneously,
        // it would be possible that both update the next send variable, and
        // return a different result to their caller. This is not possible in
        // practice as only the net processing thread invokes this function.
        m_next_inv_to_inbounds = GetExponentialRand(now, average_interval);
    }
    return m_next_inv_to_inbounds;
}
1172 
1173 bool PeerManagerImpl::IsBlockRequested(const uint256& hash)
1174 {
1175  return mapBlocksInFlight.count(hash);
1176 }
1177 
1178 bool PeerManagerImpl::IsBlockRequestedFromOutbound(const uint256& hash)
1179 {
1180  for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
1181  auto [nodeid, block_it] = range.first->second;
1182  CNodeState& nodestate = *Assert(State(nodeid));
1183  if (!nodestate.m_is_inbound) return true;
1184  }
1185 
1186  return false;
1187 }
1188 
/** Remove this block from the in-flight map — for a single peer when from_peer
 *  is given, otherwise for every peer that has it in flight — updating each
 *  affected peer's download bookkeeping. */
void PeerManagerImpl::RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer)
{
    auto range = mapBlocksInFlight.equal_range(hash);
    if (range.first == range.second) {
        // Block was not requested from any peer
        return;
    }

    // We should not have requested too many of this block
    Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);

    while (range.first != range.second) {
        auto [node_id, list_it] = range.first->second;

        // When a specific peer was given, skip (and keep) entries belonging to
        // other peers.
        if (from_peer && *from_peer != node_id) {
            range.first++;
            continue;
        }

        CNodeState& state = *Assert(State(node_id));

        if (state.vBlocksInFlight.begin() == list_it) {
            // First block on the queue was received, update the start download time for the next one
            state.m_downloading_since = std::max(state.m_downloading_since, GetTime<std::chrono::microseconds>());
        }
        state.vBlocksInFlight.erase(list_it);

        if (state.vBlocksInFlight.empty()) {
            // Last validated block on the queue for this peer was received.
            m_peers_downloading_from--;
        }
        // The peer made progress on this block, so it is no longer stalling.
        state.m_stalling_since = 0us;

        // erase() returns the iterator following the removed element, keeping
        // the loop valid while the multimap is mutated.
        range.first = mapBlocksInFlight.erase(range.first);
    }
}
1225 
// Mark a block as being requested (in flight) from the given peer.
// Returns false — leaving all existing state untouched — if the block is
// already in flight from this same peer; returns true after recording the new
// request. If `pit` is non-null, it receives a pointer to the queue entry's
// iterator, and a PartiallyDownloadedBlock is allocated for the entry so a
// compact block can later be reconstructed into it.
bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit)
{
    const uint256& hash{block.GetBlockHash()};

    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // Sanity bound on how many peers may have the same block in flight.
    Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);

    // Short-circuit most stuff in case it is from the same node
    for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
        if (range.first->second.first == nodeid) {
            if (pit) {
                // Hand back the existing entry rather than creating a duplicate.
                *pit = &range.first->second.second;
            }
            return false;
        }
    }

    // Make sure it's not being fetched already from same peer.
    RemoveBlockRequest(hash, nodeid);

    // Append the request to this peer's in-flight queue; only allocate a
    // PartiallyDownloadedBlock when the caller wants one (pit != nullptr).
    std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
            {&block, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)});
    if (state->vBlocksInFlight.size() == 1) {
        // We're starting a block download (batch) from this peer.
        state->m_downloading_since = GetTime<std::chrono::microseconds>();
        m_peers_downloading_from++;
    }
    // Index the request by block hash so it can be found/cancelled later.
    auto itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it)));
    if (pit) {
        *pit = &itInFlight->second.second;
    }
    return true;
}
1261 
// Consider promoting this peer to a BIP152 high-bandwidth compact-block
// announcer. Keeps at most 3 such peers (lNodesAnnouncingHeaderAndIDs),
// evicting the oldest when a new one is added, while trying not to evict our
// last outbound high-bandwidth peer in favour of an inbound one.
// NOTE(review): a line between the brace and the first comment appears to be
// missing in this copy (likely a lock assertion) — confirm against upstream.
void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
{

    // When in -blocksonly mode, never request high-bandwidth mode from peers. Our
    // mempool will not contain the transactions necessary to reconstruct the
    // compact block.
    if (m_opts.ignore_incoming_txs) return;

    CNodeState* nodestate = State(nodeid);
    if (!nodestate || !nodestate->m_provides_cmpctblocks) {
        // Don't request compact blocks if the peer has not signalled support
        return;
    }

    int num_outbound_hb_peers = 0;
    for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
        if (*it == nodeid) {
            // Already a high-bandwidth peer: just move it to the back (most
            // recently refreshed), nothing to announce.
            lNodesAnnouncingHeaderAndIDs.erase(it);
            lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
            return;
        }
        CNodeState *state = State(*it);
        if (state != nullptr && !state->m_is_inbound) ++num_outbound_hb_peers;
    }
    if (nodestate->m_is_inbound) {
        // If we're adding an inbound HB peer, make sure we're not removing
        // our last outbound HB peer in the process.
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1) {
            CNodeState *remove_node = State(lNodesAnnouncingHeaderAndIDs.front());
            if (remove_node != nullptr && !remove_node->m_is_inbound) {
                // Put the HB outbound peer in the second slot, so that it
                // doesn't get removed.
                std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
            }
        }
    }
    m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {

        if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
            // As per BIP152, we only get 3 of our peers to announce
            // blocks using compact encodings.
            m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this](CNode* pnodeStop){
                MakeAndPushMessage(*pnodeStop, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION);
                // save BIP152 bandwidth state: we select peer to be low-bandwidth
                pnodeStop->m_bip152_highbandwidth_to = false;
                return true;
            });
            lNodesAnnouncingHeaderAndIDs.pop_front();
        }
        MakeAndPushMessage(*pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/true, /*version=*/CMPCTBLOCKS_VERSION);
        // save BIP152 bandwidth state: we select peer to be high-bandwidth
        pfrom->m_bip152_highbandwidth_to = true;
        lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
        return true;
    });
}
1319 
// Returns true when our tip looks potentially stale: no block has arrived for
// more than three expected block intervals and we have no block downloads in
// flight. On the first call the timestamp is initialized rather than
// reporting stale.
// NOTE(review): a line after the opening brace appears to be missing in this
// copy (likely a lock assertion) — confirm against upstream.
bool PeerManagerImpl::TipMayBeStale()
{
    const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
    if (m_last_tip_update.load() == 0s) {
        // First invocation: seed the last-update time with "now".
        m_last_tip_update = GetTime<std::chrono::seconds>();
    }
    return m_last_tip_update.load() < GetTime<std::chrono::seconds>() - std::chrono::seconds{consensusParams.nPowTargetSpacing * 3} && mapBlocksInFlight.empty();
}
1329 
1330 int64_t PeerManagerImpl::ApproximateBestBlockDepth() const
1331 {
1332  return (GetTime<std::chrono::seconds>() - m_best_block_time.load()).count() / m_chainparams.GetConsensus().nPowTargetSpacing;
1333 }
1334 
1335 bool PeerManagerImpl::CanDirectFetch()
1336 {
1337  return m_chainman.ActiveChain().Tip()->Time() > NodeClock::now() - m_chainparams.GetConsensus().PowTargetSpacing() * 20;
1338 }
1339 
1340 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1341 {
1342  if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
1343  return true;
1344  if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
1345  return true;
1346  return false;
1347 }
1348 
1349 void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
1350  CNodeState *state = State(nodeid);
1351  assert(state != nullptr);
1352 
1353  if (!state->hashLastUnknownBlock.IsNull()) {
1354  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock);
1355  if (pindex && pindex->nChainWork > 0) {
1356  if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
1357  state->pindexBestKnownBlock = pindex;
1358  }
1359  state->hashLastUnknownBlock.SetNull();
1360  }
1361  }
1362 }
1363 
1364 void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
1365  CNodeState *state = State(nodeid);
1366  assert(state != nullptr);
1367 
1368  ProcessBlockAvailability(nodeid);
1369 
1370  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
1371  if (pindex && pindex->nChainWork > 0) {
1372  // An actually better block was announced.
1373  if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
1374  state->pindexBestKnownBlock = pindex;
1375  }
1376  } else {
1377  // An unknown block was announced; just assume that the latest one is the best one.
1378  state->hashLastUnknownBlock = hash;
1379  }
1380 }
1381 
// Logic for calculating which blocks to download from a given peer, given our current tip.
// Appends up to `count` block indexes to `vBlocks`; if the download window is
// exhausted because another peer's request is blocking progress, that peer's
// id is written to `nodeStaller`.
void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller)
{
    if (count == 0)
        return;

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(peer.m_id);
    assert(state != nullptr);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(peer.m_id);

    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < m_chainman.ActiveChain().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
        // This peer has nothing interesting.
        return;
    }

    if (state->pindexLastCommonBlock == nullptr) {
        // Bootstrap quickly by guessing a parent of our best tip is the forking point.
        // Guessing wrong in either direction is not a problem.
        state->pindexLastCommonBlock = m_chainman.ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight, m_chainman.ActiveChain().Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
    // of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
        return;

    const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
    // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
    // download that next block if the window were 1 larger.
    int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;

    FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd, &m_chainman.ActiveChain(), &nodeStaller);
}
1420 
// Select historical (pre-snapshot) blocks to download from this peer for the
// background chainstate, walking from `from_tip` towards `target_block`
// (the assumeutxo snapshot base). Appends to `vBlocks` up to `count` entries.
void PeerManagerImpl::TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex *from_tip, const CBlockIndex* target_block)
{
    Assert(from_tip);
    Assert(target_block);

    // The caller's buffer is already full.
    if (vBlocks.size() >= count) {
        return;
    }

    vBlocks.reserve(count);
    CNodeState *state = Assert(State(peer.m_id));

    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) != target_block) {
        // This peer can't provide us the complete series of blocks leading up to the
        // assumeutxo snapshot base.
        //
        // Presumably this peer's chain has less work than our ActiveChain()'s tip, or else we
        // will eventually crash when we try to reorg to it. Let other logic
        // deal with whether we disconnect this peer.
        //
        // TODO at some point in the future, we might choose to request what blocks
        // this peer does have from the historical chain, despite it not having a
        // complete history beneath the snapshot base.
        return;
    }

    // No active chain / staller tracking for the background download.
    FindNextBlocks(vBlocks, peer, state, from_tip, count, std::min<int>(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW, target_block->nHeight));
}
1449 
// Shared worker for FindNextBlocksToDownload/TryDownloadingHistoricalBlocks:
// walk from `pindexWalk` towards the peer's best known block, collecting up to
// `count` blocks that are neither stored nor in flight, never going past
// `nWindowEnd + 1`. When `activeChain` is given, pindexLastCommonBlock is
// advanced over blocks we already have; when `nodeStaller` is given, it
// receives the peer whose in-flight request is blocking the window.
void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain, NodeId* nodeStaller)
{
    std::vector<const CBlockIndex*> vToFetch;
    int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
        // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
        // as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        // Jump forward nToFetch blocks, then fill vToFetch backwards via pprev.
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
        // are not yet downloaded and not in flight to vBlocks. In the meantime, update
        // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
        // already part of our chain (and therefore don't need it even if pruned).
        for (const CBlockIndex* pindex : vToFetch) {
            if (!pindex->IsValid(BLOCK_VALID_TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (!CanServeWitnesses(peer) && DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) {
                // We wouldn't download this block or its descendants from this peer.
                return;
            }
            if (pindex->nStatus & BLOCK_HAVE_DATA || (activeChain && activeChain->Contains(pindex))) {
                if (activeChain && pindex->HaveNumChainTxs())
                    state->pindexLastCommonBlock = pindex;
            } else if (!IsBlockRequested(pindex->GetBlockHash())) {
                // The block is not already downloaded, and not yet in flight.
                if (pindex->nHeight > nWindowEnd) {
                    // We reached the end of the window.
                    if (vBlocks.size() == 0 && waitingfor != peer.m_id) {
                        // We aren't able to fetch anything, but we would be if the download window was one larger.
                        if (nodeStaller) *nodeStaller = waitingfor;
                    }
                    return;
                }
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
                    return;
                }
            } else if (waitingfor == -1) {
                // This is the first already-in-flight block.
                waitingfor = mapBlocksInFlight.lower_bound(pindex->GetBlockHash())->second.first;
            }
        }
    }
}
1504 
1505 } // namespace
1506 
1507 void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer)
1508 {
1509  uint64_t my_services{peer.m_our_services};
1510  const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())};
1511  uint64_t nonce = pnode.GetLocalNonce();
1512  const int nNodeStartingHeight{m_best_height};
1513  NodeId nodeid = pnode.GetId();
1514  CAddress addr = pnode.addr;
1515 
1516  CService addr_you = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ? addr : CService();
1517  uint64_t your_services{addr.nServices};
1518 
1519  const bool tx_relay{!RejectIncomingTxs(pnode)};
1520  MakeAndPushMessage(pnode, NetMsgType::VERSION, PROTOCOL_VERSION, my_services, nTime,
1521  your_services, CNetAddr::V1(addr_you), // Together the pre-version-31402 serialization of CAddress "addrYou" (without nTime)
1522  my_services, CNetAddr::V1(CService{}), // Together the pre-version-31402 serialization of CAddress "addrMe" (without nTime)
1523  nonce, strSubVersion, nNodeStartingHeight, tx_relay);
1524 
1525  if (fLogIPs) {
1526  LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, them=%s, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToStringAddrPort(), tx_relay, nodeid);
1527  } else {
1528  LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, tx_relay, nodeid);
1529  }
1530 }
1531 
1532 void PeerManagerImpl::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)
1533 {
1534  AssertLockHeld(::cs_main); // For m_txrequest
1535  NodeId nodeid = node.GetId();
1536  if (!node.HasPermission(NetPermissionFlags::Relay) && m_txrequest.Count(nodeid) >= MAX_PEER_TX_ANNOUNCEMENTS) {
1537  // Too many queued announcements from this peer
1538  return;
1539  }
1540  const CNodeState* state = State(nodeid);
1541 
1542  // Decide the TxRequestTracker parameters for this announcement:
1543  // - "preferred": if fPreferredDownload is set (= outbound, or NetPermissionFlags::NoBan permission)
1544  // - "reqtime": current time plus delays for:
1545  // - NONPREF_PEER_TX_DELAY for announcements from non-preferred connections
1546  // - TXID_RELAY_DELAY for txid announcements while wtxid peers are available
1547  // - OVERLOADED_PEER_TX_DELAY for announcements from peers which have at least
1548  // MAX_PEER_TX_REQUEST_IN_FLIGHT requests in flight (and don't have NetPermissionFlags::Relay).
1549  auto delay{0us};
1550  const bool preferred = state->fPreferredDownload;
1551  if (!preferred) delay += NONPREF_PEER_TX_DELAY;
1552  if (!gtxid.IsWtxid() && m_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY;
1553  const bool overloaded = !node.HasPermission(NetPermissionFlags::Relay) &&
1554  m_txrequest.CountInFlight(nodeid) >= MAX_PEER_TX_REQUEST_IN_FLIGHT;
1555  if (overloaded) delay += OVERLOADED_PEER_TX_DELAY;
1556  m_txrequest.ReceivedInv(nodeid, gtxid, preferred, current_time + delay);
1557 }
1558 
1559 void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
1560 {
1561  LOCK(cs_main);
1562  CNodeState *state = State(node);
1563  if (state) state->m_last_block_announcement = time_in_seconds;
1564 }
1565 
1566 void PeerManagerImpl::InitializeNode(CNode& node, ServiceFlags our_services)
1567 {
1568  NodeId nodeid = node.GetId();
1569  {
1570  LOCK(cs_main);
1571  m_node_states.emplace_hint(m_node_states.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(node.IsInboundConn()));
1572  assert(m_txrequest.Count(nodeid) == 0);
1573  }
1574  PeerRef peer = std::make_shared<Peer>(nodeid, our_services);
1575  {
1576  LOCK(m_peer_mutex);
1577  m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
1578  }
1579  if (!node.IsInboundConn()) {
1580  PushNodeVersion(node, *peer);
1581  }
1582 }
1583 
1584 void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler)
1585 {
1586  std::set<uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();
1587 
1588  for (const auto& txid : unbroadcast_txids) {
1589  CTransactionRef tx = m_mempool.get(txid);
1590 
1591  if (tx != nullptr) {
1592  RelayTransaction(txid, tx->GetWitnessHash());
1593  } else {
1594  m_mempool.RemoveUnbroadcastTx(txid, true);
1595  }
1596  }
1597 
1598  // Schedule next run for 10-15 minutes in the future.
1599  // We add randomness on every cycle to avoid the possibility of P2P fingerprinting.
1600  const std::chrono::milliseconds delta = 10min + GetRandMillis(5min);
1601  scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
1602 }
1603 
// Tear down all per-peer state for a disconnected node: remove its Peer and
// CNodeState, cancel its in-flight block/tx requests, update global counters,
// and (for well-behaved full outbound peers) mark its address as connected in
// addrman.
void PeerManagerImpl::FinalizeNode(const CNode& node)
{
    NodeId nodeid = node.GetId();
    int misbehavior{0};
    {
        LOCK(cs_main);
        {
            // We remove the PeerRef from g_peer_map here, but we don't always
            // destruct the Peer. Sometimes another thread is still holding a
            // PeerRef, so the refcount is >= 1. Be careful not to do any
            // processing here that assumes Peer won't be changed before it's
            // destructed.
            PeerRef peer = RemovePeer(nodeid);
            assert(peer != nullptr);
            misbehavior = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score);
            m_wtxid_relay_peers -= peer->m_wtxid_relay;
            assert(m_wtxid_relay_peers >= 0);
        }
        CNodeState *state = State(nodeid);
        assert(state != nullptr);

        if (state->fSyncStarted)
            nSyncStarted--;

        // Remove every mapBlocksInFlight entry attributed to this peer.
        for (const QueuedBlock& entry : state->vBlocksInFlight) {
            auto range = mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash());
            while (range.first != range.second) {
                auto [node_id, list_it] = range.first->second;
                if (node_id != nodeid) {
                    range.first++;
                } else {
                    range.first = mapBlocksInFlight.erase(range.first);
                }
            }
        }
        m_orphanage.EraseForPeer(nodeid);
        m_txrequest.DisconnectedPeer(nodeid);
        if (m_txreconciliation) m_txreconciliation->ForgetPeer(nodeid);
        // Keep the global per-peer accounting in sync with this removal.
        m_num_preferred_download_peers -= state->fPreferredDownload;
        m_peers_downloading_from -= (!state->vBlocksInFlight.empty());
        assert(m_peers_downloading_from >= 0);
        m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
        assert(m_outbound_peers_with_protect_from_disconnect >= 0);

        m_node_states.erase(nodeid);

        if (m_node_states.empty()) {
            // Do a consistency check after the last peer is removed.
            assert(mapBlocksInFlight.empty());
            assert(m_num_preferred_download_peers == 0);
            assert(m_peers_downloading_from == 0);
            assert(m_outbound_peers_with_protect_from_disconnect == 0);
            assert(m_wtxid_relay_peers == 0);
            assert(m_txrequest.Size() == 0);
            assert(m_orphanage.Size() == 0);
        }
    } // cs_main
    if (node.fSuccessfullyConnected && misbehavior == 0 &&
        !node.IsBlockOnlyConn() && !node.IsInboundConn()) {
        // Only change visible addrman state for full outbound peers. We don't
        // call Connected() for feeler connections since they don't have
        // fSuccessfullyConnected set.
        m_addrman.Connected(node.addr);
    }
    {
        LOCK(m_headers_presync_mutex);
        m_headers_presync_stats.erase(nodeid);
    }
    LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
}
1674 
1675 bool PeerManagerImpl::HasAllDesirableServiceFlags(ServiceFlags services) const
1676 {
1677  // Shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services)
1678  return !(GetDesirableServiceFlags(services) & (~services));
1679 }
1680 
// Compute which service flags we consider desirable for a peer advertising
// `services`, treating NODE_NETWORK_LIMITED peers specially when we are close
// to the tip.
// NOTE(review): the return statements of both branches appear to be missing
// from this copy of the file — confirm the function body against upstream.
ServiceFlags PeerManagerImpl::GetDesirableServiceFlags(ServiceFlags services) const
{
    if (services & NODE_NETWORK_LIMITED) {
        // Limited peers are desirable when we are close to the tip.
        if (ApproximateBestBlockDepth() < NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS) {
        }
    }
}
1691 
1692 PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const
1693 {
1694  LOCK(m_peer_mutex);
1695  auto it = m_peer_map.find(id);
1696  return it != m_peer_map.end() ? it->second : nullptr;
1697 }
1698 
1699 PeerRef PeerManagerImpl::RemovePeer(NodeId id)
1700 {
1701  PeerRef ret;
1702  LOCK(m_peer_mutex);
1703  auto it = m_peer_map.find(id);
1704  if (it != m_peer_map.end()) {
1705  ret = std::move(it->second);
1706  m_peer_map.erase(it);
1707  }
1708  return ret;
1709 }
1710 
// Fill `stats` with a snapshot of this peer's sync/relay state for RPC
// reporting. Returns false if the peer's state no longer exists.
bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const
{
    {
        // First gather the cs_main-guarded validation state.
        LOCK(cs_main);
        const CNodeState* state = State(nodeid);
        if (state == nullptr)
            return false;
        stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
        stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
        for (const QueuedBlock& queue : state->vBlocksInFlight) {
            if (queue.pindex)
                stats.vHeightInFlight.push_back(queue.pindex->nHeight);
        }
    }

    PeerRef peer = GetPeerRef(nodeid);
    if (peer == nullptr) return false;
    stats.their_services = peer->m_their_services;
    stats.m_starting_height = peer->m_starting_height;
    // It is common for nodes with good ping times to suddenly become lagged,
    // due to a new block arriving or other large transfer.
    // Merely reporting pingtime might fool the caller into thinking the node was still responsive,
    // since pingtime does not update until the ping is complete, which might take a while.
    // So, if a ping is taking an unusually long time in flight,
    // the caller can immediately detect that this is happening.
    auto ping_wait{0us};
    if ((0 != peer->m_ping_nonce_sent) && (0 != peer->m_ping_start.load().count())) {
        ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
    }

    // Tx-relay data only exists for peers we relay transactions with.
    if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
        stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs);
        stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load();
    } else {
        stats.m_relay_txs = false;
        stats.m_fee_filter_received = 0;
    }

    stats.m_ping_wait = ping_wait;
    stats.m_addr_processed = peer->m_addr_processed.load();
    stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
    stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load();
    {
        LOCK(peer->m_headers_sync_mutex);
        if (peer->m_headers_sync) {
            stats.presync_height = peer->m_headers_sync->GetPresyncHeight();
        }
    }

    return true;
}
1762 
1763 void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx)
1764 {
1765  if (m_opts.max_extra_txs <= 0)
1766  return;
1767  if (!vExtraTxnForCompact.size())
1768  vExtraTxnForCompact.resize(m_opts.max_extra_txs);
1769  vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetWitnessHash(), tx);
1770  vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs;
1771 }
1772 
1773 void PeerManagerImpl::Misbehaving(Peer& peer, int howmuch, const std::string& message)
1774 {
1775  assert(howmuch > 0);
1776 
1777  LOCK(peer.m_misbehavior_mutex);
1778  const int score_before{peer.m_misbehavior_score};
1779  peer.m_misbehavior_score += howmuch;
1780  const int score_now{peer.m_misbehavior_score};
1781 
1782  const std::string message_prefixed = message.empty() ? "" : (": " + message);
1783  std::string warning;
1784 
1785  if (score_now >= DISCOURAGEMENT_THRESHOLD && score_before < DISCOURAGEMENT_THRESHOLD) {
1786  warning = " DISCOURAGE THRESHOLD EXCEEDED";
1787  peer.m_should_discourage = true;
1788  }
1789 
1790  LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s%s\n",
1791  peer.m_id, score_before, score_now, warning, message_prefixed);
1792 }
1793 
// Decide whether to punish a peer based on a block validation result.
// Returns true when the peer was punished (Misbehaving was applied).
// NOTE(review): the `case` labels of this switch appear to be missing from
// this copy of the file — confirm the switch body against upstream before
// editing; the groupings below follow the surviving comments.
bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
                                              bool via_compact_block, const std::string& message)
{
    PeerRef peer{GetPeerRef(nodeid)};
    switch (state.GetResult()) {
        break;
        // We didn't try to process the block because the header chain may have
        // too little work.
        break;
    // The node is providing invalid data:
        if (!via_compact_block) {
            // Compact-block deliveries are exempt from punishment here.
            if (peer) Misbehaving(*peer, 100, message);
            return true;
        }
        break;
    {
        LOCK(cs_main);
        CNodeState *node_state = State(nodeid);
        if (node_state == nullptr) {
            break;
        }

        // Discourage outbound (but not inbound) peers if on an invalid chain.
        // Exempt HB compact block peers. Manual connections are always protected from discouragement.
        if (!via_compact_block && !node_state->m_is_inbound) {
            if (peer) Misbehaving(*peer, 100, message);
            return true;
        }
        break;
    }
        if (peer) Misbehaving(*peer, 100, message);
        return true;
    // Conflicting (but not necessarily invalid) data or different policy:
        // TODO: Handle this much more gracefully (10 DoS points is super arbitrary)
        if (peer) Misbehaving(*peer, 10, message);
        return true;
        break;
    }
    if (message != "") {
        LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
    }
    return false;
}
1848 
// Decide whether to punish a peer based on a transaction validation result.
// Returns true when the peer was punished.
// NOTE(review): the `case` labels of this switch appear to be missing from
// this copy of the file — confirm the switch body against upstream before
// editing.
bool PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
{
    PeerRef peer{GetPeerRef(nodeid)};
    switch (state.GetResult()) {
        break;
    // The node is providing invalid data:
        if (peer) Misbehaving(*peer, 100, "");
        return true;
    // Conflicting (but not necessarily invalid) data or different policy:
        break;
    }
    return false;
}
1876 
// Whether we are willing to serve this block to a peer: either it is on our
// active chain, or it is a fully-validated recent block close in time and
// work to our best header (anti-fingerprinting for stale blocks).
// NOTE(review): a line after the opening brace appears to be missing in this
// copy (likely a lock assertion) — confirm against upstream.
bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
{
    if (m_chainman.ActiveChain().Contains(pindex)) return true;
    return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (m_chainman.m_best_header != nullptr) &&
           (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) &&
           (GetBlockProofEquivalentTime(*m_chainman.m_best_header, *pindex, *m_chainman.m_best_header, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
}
1885 
1886 std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index)
1887 {
1888  if (m_chainman.m_blockman.LoadingBlocks()) return "Loading blocks ...";
1889 
1890  // Ensure this peer exists and hasn't been disconnected
1891  PeerRef peer = GetPeerRef(peer_id);
1892  if (peer == nullptr) return "Peer does not exist";
1893 
1894  // Ignore pre-segwit peers
1895  if (!CanServeWitnesses(*peer)) return "Pre-SegWit peer";
1896 
1897  LOCK(cs_main);
1898 
1899  // Forget about all prior requests
1900  RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt);
1901 
1902  // Mark block as in-flight
1903  if (!BlockRequested(peer_id, block_index)) return "Already requested from this peer";
1904 
1905  // Construct message to request the block
1906  const uint256& hash{block_index.GetBlockHash()};
1907  std::vector<CInv> invs{CInv(MSG_BLOCK | MSG_WITNESS_FLAG, hash)};
1908 
1909  // Send block request message to the peer
1910  bool success = m_connman.ForNode(peer_id, [this, &invs](CNode* node) {
1911  this->MakeAndPushMessage(*node, NetMsgType::GETDATA, invs);
1912  return true;
1913  });
1914 
1915  if (!success) return "Peer not fully connected";
1916 
1917  LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
1918  hash.ToString(), peer_id);
1919  return std::nullopt;
1920 }
1921 
// Factory for the concrete peer manager: hides PeerManagerImpl behind the
// abstract PeerManager interface for callers.
std::unique_ptr<PeerManager> PeerManager::make(CConnman& connman, AddrMan& addrman,
                                               BanMan* banman, ChainstateManager& chainman,
                                               CTxMemPool& pool, Options opts)
{
    return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman, pool, opts);
}
1928 
// Construct the peer manager, wiring in its collaborators and optionally
// enabling transaction reconciliation (Erlay) support.
PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman,
                                 BanMan* banman, ChainstateManager& chainman,
                                 CTxMemPool& pool, Options opts)
    : m_rng{opts.deterministic_rng},
      m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}, m_rng},
      m_chainparams(chainman.GetParams()),
      m_connman(connman),
      m_addrman(addrman),
      m_banman(banman),
      m_chainman(chainman),
      m_mempool(pool),
      m_opts{opts}
{
    // While Erlay support is incomplete, it must be enabled explicitly via -txreconciliation.
    // This argument can go away after Erlay support is complete.
    if (opts.reconcile_txs) {
        m_txreconciliation = std::make_unique<TxReconciliationTracker>(TXRECONCILIATION_VERSION);
    }
}
1948 
1949 void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler)
1950 {
1951  // Stale tip checking and peer eviction are on two different timers, but we
1952  // don't want them to get out of sync due to drift in the scheduler, so we
1953  // combine them in one function and schedule at the quicker (peer-eviction)
1954  // timer.
1955  static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
1956  scheduler.scheduleEvery([this] { this->CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
1957 
1958  // schedule next run for 10-15 minutes in the future
1959  const std::chrono::milliseconds delta = 10min + GetRandMillis(5min);
1960  scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
1961 }
1962 
// Validation-interface callback: a block was connected to a chainstate.
// Refreshes stale-tip bookkeeping, decays the dynamic block-stalling timeout,
// and (for the active chainstate only) clears mempool-related per-tx state
// for the block's transactions.
void PeerManagerImpl::BlockConnected(
    ChainstateRole role,
    const std::shared_ptr<const CBlock>& pblock,
    const CBlockIndex* pindex)
{
    // Update this for all chainstate roles so that we don't mistakenly see peers
    // helping us do background IBD as having a stale tip.
    m_last_tip_update = GetTime<std::chrono::seconds>();

    // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value
    auto stalling_timeout = m_block_stalling_timeout.load();
    Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT);
    if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) {
        const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT);
        // CAS so a concurrent update of the timeout is not clobbered.
        if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
            LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout));
        }
    }

    // The following task can be skipped since we don't maintain a mempool for
    // the ibd/background chainstate.
    if (role == ChainstateRole::BACKGROUND) {
        return;
    }
    m_orphanage.EraseForBlock(*pblock);

    {
        // Remember both txid and wtxid of each confirmed transaction so we
        // don't re-relay or re-request them.
        LOCK(m_recent_confirmed_transactions_mutex);
        for (const auto& ptx : pblock->vtx) {
            m_recent_confirmed_transactions.insert(ptx->GetHash().ToUint256());
            if (ptx->HasWitness()) {
                m_recent_confirmed_transactions.insert(ptx->GetWitnessHash().ToUint256());
            }
        }
    }
    {
        LOCK(cs_main);
        for (const auto& ptx : pblock->vtx) {
            m_txrequest.ForgetTxHash(ptx->GetHash());
            m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
        }
    }
}
2012 
2013 void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
2014 {
2015  // To avoid relay problems with transactions that were previously
2016  // confirmed, clear our filter of recently confirmed transactions whenever
2017  // there's a reorg.
2018  // This means that in a 1-block reorg (where 1 block is disconnected and
2019  // then another block reconnected), our filter will drop to having only one
2020  // block's worth of transactions in it, but that should be fine, since
2021  // presumably the most common case of relaying a confirmed transaction
2022  // should be just after a new block containing it is found.
2023  LOCK(m_recent_confirmed_transactions_mutex);
2024  m_recent_confirmed_transactions.reset();
2025 }
2026 
// Validation-interface callback for a new block whose PoW has been verified:
// fast-announce it as a compact block (BIP152 high-bandwidth) to peers that
// requested that mode, and cache the block for serving.
// NOTE(review): a line at the top of the ForEachNode lambda appears to be
// missing in this copy (likely a lock assertion) — confirm against upstream.
void PeerManagerImpl::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock)
{
    auto pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock);

    LOCK(cs_main);

    // Only fast-announce each height once.
    if (pindex->nHeight <= m_highest_fast_announce)
        return;
    m_highest_fast_announce = pindex->nHeight;

    if (!DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) return;

    uint256 hashBlock(pblock->GetHash());
    // Serialize the CMPCTBLOCK message lazily — only if at least one peer
    // actually needs it.
    const std::shared_future<CSerializedNetMsg> lazy_ser{
        std::async(std::launch::deferred, [&] { return NetMsg::Make(NetMsgType::CMPCTBLOCK, *pcmpctblock); })};

    {
        // Cache the block's transactions by both txid and wtxid for fast
        // lookup when serving peers.
        auto most_recent_block_txs = std::make_unique<std::map<uint256, CTransactionRef>>();
        for (const auto& tx : pblock->vtx) {
            most_recent_block_txs->emplace(tx->GetHash(), tx);
            most_recent_block_txs->emplace(tx->GetWitnessHash(), tx);
        }

        LOCK(m_most_recent_block_mutex);
        m_most_recent_block_hash = hashBlock;
        m_most_recent_block = pblock;
        m_most_recent_compact_block = pcmpctblock;
        m_most_recent_block_txs = std::move(most_recent_block_txs);
    }

    m_connman.ForEachNode([this, pindex, &lazy_ser, &hashBlock](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {

        if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect)
            return;
        ProcessBlockAvailability(pnode->GetId());
        CNodeState &state = *State(pnode->GetId());
        // If the peer has, or we announced to them the previous block already,
        // but we don't think they have this one, go ahead and announce it
        if (state.m_requested_hb_cmpctblocks && !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {

            LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock",
                    hashBlock.ToString(), pnode->GetId());

            const CSerializedNetMsg& ser_cmpctblock{lazy_ser.get()};
            PushMessage(*pnode, ser_cmpctblock.Copy());
            state.pindexBestHeaderSent = pindex;
        }
    });
}
2081 
2086 void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
2087 {
2088  SetBestBlock(pindexNew->nHeight, std::chrono::seconds{pindexNew->GetBlockTime()});
2089 
2090  // Don't relay inventory during initial block download.
2091  if (fInitialDownload) return;
2092 
2093  // Find the hashes of all blocks that weren't previously in the best chain.
2094  std::vector<uint256> vHashes;
2095  const CBlockIndex *pindexToAnnounce = pindexNew;
2096  while (pindexToAnnounce != pindexFork) {
2097  vHashes.push_back(pindexToAnnounce->GetBlockHash());
2098  pindexToAnnounce = pindexToAnnounce->pprev;
2099  if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
2100  // Limit announcements in case of a huge reorganization.
2101  // Rely on the peer's synchronization mechanism in that case.
2102  break;
2103  }
2104  }
2105 
2106  {
2107  LOCK(m_peer_mutex);
2108  for (auto& it : m_peer_map) {
2109  Peer& peer = *it.second;
2110  LOCK(peer.m_block_inv_mutex);
2111  for (const uint256& hash : reverse_iterate(vHashes)) {
2112  peer.m_blocks_for_headers_relay.push_back(hash);
2113  }
2114  }
2115  }
2116 
2117  m_connman.WakeMessageHandler();
2118 }
2119 
void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationState& state)
{
    LOCK(cs_main);

    const uint256 hash(block.GetHash());
    // Look up which peer (if any) gave us this block, and whether it came in
    // via compact block relay (the bool in the mapped pair).
    std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);

    // If the block failed validation, we know where it came from and we're still connected
    // to that peer, maybe punish.
    if (state.IsInvalid() &&
        it != mapBlockSource.end() &&
        State(it->second.first)) {
            MaybePunishNodeForBlock(/*nodeid=*/ it->second.first, state, /*via_compact_block=*/ !it->second.second);
    }
    // Check that:
    // 1. The block is valid
    // 2. We're not in initial block download
    // 3. This is currently the best block we're aware of. We haven't updated
    //    the tip yet so we have no way to check this directly here. Instead we
    //    just check that there are currently no other blocks in flight.
    //    (count(hash) == size() holds exactly when every in-flight request,
    //    if any, is for this very block.)
    else if (state.IsValid() &&
             !m_chainman.IsInitialBlockDownload() &&
             mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
        if (it != mapBlockSource.end()) {
            // The peer delivered what is likely our new tip; consider asking
            // it to announce future blocks as header-and-ids (compact blocks).
            MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
        }
    }
    // The source entry is only needed for this one validation result.
    if (it != mapBlockSource.end())
        mapBlockSource.erase(it);
}
2154 
2156 //
2157 // Messages
2158 //
2159 
2160 
2161 bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid)
2162 {
2163  if (m_chainman.ActiveChain().Tip()->GetBlockHash() != hashRecentRejectsChainTip) {
2164  // If the chain tip has changed previously rejected transactions
2165  // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
2166  // or a double-spend. Reset the rejects filter and give those
2167  // txs a second chance.
2168  hashRecentRejectsChainTip = m_chainman.ActiveChain().Tip()->GetBlockHash();
2169  m_recent_rejects.reset();
2170  }
2171 
2172  const uint256& hash = gtxid.GetHash();
2173 
2174  if (m_orphanage.HaveTx(gtxid)) return true;
2175 
2176  {
2177  LOCK(m_recent_confirmed_transactions_mutex);
2178  if (m_recent_confirmed_transactions.contains(hash)) return true;
2179  }
2180 
2181  return m_recent_rejects.contains(hash) || m_mempool.exists(gtxid);
2182 }
2183 
2184 bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash)
2185 {
2186  return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr;
2187 }
2188 
2189 void PeerManagerImpl::SendPings()
2190 {
2191  LOCK(m_peer_mutex);
2192  for(auto& it : m_peer_map) it.second->m_ping_queued = true;
2193 }
2194 
2195 void PeerManagerImpl::RelayTransaction(const uint256& txid, const uint256& wtxid)
2196 {
2197  LOCK(m_peer_mutex);
2198  for(auto& it : m_peer_map) {
2199  Peer& peer = *it.second;
2200  auto tx_relay = peer.GetTxRelay();
2201  if (!tx_relay) continue;
2202 
2203  LOCK(tx_relay->m_tx_inventory_mutex);
2204  // Only queue transactions for announcement once the version handshake
2205  // is completed. The time of arrival for these transactions is
2206  // otherwise at risk of leaking to a spy, if the spy is able to
2207  // distinguish transactions received during the handshake from the rest
2208  // in the announcement.
2209  if (tx_relay->m_next_inv_send_time == 0s) continue;
2210 
2211  const uint256& hash{peer.m_wtxid_relay ? wtxid : txid};
2212  if (!tx_relay->m_tx_inventory_known_filter.contains(hash)) {
2213  tx_relay->m_tx_inventory_to_send.insert(hash);
2214  }
2215  };
2216 }
2217 
2218 void PeerManagerImpl::RelayAddress(NodeId originator,
2219  const CAddress& addr,
2220  bool fReachable)
2221 {
2222  // We choose the same nodes within a given 24h window (if the list of connected
2223  // nodes does not change) and we don't relay to nodes that already know an
2224  // address. So within 24h we will likely relay a given address once. This is to
2225  // prevent a peer from unjustly giving their address better propagation by sending
2226  // it to us repeatedly.
2227 
2228  if (!fReachable && !addr.IsRelayable()) return;
2229 
2230  // Relay to a limited number of other nodes
2231  // Use deterministic randomness to send to the same nodes for 24 hours
2232  // at a time so the m_addr_knowns of the chosen nodes prevent repeats
2233  const uint64_t hash_addr{CServiceHash(0, 0)(addr)};
2234  const auto current_time{GetTime<std::chrono::seconds>()};
2235  // Adding address hash makes exact rotation time different per address, while preserving periodicity.
2236  const uint64_t time_addr{(static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) / count_seconds(ROTATE_ADDR_RELAY_DEST_INTERVAL)};
2238  .Write(hash_addr)
2239  .Write(time_addr)};
2240 
2241  // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers.
2242  unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
2243 
2244  std::array<std::pair<uint64_t, Peer*>, 2> best{{{0, nullptr}, {0, nullptr}}};
2245  assert(nRelayNodes <= best.size());
2246 
2247  LOCK(m_peer_mutex);
2248 
2249  for (auto& [id, peer] : m_peer_map) {
2250  if (peer->m_addr_relay_enabled && id != originator && IsAddrCompatible(*peer, addr)) {
2251  uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize();
2252  for (unsigned int i = 0; i < nRelayNodes; i++) {
2253  if (hashKey > best[i].first) {
2254  std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
2255  best[i] = std::make_pair(hashKey, peer.get());
2256  break;
2257  }
2258  }
2259  }
2260  };
2261 
2262  for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
2263  PushAddress(*best[i].second, addr);
2264  }
2265 }
2266 
/** Serve a single getdata block request (BLOCK / FILTERED / CMPCT / WITNESS variants). */
void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv)
{
    // Snapshot the most-recent-block pointers so the (possibly slow) disk
    // reads below don't happen under m_most_recent_block_mutex.
    std::shared_ptr<const CBlock> a_recent_block;
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
    {
        LOCK(m_most_recent_block_mutex);
        a_recent_block = m_most_recent_block;
        a_recent_compact_block = m_most_recent_compact_block;
    }

    bool need_activate_chain = false;
    {
        LOCK(cs_main);
        const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
        if (pindex) {
            if (pindex->HaveNumChainTxs() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) &&
                    pindex->IsValid(BLOCK_VALID_TREE)) {
                // If we have the block and all of its parents, but have not yet validated it,
                // we might be in the middle of connecting it (ie in the unlock of cs_main
                // before ActivateBestChain but after AcceptBlock).
                // In this case, we need to run ActivateBestChain prior to checking the relay
                // conditions below.
                need_activate_chain = true;
            }
        }
    } // release cs_main before calling ActivateBestChain
    if (need_activate_chain) {
        BlockValidationState state;
        if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
            LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
        }
    }

    LOCK(cs_main);
    // Look the index up again: the chainstate may have advanced above.
    const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
    if (!pindex) {
        return;
    }
    if (!BlockRequestAllowed(pindex)) {
        LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId());
        return;
    }
    // disconnect node in case we have reached the outbound limit for serving historical blocks
    if (m_connman.OutboundTargetReached(true) &&
        (((m_chainman.m_best_header != nullptr) && (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) &&
        !pfrom.HasPermission(NetPermissionFlags::Download) // nodes with the download permission may exceed target
    ) {
        LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId());
        pfrom.fDisconnect = true;
        return;
    }
    // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
    if (!pfrom.HasPermission(NetPermissionFlags::NoBan) && (
            (((peer.m_our_services & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) && (m_chainman.ActiveChain().Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
       )) {
        LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold, disconnect peer=%d\n", pfrom.GetId());
        //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
        pfrom.fDisconnect = true;
        return;
    }
    // Pruned nodes may have deleted the block, so check whether
    // it's available before trying to send.
    if (!(pindex->nStatus & BLOCK_HAVE_DATA)) {
        return;
    }
    std::shared_ptr<const CBlock> pblock;
    if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
        // Serve the cached tip block without touching disk.
        pblock = a_recent_block;
    } else if (inv.IsMsgWitnessBlk()) {
        // Fast-path: in this case it is possible to serve the block directly from disk,
        // as the network format matches the format on disk
        std::vector<uint8_t> block_data;
        if (!m_chainman.m_blockman.ReadRawBlockFromDisk(block_data, pindex->GetBlockPos())) {
            assert(!"cannot load block from disk");
        }
        MakeAndPushMessage(pfrom, NetMsgType::BLOCK, Span{block_data});
        // Don't set pblock as we've sent the block
    } else {
        // Send block from disk
        std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
        if (!m_chainman.m_blockman.ReadBlockFromDisk(*pblockRead, *pindex)) {
            assert(!"cannot load block from disk");
        }
        pblock = pblockRead;
    }
    if (pblock) {
        if (inv.IsMsgBlk()) {
            MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_NO_WITNESS(*pblock));
        } else if (inv.IsMsgWitnessBlk()) {
            MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock));
        } else if (inv.IsMsgFilteredBlk()) {
            // Only serve merkleblocks to peers that have loaded a bloom filter.
            bool sendMerkleBlock = false;
            CMerkleBlock merkleBlock;
            if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) {
                LOCK(tx_relay->m_bloom_filter_mutex);
                if (tx_relay->m_bloom_filter) {
                    sendMerkleBlock = true;
                    merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter);
                }
            }
            if (sendMerkleBlock) {
                MakeAndPushMessage(pfrom, NetMsgType::MERKLEBLOCK, merkleBlock);
                // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                // This avoids hurting performance by pointlessly requiring a round-trip
                // Note that there is currently no way for a node to request any single transactions we didn't send here -
                // they must either disconnect and retry or request the full block.
                // Thus, the protocol spec specified allows for us to provide duplicate txn here,
                // however we MUST always provide at least what the remote peer needs
                typedef std::pair<unsigned int, uint256> PairType;
                for (PairType& pair : merkleBlock.vMatchedTxn)
                    MakeAndPushMessage(pfrom, NetMsgType::TX, TX_NO_WITNESS(*pblock->vtx[pair.first]));
            }
            // else
                // no response
        } else if (inv.IsMsgCmpctBlk()) {
            // If a peer is asking for old blocks, we're almost guaranteed
            // they won't have a useful mempool to match against a compact block,
            // and we don't feel like constructing the object for them, so
            // instead we respond with the full, non-compact block.
            if (CanDirectFetch() && pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_CMPCTBLOCK_DEPTH) {
                if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) {
                    MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, *a_recent_compact_block);
                } else {
                    CBlockHeaderAndShortTxIDs cmpctblock{*pblock};
                    MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, cmpctblock);
                }
            } else {
                MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock));
            }
        }
    }

    {
        LOCK(peer.m_block_inv_mutex);
        // Trigger the peer node to send a getblocks request for the next batch of inventory
        if (inv.hash == peer.m_continuation_block) {
            // Send immediately. This must send even if redundant,
            // and we want it right after the last block so they don't
            // wait for other stuff first.
            std::vector<CInv> vInv;
            vInv.emplace_back(MSG_BLOCK, m_chainman.ActiveChain().Tip()->GetBlockHash());
            MakeAndPushMessage(pfrom, NetMsgType::INV, vInv);
            peer.m_continuation_block.SetNull();
        }
    }
}
2413 
2414 CTransactionRef PeerManagerImpl::FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid)
2415 {
2416  // If a tx was in the mempool prior to the last INV for this peer, permit the request.
2417  auto txinfo = m_mempool.info_for_relay(gtxid, tx_relay.m_last_inv_sequence);
2418  if (txinfo.tx) {
2419  return std::move(txinfo.tx);
2420  }
2421 
2422  // Or it might be from the most recent block
2423  {
2424  LOCK(m_most_recent_block_mutex);
2425  if (m_most_recent_block_txs != nullptr) {
2426  auto it = m_most_recent_block_txs->find(gtxid.GetHash());
2427  if (it != m_most_recent_block_txs->end()) return it->second;
2428  }
2429  }
2430 
2431  return {};
2432 }
2433 
void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
{

    // nullptr for block-relay-only peers and peers that disabled tx relay.
    auto tx_relay = peer.GetTxRelay();

    std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
    std::vector<CInv> vNotFound;

    // Process as many TX items from the front of the getdata queue as
    // possible, since they're common and it's efficient to batch process
    // them.
    while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) {
        if (interruptMsgProc) return;
        // The send buffer provides backpressure. If there's no space in
        // the buffer, pause processing until the next call.
        if (pfrom.fPauseSend) break;

        const CInv &inv = *it++;

        if (tx_relay == nullptr) {
            // Ignore GETDATA requests for transactions from block-relay-only
            // peers and peers that asked us not to announce transactions.
            continue;
        }

        CTransactionRef tx = FindTxForGetData(*tx_relay, ToGenTxid(inv));
        if (tx) {
            // WTX and WITNESS_TX imply we serialize with witness
            const auto maybe_with_witness = (inv.IsMsgTx() ? TX_NO_WITNESS : TX_WITH_WITNESS);
            MakeAndPushMessage(pfrom, NetMsgType::TX, maybe_with_witness(*tx));
            // The tx has now reached at least one peer, so it no longer needs
            // to be tracked for rebroadcast.
            m_mempool.RemoveUnbroadcastTx(tx->GetHash());
        } else {
            vNotFound.push_back(inv);
        }
    }

    // Only process one BLOCK item per call, since they're uncommon and can be
    // expensive to process.
    if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
        const CInv &inv = *it++;
        if (inv.IsGenBlkMsg()) {
            ProcessGetBlockData(pfrom, peer, inv);
        }
        // else: If the first item on the queue is an unknown type, we erase it
        // and continue processing the queue on the next call.
    }

    // Erase only what was consumed this call; anything after `it` stays
    // queued for the next invocation.
    peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);

    if (!vNotFound.empty()) {
        // Let the peer know that we didn't find what it asked for, so it doesn't
        // have to wait around forever.
        // SPV clients care about this message: it's needed when they are
        // recursively walking the dependencies of relevant unconfirmed
        // transactions. SPV clients want to do that because they want to know
        // about (and store and rebroadcast and risk analyze) the dependencies
        // of transactions relevant to them, without having to download the
        // entire memory pool.
        // Also, other nodes can use these messages to automatically request a
        // transaction from some other peer that announced it, and stop
        // waiting for us to respond.
        // In normal operation, we often send NOTFOUND messages for parents of
        // transactions that we relay; if a peer is missing a parent, they may
        // assume we have them and request the parents from us.
        MakeAndPushMessage(pfrom, NetMsgType::NOTFOUND, vNotFound);
    }
}
2502 
2503 uint32_t PeerManagerImpl::GetFetchFlags(const Peer& peer) const
2504 {
2505  uint32_t nFetchFlags = 0;
2506  if (CanServeWitnesses(peer)) {
2507  nFetchFlags |= MSG_WITNESS_FLAG;
2508  }
2509  return nFetchFlags;
2510 }
2511 
2512 void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req)
2513 {
2514  BlockTransactions resp(req);
2515  for (size_t i = 0; i < req.indexes.size(); i++) {
2516  if (req.indexes[i] >= block.vtx.size()) {
2517  Misbehaving(peer, 100, "getblocktxn with out-of-bounds tx indices");
2518  return;
2519  }
2520  resp.txn[i] = block.vtx[req.indexes[i]];
2521  }
2522 
2523  MakeAndPushMessage(pfrom, NetMsgType::BLOCKTXN, resp);
2524 }
2525 
2526 bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer)
2527 {
2528  // Do these headers have proof-of-work matching what's claimed?
2529  if (!HasValidProofOfWork(headers, consensusParams)) {
2530  Misbehaving(peer, 100, "header with invalid proof of work");
2531  return false;
2532  }
2533 
2534  // Are these headers connected to each other?
2535  if (!CheckHeadersAreContinuous(headers)) {
2536  Misbehaving(peer, 20, "non-continuous headers sequence");
2537  return false;
2538  }
2539  return true;
2540 }
2541 
2542 arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold()
2543 {
2544  arith_uint256 near_chaintip_work = 0;
2545  LOCK(cs_main);
2546  if (m_chainman.ActiveChain().Tip() != nullptr) {
2547  const CBlockIndex *tip = m_chainman.ActiveChain().Tip();
2548  // Use a 144 block buffer, so that we'll accept headers that fork from
2549  // near our tip.
2550  near_chaintip_work = tip->nChainWork - std::min<arith_uint256>(144*GetBlockProof(*tip), tip->nChainWork);
2551  }
2552  return std::max(near_chaintip_work, m_chainman.MinimumChainWork());
2553 }
2554 
2567 void PeerManagerImpl::HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer,
2568  const std::vector<CBlockHeader>& headers)
2569 {
2570  peer.m_num_unconnecting_headers_msgs++;
2571  // Try to fill in the missing headers.
2572  const CBlockIndex* best_header{WITH_LOCK(cs_main, return m_chainman.m_best_header)};
2573  if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
2574  LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, m_num_unconnecting_headers_msgs=%d)\n",
2575  headers[0].GetHash().ToString(),
2576  headers[0].hashPrevBlock.ToString(),
2577  best_header->nHeight,
2578  pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
2579  }
2580 
2581  // Set hashLastUnknownBlock for this peer, so that if we
2582  // eventually get the headers - even from a different peer -
2583  // we can use this peer to download.
2584  WITH_LOCK(cs_main, UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()));
2585 
2586  // The peer may just be broken, so periodically assign DoS points if this
2587  // condition persists.
2588  if (peer.m_num_unconnecting_headers_msgs % MAX_NUM_UNCONNECTING_HEADERS_MSGS == 0) {
2589  Misbehaving(peer, 20, strprintf("%d non-connecting headers", peer.m_num_unconnecting_headers_msgs));
2590  }
2591 }
2592 
2593 bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const
2594 {
2595  uint256 hashLastBlock;
2596  for (const CBlockHeader& header : headers) {
2597  if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
2598  return false;
2599  }
2600  hashLastBlock = header.GetHash();
2601  }
2602  return true;
2603 }
2604 
/**
 * Feed a headers batch into this peer's in-progress low-work headers sync,
 * if one exists.
 *
 * Returns ProcessNextHeaders()'s success flag when a sync is in progress;
 * on success, `headers` is replaced with the headers (possibly none) that
 * have completed PoW validation. Returns false when no sync is in progress.
 */
bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, std::vector<CBlockHeader>& headers)
{
    if (peer.m_headers_sync) {
        // A full-sized message suggests the peer has more headers after this batch.
        auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == MAX_HEADERS_RESULTS);
        if (result.request_more) {
            auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
            // If we were instructed to ask for a locator, it should not be empty.
            Assume(!locator.vHave.empty());
            if (!locator.vHave.empty()) {
                // It should be impossible for the getheaders request to fail,
                // because we should have cleared the last getheaders timestamp
                // when processing the headers that triggered this call. But
                // it may be possible to bypass this via compactblock
                // processing, so check the result before logging just to be
                // safe.
                bool sent_getheaders = MaybeSendGetHeaders(pfrom, locator, peer);
                if (sent_getheaders) {
                    LogPrint(BCLog::NET, "more getheaders (from %s) to peer=%d\n",
                            locator.vHave.front().ToString(), pfrom.GetId());
                } else {
                    LogPrint(BCLog::NET, "error sending next getheaders (from %s) to continue sync with peer=%d\n",
                            locator.vHave.front().ToString(), pfrom.GetId());
                }
            }
        }

        if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) {
            // Sync finished (successfully or not): tear down the state.
            peer.m_headers_sync.reset(nullptr);

            // Delete this peer's entry in m_headers_presync_stats.
            // If this is m_headers_presync_bestpeer, it will be replaced later
            // by the next peer that triggers the else{} branch below.
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats.erase(pfrom.GetId());
        } else {
            // Build statistics for this peer's sync.
            HeadersPresyncStats stats;
            stats.first = peer.m_headers_sync->GetPresyncWork();
            if (peer.m_headers_sync->GetState() == HeadersSyncState::State::PRESYNC) {
                stats.second = {peer.m_headers_sync->GetPresyncHeight(),
                                peer.m_headers_sync->GetPresyncTime()};
            }

            // Update statistics in stats.
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats[pfrom.GetId()] = stats;
            auto best_it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
            bool best_updated = false;
            if (best_it == m_headers_presync_stats.end()) {
                // If the cached best peer is outdated, iterate over all remaining ones (including
                // newly updated one) to find the best one.
                NodeId peer_best{-1};
                const HeadersPresyncStats* stat_best{nullptr};
                for (const auto& [peer, stat] : m_headers_presync_stats) {
                    if (!stat_best || stat > *stat_best) {
                        peer_best = peer;
                        stat_best = &stat;
                    }
                }
                m_headers_presync_bestpeer = peer_best;
                best_updated = (peer_best == pfrom.GetId());
            } else if (best_it->first == pfrom.GetId() || stats > best_it->second) {
                // pfrom was and remains the best peer, or pfrom just became best.
                m_headers_presync_bestpeer = pfrom.GetId();
                best_updated = true;
            }
            if (best_updated && stats.second.has_value()) {
                // If the best peer updated, and it is in its first phase, signal.
                m_headers_presync_should_signal = true;
            }
        }

        if (result.success) {
            // We only overwrite the headers passed in if processing was
            // successful.
            headers.swap(result.pow_validated_headers);
        }

        return result.success;
    }
    // Either we didn't have a sync in progress, or something went wrong
    // processing these headers, or we are returning headers to the caller to
    // process.
    return false;
}
2690 
/**
 * If the headers chain starting at chain_start_header has too little total
 * work to process directly, either begin an anti-DoS headers sync (when the
 * message was full, i.e. more headers likely follow) or ignore the chain.
 *
 * Returns true when the caller must stop processing `headers` (which are
 * cleared in that case); false when the chain meets the work threshold.
 */
bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex* chain_start_header, std::vector<CBlockHeader>& headers)
{
    // Calculate the total work on this chain.
    arith_uint256 total_work = chain_start_header->nChainWork + CalculateHeadersWork(headers);

    // Our dynamic anti-DoS threshold (minimum work required on a headers chain
    // before we'll store it)
    arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();

    // Avoid DoS via low-difficulty-headers by only processing if the headers
    // are part of a chain with sufficient work.
    if (total_work < minimum_chain_work) {
        // Only try to sync with this peer if their headers message was full;
        // otherwise they don't have more headers after this so no point in
        // trying to sync their too-little-work chain.
        if (headers.size() == MAX_HEADERS_RESULTS) {
            // Note: we could advance to the last header in this set that is
            // known to us, rather than starting at the first header (which we
            // may already have); however this is unlikely to matter much since
            // ProcessHeadersMessage() already handles the case where all
            // headers in a received message are already known and are
            // ancestors of m_best_header or chainActive.Tip(), by skipping
            // this logic in that case. So even if the first header in this set
            // of headers is known, some header in this set must be new, so
            // advancing to the first unknown header would be a small effect.
            LOCK(peer.m_headers_sync_mutex);
            peer.m_headers_sync.reset(new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(),
                chain_start_header, minimum_chain_work));

            // Now a HeadersSyncState object for tracking this synchronization
            // is created, process the headers using it as normal. Failures are
            // handled inside of IsContinuationOfLowWorkHeadersSync.
            (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
        } else {
            LogPrint(BCLog::NET, "Ignoring low-work chain (height=%u) from peer=%d\n", chain_start_header->nHeight + headers.size(), pfrom.GetId());
        }

        // The peer has not yet given us a chain that meets our work threshold,
        // so we want to prevent further processing of the headers in any case.
        headers = {};
        return true;
    }

    return false;
}
2736 
2737 bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex* header)
2738 {
2739  if (header == nullptr) {
2740  return false;
2741  } else if (m_chainman.m_best_header != nullptr && header == m_chainman.m_best_header->GetAncestor(header->nHeight)) {
2742  return true;
2743  } else if (m_chainman.ActiveChain().Contains(header)) {
2744  return true;
2745  }
2746  return false;
2747 }
2748 
2749 bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer)
2750 {
2751  const auto current_time = NodeClock::now();
2752 
2753  // Only allow a new getheaders message to go out if we don't have a recent
2754  // one already in-flight
2755  if (current_time - peer.m_last_getheaders_timestamp > HEADERS_RESPONSE_TIME) {
2756  MakeAndPushMessage(pfrom, NetMsgType::GETHEADERS, locator, uint256());
2757  peer.m_last_getheaders_timestamp = current_time;
2758  return true;
2759  }
2760  return false;
2761 }
2762 
/*
 * Given a new headers tip ending in last_header, potentially request blocks towards that tip.
 * We require that the given tip have at least as much work as our tip, and for
 * our current tip to be "close to synced" (see CanDirectFetch()).
 */
void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header)
{
    LOCK(cs_main);
    CNodeState *nodestate = State(pfrom.GetId());

    if (CanDirectFetch() && last_header.IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= last_header.nChainWork) {
        std::vector<const CBlockIndex*> vToFetch;
        const CBlockIndex* pindexWalk{&last_header};
        // Calculate all the blocks we'd need to switch to last_header, up to a limit.
        while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
            // Skip blocks we already have, already requested, or (post-segwit)
            // couldn't get witness data for from this peer.
            if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
                    !IsBlockRequested(pindexWalk->GetBlockHash()) &&
                    (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || CanServeWitnesses(peer))) {
                // We don't have this block, and it's not yet in flight.
                vToFetch.push_back(pindexWalk);
            }
            pindexWalk = pindexWalk->pprev;
        }
        // If pindexWalk still isn't on our main chain, we're looking at a
        // very large reorg at a time we think we're close to caught up to
        // the main chain -- this shouldn't really happen. Bail out on the
        // direct fetch and rely on parallel download instead.
        if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
            LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
                     last_header.GetBlockHash().ToString(),
                     last_header.nHeight);
        } else {
            std::vector<CInv> vGetData;
            // Download as much as possible, from earliest to latest.
            for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
                if (nodestate->vBlocksInFlight.size() >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                    // Can't download any more from this peer
                    break;
                }
                uint32_t nFetchFlags = GetFetchFlags(peer);
                vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash());
                BlockRequested(pfrom.GetId(), *pindex);
                LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
                         pindex->GetBlockHash().ToString(), pfrom.GetId());
            }
            if (vGetData.size() > 1) {
                LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
                         last_header.GetBlockHash().ToString(),
                         last_header.nHeight);
            }
            if (vGetData.size() > 0) {
                // A single new block at the tip from a compact-block-capable
                // peer: upgrade the request to a compact block.
                if (!m_opts.ignore_incoming_txs &&
                        nodestate->m_provides_cmpctblocks &&
                        vGetData.size() == 1 &&
                        mapBlocksInFlight.size() == 1 &&
                        last_header.pprev->IsValid(BLOCK_VALID_CHAIN)) {
                    // In any case, we want to download using a compact block, not a regular one
                    vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
                }
                MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vGetData);
            }
        }
    }
}
2827 
/**
 * Update peer state after successfully processing a connecting headers
 * message ending in last_header:
 * - reset the unconnecting-headers counter,
 * - record the peer's best known block,
 * - note a fresh block announcement when the new header has more work than our tip,
 * - during IBD, disconnect outbound peers whose complete headers chain has
 *   insufficient work,
 * - possibly protect this outbound peer from bad/lagging-chain eviction.
 *
 * @param received_new_header    true if last_header was previously unknown to us
 * @param may_have_more_headers  true if the peer may still have headers to give
 *                               us (i.e. the message was full-sized)
 */
void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer,
                                                        const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
{
    // The headers connected, so any previous "unconnecting headers" streak is over.
    if (peer.m_num_unconnecting_headers_msgs > 0) {
        LogPrint(BCLog::NET, "peer=%d: resetting m_num_unconnecting_headers_msgs (%d -> 0)\n", pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
    }
    peer.m_num_unconnecting_headers_msgs = 0;

    LOCK(cs_main);
    CNodeState *nodestate = State(pfrom.GetId());

    UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash());

    // From here, pindexBestKnownBlock should be guaranteed to be non-null,
    // because it is set in UpdateBlockAvailability. Some nullptr checks
    // are still present, however, as belt-and-suspenders.

    if (received_new_header && last_header.nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
        nodestate->m_last_block_announcement = GetTime();
    }

    // If we're in IBD, we want outbound peers that will serve us a useful
    // chain. Disconnect peers that are on chains with insufficient work.
    if (m_chainman.IsInitialBlockDownload() && !may_have_more_headers) {
        // If the peer has no more headers to give us, then we know we have
        // their tip.
        if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
            // This peer has too little work on their headers chain to help
            // us sync -- disconnect if it is an outbound disconnection
            // candidate.
            // Note: We compare their tip to the minimum chain work (rather than
            // m_chainman.ActiveChain().Tip()) because we won't start block download
            // until we have a headers chain that has at least
            // the minimum chain work, even if a peer has a chain past our tip,
            // as an anti-DoS measure.
            if (pfrom.IsOutboundOrBlockRelayConn()) {
                LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
                pfrom.fDisconnect = true;
            }
        }
    }

    // If this is an outbound full-relay peer, check to see if we should protect
    // it from the bad/lagging chain logic.
    // Note that outbound block-relay peers are excluded from this protection, and
    // thus always subject to eviction under the bad/lagging chain logic.
    // See ChainSyncTimeoutState.
    if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
        if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
            LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
            nodestate->m_chain_sync.m_protect = true;
            ++m_outbound_peers_with_protect_from_disconnect;
        }
    }
}
2888 
2889 void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
2890  std::vector<CBlockHeader>&& headers,
2891  bool via_compact_block)
2892 {
2893  size_t nCount = headers.size();
2894 
2895  if (nCount == 0) {
2896  // Nothing interesting. Stop asking this peers for more headers.
2897  // If we were in the middle of headers sync, receiving an empty headers
2898  // message suggests that the peer suddenly has nothing to give us
2899  // (perhaps it reorged to our chain). Clear download state for this peer.
2900  LOCK(peer.m_headers_sync_mutex);
2901  if (peer.m_headers_sync) {
2902  peer.m_headers_sync.reset(nullptr);
2903  LOCK(m_headers_presync_mutex);
2904  m_headers_presync_stats.erase(pfrom.GetId());
2905  }
2906  return;
2907  }
2908 
2909  // Before we do any processing, make sure these pass basic sanity checks.
2910  // We'll rely on headers having valid proof-of-work further down, as an
2911  // anti-DoS criteria (note: this check is required before passing any
2912  // headers into HeadersSyncState).
2913  if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) {
2914  // Misbehaving() calls are handled within CheckHeadersPoW(), so we can
2915  // just return. (Note that even if a header is announced via compact
2916  // block, the header itself should be valid, so this type of error can
2917  // always be punished.)
2918  return;
2919  }
2920 
2921  const CBlockIndex *pindexLast = nullptr;
2922 
2923  // We'll set already_validated_work to true if these headers are
2924  // successfully processed as part of a low-work headers sync in progress
2925  // (either in PRESYNC or REDOWNLOAD phase).
2926  // If true, this will mean that any headers returned to us (ie during
2927  // REDOWNLOAD) can be validated without further anti-DoS checks.
2928  bool already_validated_work = false;
2929 
2930  // If we're in the middle of headers sync, let it do its magic.
2931  bool have_headers_sync = false;
2932  {
2933  LOCK(peer.m_headers_sync_mutex);
2934 
2935  already_validated_work = IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
2936 
2937  // The headers we passed in may have been:
2938  // - untouched, perhaps if no headers-sync was in progress, or some
2939  // failure occurred
2940  // - erased, such as if the headers were successfully processed and no
2941  // additional headers processing needs to take place (such as if we
2942  // are still in PRESYNC)
2943  // - replaced with headers that are now ready for validation, such as
2944  // during the REDOWNLOAD phase of a low-work headers sync.
2945  // So just check whether we still have headers that we need to process,
2946  // or not.
2947  if (headers.empty()) {
2948  return;
2949  }
2950 
2951  have_headers_sync = !!peer.m_headers_sync;
2952  }
2953 
2954  // Do these headers connect to something in our block index?
2955  const CBlockIndex *chain_start_header{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock))};
2956  bool headers_connect_blockindex{chain_start_header != nullptr};
2957 
2958  if (!headers_connect_blockindex) {
2959  if (nCount <= MAX_BLOCKS_TO_ANNOUNCE) {
2960  // If this looks like it could be a BIP 130 block announcement, use
2961  // special logic for handling headers that don't connect, as this
2962  // could be benign.
2963  HandleFewUnconnectingHeaders(pfrom, peer, headers);
2964  } else {
2965  Misbehaving(peer, 10, "invalid header received");
2966  }
2967  return;
2968  }
2969 
2970  // If the headers we received are already in memory and an ancestor of
2971  // m_best_header or our tip, skip anti-DoS checks. These headers will not
2972  // use any more memory (and we are not leaking information that could be
2973  // used to fingerprint us).
2974  const CBlockIndex *last_received_header{nullptr};
2975  {
2976  LOCK(cs_main);
2977  last_received_header = m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash());
2978  if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
2979  already_validated_work = true;
2980  }
2981  }
2982 
2983  // If our peer has NetPermissionFlags::NoBan privileges, then bypass our
2984  // anti-DoS logic (this saves bandwidth when we connect to a trusted peer
2985  // on startup).
2987  already_validated_work = true;
2988  }
2989 
2990  // At this point, the headers connect to something in our block index.
2991  // Do anti-DoS checks to determine if we should process or store for later
2992  // processing.
2993  if (!already_validated_work && TryLowWorkHeadersSync(peer, pfrom,
2994  chain_start_header, headers)) {
2995  // If we successfully started a low-work headers sync, then there
2996  // should be no headers to process any further.
2997  Assume(headers.empty());
2998  return;
2999  }
3000 
3001  // At this point, we have a set of headers with sufficient work on them
3002  // which can be processed.
3003 
3004  // If we don't have the last header, then this peer will have given us
3005  // something new (if these headers are valid).
3006  bool received_new_header{last_received_header == nullptr};
3007 
3008  // Now process all the headers.
3009  BlockValidationState state;
3010  if (!m_chainman.ProcessNewBlockHeaders(headers, /*min_pow_checked=*/true, state, &pindexLast)) {
3011  if (state.IsInvalid()) {
3012  MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received");
3013  return;
3014  }
3015  }
3016  assert(pindexLast);
3017 
3018  // Consider fetching more headers if we are not using our headers-sync mechanism.
3019  if (nCount == MAX_HEADERS_RESULTS && !have_headers_sync) {
3020  // Headers message had its maximum size; the peer may have more headers.
3021  if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
3022  LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
3023  pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
3024  }
3025  }
3026 
3027  UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);
3028 
3029  // Consider immediately downloading blocks.
3030  HeadersDirectFetchBlocks(pfrom, peer, *pindexLast);
3031 
3032  return;
3033 }
3034 
3035 bool PeerManagerImpl::ProcessOrphanTx(Peer& peer)
3036 {
3037  AssertLockHeld(g_msgproc_mutex);
3038  LOCK(cs_main);
3039 
3040  CTransactionRef porphanTx = nullptr;
3041 
3042  while (CTransactionRef porphanTx = m_orphanage.GetTxToReconsider(peer.m_id)) {
3043  const MempoolAcceptResult result = m_chainman.ProcessTransaction(porphanTx);
3044  const TxValidationState& state = result.m_state;
3045  const Txid& orphanHash = porphanTx->GetHash();
3046  const Wtxid& orphan_wtxid = porphanTx->GetWitnessHash();
3047 
3049  LogPrint(BCLog::TXPACKAGES, " accepted orphan tx %s (wtxid=%s)\n", orphanHash.ToString(), orphan_wtxid.ToString());
3050  LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n",
3051  peer.m_id,
3052  orphanHash.ToString(),
3053  orphan_wtxid.ToString(),
3054  m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000);
3055  RelayTransaction(orphanHash, porphanTx->GetWitnessHash());
3056  m_orphanage.AddChildrenToWorkSet(*porphanTx);
3057  m_orphanage.EraseTx(orphanHash);
3058  for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) {
3059  AddToCompactExtraTransactions(removedTx);
3060  }
3061  return true;
3062  } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
3063  if (state.IsInvalid()) {
3064  LogPrint(BCLog::TXPACKAGES, " invalid orphan tx %s (wtxid=%s) from peer=%d. %s\n",
3065  orphanHash.ToString(),
3066  orphan_wtxid.ToString(),
3067  peer.m_id,
3068  state.ToString());
3069  LogPrint(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n",
3070  orphanHash.ToString(),
3071  orphan_wtxid.ToString(),
3072  peer.m_id,
3073  state.ToString());
3074  // Maybe punish peer that gave us an invalid orphan tx
3075  MaybePunishNodeForTx(peer.m_id, state);
3076  }
3077  // Has inputs but not accepted to mempool
3078  // Probably non-standard or insufficient fee
3079  LogPrint(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s)\n", orphanHash.ToString(), orphan_wtxid.ToString());
3081  // We can add the wtxid of this transaction to our reject filter.
3082  // Do not add txids of witness transactions or witness-stripped
3083  // transactions to the filter, as they can have been malleated;
3084  // adding such txids to the reject filter would potentially
3085  // interfere with relay of valid transactions from peers that
3086  // do not support wtxid-based relay. See
3087  // https://github.com/bitcoin/bitcoin/issues/8279 for details.
3088  // We can remove this restriction (and always add wtxids to
3089  // the filter even for witness stripped transactions) once
3090  // wtxid-based relay is broadly deployed.
3091  // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
3092  // for concerns around weakening security of unupgraded nodes
3093  // if we start doing this too early.
3094  m_recent_rejects.insert(porphanTx->GetWitnessHash().ToUint256());
3095  // If the transaction failed for TX_INPUTS_NOT_STANDARD,
3096  // then we know that the witness was irrelevant to the policy
3097  // failure, since this check depends only on the txid
3098  // (the scriptPubKey being spent is covered by the txid).
3099  // Add the txid to the reject filter to prevent repeated
3100  // processing of this transaction in the event that child
3101  // transactions are later received (resulting in
3102  // parent-fetching by txid via the orphan-handling logic).
3103  if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && porphanTx->HasWitness()) {
3104  // We only add the txid if it differs from the wtxid, to
3105  // avoid wasting entries in the rolling bloom filter.
3106  m_recent_rejects.insert(porphanTx->GetHash().ToUint256());
3107  }
3108  }
3109  m_orphanage.EraseTx(orphanHash);
3110  return true;
3111  }
3112  }
3113 
3114  return false;
3115 }
3116 
3117 bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer,
3118  BlockFilterType filter_type, uint32_t start_height,
3119  const uint256& stop_hash, uint32_t max_height_diff,
3120  const CBlockIndex*& stop_index,
3121  BlockFilterIndex*& filter_index)
3122 {
3123  const bool supported_filter_type =
3124  (filter_type == BlockFilterType::BASIC &&
3125  (peer.m_our_services & NODE_COMPACT_FILTERS));
3126  if (!supported_filter_type) {
3127  LogPrint(BCLog::NET, "peer %d requested unsupported block filter type: %d\n",
3128  node.GetId(), static_cast<uint8_t>(filter_type));
3129  node.fDisconnect = true;
3130  return false;
3131  }
3132 
3133  {
3134  LOCK(cs_main);
3135  stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash);
3136 
3137  // Check that the stop block exists and the peer would be allowed to fetch it.
3138  if (!stop_index || !BlockRequestAllowed(stop_index)) {
3139  LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
3140  node.GetId(), stop_hash.ToString());
3141  node.fDisconnect = true;
3142  return false;
3143  }
3144  }
3145 
3146  uint32_t stop_height = stop_index->nHeight;
3147  if (start_height > stop_height) {
3148  LogPrint(BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with "
3149  "start height %d and stop height %d\n",
3150  node.GetId(), start_height, stop_height);
3151  node.fDisconnect = true;
3152  return false;
3153  }
3154  if (stop_height - start_height >= max_height_diff) {
3155  LogPrint(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n",
3156  node.GetId(), stop_height - start_height + 1, max_height_diff);
3157  node.fDisconnect = true;
3158  return false;
3159  }
3160 
3161  filter_index = GetBlockFilterIndex(filter_type);
3162  if (!filter_index) {
3163  LogPrint(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type));
3164  return false;
3165  }
3166 
3167  return true;
3168 }
3169 
3170 void PeerManagerImpl::ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv)
3171 {
3172  uint8_t filter_type_ser;
3173  uint32_t start_height;
3174  uint256 stop_hash;
3175 
3176  vRecv >> filter_type_ser >> start_height >> stop_hash;
3177 
3178  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3179 
3180  const CBlockIndex* stop_index;
3181  BlockFilterIndex* filter_index;
3182  if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash,
3183  MAX_GETCFILTERS_SIZE, stop_index, filter_index)) {
3184  return;
3185  }
3186 
3187  std::vector<BlockFilter> filters;
3188  if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
3189  LogPrint(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
3190  BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
3191  return;
3192  }
3193 
3194  for (const auto& filter : filters) {
3195  MakeAndPushMessage(node, NetMsgType::CFILTER, filter);
3196  }
3197 }
3198 
3199 void PeerManagerImpl::ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv)
3200 {
3201  uint8_t filter_type_ser;
3202  uint32_t start_height;
3203  uint256 stop_hash;
3204 
3205  vRecv >> filter_type_ser >> start_height >> stop_hash;
3206 
3207  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3208 
3209  const CBlockIndex* stop_index;
3210  BlockFilterIndex* filter_index;
3211  if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash,
3212  MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
3213  return;
3214  }
3215 
3216  uint256 prev_header;
3217  if (start_height > 0) {
3218  const CBlockIndex* const prev_block =
3219  stop_index->GetAncestor(static_cast<int>(start_height - 1));
3220  if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
3221  LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
3222  BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString());
3223  return;
3224  }
3225  }
3226 
3227  std::vector<uint256> filter_hashes;
3228  if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) {
3229  LogPrint(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
3230  BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
3231  return;
3232  }
3233 
3234  MakeAndPushMessage(node, NetMsgType::CFHEADERS,
3235  filter_type_ser,
3236  stop_index->GetBlockHash(),
3237  prev_header,
3238  filter_hashes);
3239 }
3240 
3241 void PeerManagerImpl::ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv)
3242 {
3243  uint8_t filter_type_ser;
3244  uint256 stop_hash;
3245 
3246  vRecv >> filter_type_ser >> stop_hash;
3247 
3248  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3249 
3250  const CBlockIndex* stop_index;
3251  BlockFilterIndex* filter_index;
3252  if (!PrepareBlockFilterRequest(node, peer, filter_type, /*start_height=*/0, stop_hash,
3253  /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
3254  stop_index, filter_index)) {
3255  return;
3256  }
3257 
3258  std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
3259 
3260  // Populate headers.
3261  const CBlockIndex* block_index = stop_index;
3262  for (int i = headers.size() - 1; i >= 0; i--) {
3263  int height = (i + 1) * CFCHECKPT_INTERVAL;
3264  block_index = block_index->GetAncestor(height);
3265 
3266  if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
3267  LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
3268  BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString());
3269  return;
3270  }
3271  }
3272 
3273  MakeAndPushMessage(node, NetMsgType::CFCHECKPT,
3274  filter_type_ser,
3275  stop_index->GetBlockHash(),
3276  headers);
3277 }
3278 
3279 void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked)
3280 {
3281  bool new_block{false};
3282  m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked, &new_block);
3283  if (new_block) {
3284  node.m_last_block_time = GetTime<std::chrono::seconds>();
3285  // In case this block came from a different peer than we requested
3286  // from, we can erase the block request now anyway (as we just stored
3287  // this block to disk).
3288  LOCK(cs_main);
3289  RemoveBlockRequest(block->GetHash(), std::nullopt);
3290  } else {
3291  LOCK(cs_main);
3292  mapBlockSource.erase(block->GetHash());
3293  }
3294 }
3295 
/**
 * Handle a BLOCKTXN message (BIP 152): use the supplied transactions to fill
 * in a partially downloaded compact block, then hand the reconstructed block
 * to validation. Unexpected or unreconstructable responses may fall back to a
 * full GETDATA request or clear the in-flight state.
 */
void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions)
{
    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
    bool fBlockRead{false};
    {
        LOCK(cs_main);

        auto range_flight = mapBlocksInFlight.equal_range(block_transactions.blockhash);
        size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
        bool requested_block_from_this_peer{false};

        // Multimap ensures ordering of outstanding requests. It's either empty or first in line.
        bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());

        // Find our own outstanding compact-block request for this hash (the
        // one with a partialBlock attached), if any.
        while (range_flight.first != range_flight.second) {
            auto [node_id, block_it] = range_flight.first->second;
            if (node_id == pfrom.GetId() && block_it->partialBlock) {
                requested_block_from_this_peer = true;
                break;
            }
            range_flight.first++;
        }

        if (!requested_block_from_this_peer) {
            LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId());
            return;
        }

        PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock;
        ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn);
        if (status == READ_STATUS_INVALID) {
            RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
            Misbehaving(peer, 100, "invalid compact block/non-matching block transactions");
            return;
        } else if (status == READ_STATUS_FAILED) {
            if (first_in_flight) {
                // Might have collided, fall back to getdata now :(
                std::vector<CInv> invs;
                invs.emplace_back(MSG_BLOCK | GetFetchFlags(peer), block_transactions.blockhash);
                MakeAndPushMessage(pfrom, NetMsgType::GETDATA, invs);
            } else {
                // A different peer's request is ahead of ours; drop ours and
                // wait for that download to finish instead.
                RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId());
                LogPrint(BCLog::NET, "Peer %d sent us a compact block but it failed to reconstruct, waiting on first download to complete\n", pfrom.GetId());
                return;
            }
        } else {
            // Block is either okay, or possibly we received
            // READ_STATUS_CHECKBLOCK_FAILED.
            // Note that CheckBlock can only fail for one of a few reasons:
            // 1. bad-proof-of-work (impossible here, because we've already
            //    accepted the header)
            // 2. merkleroot doesn't match the transactions given (already
            //    caught in FillBlock with READ_STATUS_FAILED, so
            //    impossible here)
            // 3. the block is otherwise invalid (eg invalid coinbase,
            //    block is too big, too many legacy sigops, etc).
            // So if CheckBlock failed, #3 is the only possibility.
            // Under BIP 152, we don't discourage the peer unless proof of work is
            // invalid (we don't require all the stateless checks to have
            // been run). This is handled below, so just treat this as
            // though the block was successfully read, and rely on the
            // handling in ProcessNewBlock to ensure the block index is
            // updated, etc.
            RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // it is now an empty pointer
            fBlockRead = true;
            // mapBlockSource is used for potentially punishing peers and
            // updating which peers send us compact blocks, so the race
            // between here and cs_main in ProcessNewBlock is fine.
            // BIP 152 permits peers to relay compact blocks after validating
            // the header only; we should not punish peers if the block turns
            // out to be invalid.
            mapBlockSource.emplace(block_transactions.blockhash, std::make_pair(pfrom.GetId(), false));
        }
    } // Don't hold cs_main when we call into ProcessNewBlock
    if (fBlockRead) {
        // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
        // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
        // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
        // disk-space attacks), but this should be safe due to the
        // protections in the compact block handler -- see related comment
        // in compact block optimistic reconstruction handling.
        ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true);
    }
    return;
}
3381 
3382 void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, DataStream& vRecv,
3383  const std::chrono::microseconds time_received,
3384  const std::atomic<bool>& interruptMsgProc)
3385 {
3386  AssertLockHeld(g_msgproc_mutex);
3387 
3388  LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
3389 
3390  PeerRef peer = GetPeerRef(pfrom.GetId());
3391  if (peer == nullptr) return;
3392 
3393  if (msg_type == NetMsgType::VERSION) {
3394  if (pfrom.nVersion != 0) {
3395  LogPrint(BCLog::NET, "redundant version message from peer=%d\n", pfrom.GetId());
3396  return;
3397  }
3398 
3399  int64_t nTime;
3400  CService addrMe;
3401  uint64_t nNonce = 1;
3402  ServiceFlags nServices;
3403  int nVersion;
3404  std::string cleanSubVer;
3405  int starting_height = -1;
3406  bool fRelay = true;
3407 
3408  vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
3409  if (nTime < 0) {
3410  nTime = 0;
3411  }
3412  vRecv.ignore(8); // Ignore the addrMe service bits sent by the peer
3413  vRecv >> CNetAddr::V1(addrMe);
3414  if (!pfrom.IsInboundConn())
3415  {
3416  // Overwrites potentially existing services. In contrast to this,
3417  // unvalidated services received via gossip relay in ADDR/ADDRV2
3418  // messages are only ever added but cannot replace existing ones.
3419  m_addrman.SetServices(pfrom.addr, nServices);
3420  }
3421  if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices))
3422  {
3423  LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom.GetId(), nServices, GetDesirableServiceFlags(nServices));
3424  pfrom.fDisconnect = true;
3425  return;
3426  }
3427 
3428  if (nVersion < MIN_PEER_PROTO_VERSION) {
3429  // disconnect from peers older than this proto version
3430  LogPrint(BCLog::NET, "peer=%d using obsolete version %i; disconnecting\n", pfrom.GetId(), nVersion);
3431  pfrom.fDisconnect = true;
3432  return;
3433  }
3434 
3435  if (!vRecv.empty()) {
3436  // The version message includes information about the sending node which we don't use:
3437  // - 8 bytes (service bits)
3438  // - 16 bytes (ipv6 address)
3439  // - 2 bytes (port)
3440  vRecv.ignore(26);
3441  vRecv >> nNonce;
3442  }
3443  if (!vRecv.empty()) {
3444  std::string strSubVer;
3445  vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
3446  cleanSubVer = SanitizeString(strSubVer);
3447  }
3448  if (!vRecv.empty()) {
3449  vRecv >> starting_height;
3450  }
3451  if (!vRecv.empty())
3452  vRecv >> fRelay;
3453  // Disconnect if we connected to ourself
3454  if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce))
3455  {
3456  LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToStringAddrPort());
3457  pfrom.fDisconnect = true;
3458  return;
3459  }
3460 
3461  if (pfrom.IsInboundConn() && addrMe.IsRoutable())
3462  {
3463  SeenLocal(addrMe);
3464  }
3465 
3466  // Inbound peers send us their version message when they connect.
3467  // We send our version message in response.
3468  if (pfrom.IsInboundConn()) {
3469  PushNodeVersion(pfrom, *peer);
3470  }
3471 
3472  // Change version
3473  const int greatest_common_version = std::min(nVersion, PROTOCOL_VERSION);
3474  pfrom.SetCommonVersion(greatest_common_version);
3475  pfrom.nVersion = nVersion;
3476 
3477  if (greatest_common_version >= WTXID_RELAY_VERSION) {
3478  MakeAndPushMessage(pfrom, NetMsgType::WTXIDRELAY);
3479  }
3480 
3481  // Signal ADDRv2 support (BIP155).
3482  if (greatest_common_version >= 70016) {
3483  // BIP155 defines addrv2 and sendaddrv2 for all protocol versions, but some
3484  // implementations reject messages they don't know. As a courtesy, don't send
3485  // it to nodes with a version before 70016, as no software is known to support
3486  // BIP155 that doesn't announce at least that protocol version number.
3487  MakeAndPushMessage(pfrom, NetMsgType::SENDADDRV2);
3488  }
3489 
3490  pfrom.m_has_all_wanted_services = HasAllDesirableServiceFlags(nServices);
3491  peer->m_their_services = nServices;
3492  pfrom.SetAddrLocal(addrMe);
3493  {
3494  LOCK(pfrom.m_subver_mutex);
3495  pfrom.cleanSubVer = cleanSubVer;
3496  }
3497  peer->m_starting_height = starting_height;
3498 
3499  // Only initialize the Peer::TxRelay m_relay_txs data structure if:
3500  // - this isn't an outbound block-relay-only connection, and
3501  // - this isn't an outbound feeler connection, and
3502  // - fRelay=true (the peer wishes to receive transaction announcements)
3503  // or we're offering NODE_BLOOM to this peer. NODE_BLOOM means that
3504  // the peer may turn on transaction relay later.
3505  if (!pfrom.IsBlockOnlyConn() &&
3506  !pfrom.IsFeelerConn() &&
3507  (fRelay || (peer->m_our_services & NODE_BLOOM))) {
3508  auto* const tx_relay = peer->SetTxRelay();
3509  {
3510  LOCK(tx_relay->m_bloom_filter_mutex);
3511  tx_relay->m_relay_txs = fRelay; // set to true after we get the first filter* message
3512  }
3513  if (fRelay) pfrom.m_relays_txs = true;
3514  }
3515 
3516  if (greatest_common_version >= WTXID_RELAY_VERSION && m_txreconciliation) {
3517  // Per BIP-330, we announce txreconciliation support if:
3518  // - protocol version per the peer's VERSION message supports WTXID_RELAY;
3519  // - transaction relay is supported per the peer's VERSION message
3520  // - this is not a block-relay-only connection and not a feeler
3521  // - this is not an addr fetch connection;
3522  // - we are not in -blocksonly mode.
3523  const auto* tx_relay = peer->GetTxRelay();
3524  if (tx_relay && WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs) &&
3525  !pfrom.IsAddrFetchConn() && !m_opts.ignore_incoming_txs) {
3526  const uint64_t recon_salt = m_txreconciliation->PreRegisterPeer(pfrom.GetId());
3527  MakeAndPushMessage(pfrom, NetMsgType::SENDTXRCNCL,
3528  TXRECONCILIATION_VERSION, recon_salt);
3529  }
3530  }
3531 
3532  MakeAndPushMessage(pfrom, NetMsgType::VERACK);
3533 
3534  // Potentially mark this peer as a preferred download peer.
3535  {
3536  LOCK(cs_main);
3537  CNodeState* state = State(pfrom.GetId());
3538  state->fPreferredDownload = (!pfrom.IsInboundConn() || pfrom.HasPermission(NetPermissionFlags::NoBan)) && !pfrom.IsAddrFetchConn() && CanServeBlocks(*peer);
3539  m_num_preferred_download_peers += state->fPreferredDownload;
3540  }
3541 
3542  // Attempt to initialize address relay for outbound peers and use result
3543  // to decide whether to send GETADDR, so that we don't send it to
3544  // inbound or outbound block-relay-only peers.
3545  bool send_getaddr{false};
3546  if (!pfrom.IsInboundConn()) {
3547  send_getaddr = SetupAddressRelay(pfrom, *peer);
3548  }
3549  if (send_getaddr) {
3550  // Do a one-time address fetch to help populate/update our addrman.
3551  // If we're starting up for the first time, our addrman may be pretty
3552  // empty, so this mechanism is important to help us connect to the network.
3553  // We skip this for block-relay-only peers. We want to avoid
3554  // potentially leaking addr information and we do not want to
3555  // indicate to the peer that we will participate in addr relay.
3556  MakeAndPushMessage(pfrom, NetMsgType::GETADDR);
3557  peer->m_getaddr_sent = true;
3558  // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND addresses in response
3559  // (bypassing the MAX_ADDR_PROCESSING_TOKEN_BUCKET limit).
3560  peer->m_addr_token_bucket += MAX_ADDR_TO_SEND;
3561  }
3562 
3563  if (!pfrom.IsInboundConn()) {
3564  // For non-inbound connections, we update the addrman to record
3565  // connection success so that addrman will have an up-to-date
3566  // notion of which peers are online and available.
3567  //
3568  // While we strive to not leak information about block-relay-only
3569  // connections via the addrman, not moving an address to the tried
3570  // table is also potentially detrimental because new-table entries
3571  // are subject to eviction in the event of addrman collisions. We
3572  // mitigate the information-leak by never calling
3573  // AddrMan::Connected() on block-relay-only peers; see
3574  // FinalizeNode().
3575  //
3576  // This moves an address from New to Tried table in Addrman,
3577  // resolves tried-table collisions, etc.
3578  m_addrman.Good(pfrom.addr);
3579  }
3580 
3581  std::string remoteAddr;
3582  if (fLogIPs)
3583  remoteAddr = ", peeraddr=" + pfrom.addr.ToStringAddrPort();
3584 
3585  const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)};
3586  LogPrint(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s%s\n",
3587  cleanSubVer, pfrom.nVersion,
3588  peer->m_starting_height, addrMe.ToStringAddrPort(), fRelay, pfrom.GetId(),
3589  remoteAddr, (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));
3590 
3591  int64_t nTimeOffset = nTime - GetTime();
3592  pfrom.nTimeOffset = nTimeOffset;
3593  if (!pfrom.IsInboundConn()) {
3594  // Don't use timedata samples from inbound peers to make it
3595  // harder for others to tamper with our adjusted time.
3596  AddTimeData(pfrom.addr, nTimeOffset);
3597  }
3598 
3599  // If the peer is old enough to have the old alert system, send it the final alert.
3600  if (greatest_common_version <= 70012) {
3601  const auto finalAlert{ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50")};
3602  MakeAndPushMessage(pfrom, "alert", Span{finalAlert});
3603  }
3604 
3605  // Feeler connections exist only to verify if address is online.
3606  if (pfrom.IsFeelerConn()) {
3607  LogPrint(BCLog::NET, "feeler connection completed peer=%d; disconnecting\n", pfrom.GetId());
3608  pfrom.fDisconnect = true;
3609  }
3610  return;
3611  }
3612 
3613  if (pfrom.nVersion == 0) {
3614  // Must have a version message before anything else
3615  LogPrint(BCLog::NET, "non-version message before version handshake. Message \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
3616  return;
3617  }
3618 
3619  if (msg_type == NetMsgType::VERACK) {
3620  if (pfrom.fSuccessfullyConnected) {
3621  LogPrint(BCLog::NET, "ignoring redundant verack message from peer=%d\n", pfrom.GetId());
3622  return;
3623  }
3624 
3625  // Log successful connections unconditionally for outbound, but not for inbound as those
3626  // can be triggered by an attacker at high rate.
3628  const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)};
3629  LogPrintf("New %s %s peer connected: version: %d, blocks=%d, peer=%d%s%s\n",
3630  pfrom.ConnectionTypeAsString(),
3631  TransportTypeAsString(pfrom.m_transport->GetInfo().transport_type),
3632  pfrom.nVersion.load(), peer->m_starting_height,
3633  pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToStringAddrPort()) : ""),
3634  (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));
3635  }
3636 
3637  if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) {
3638  // Tell our peer we are willing to provide version 2 cmpctblocks.
3639  // However, we do not request new block announcements using
3640  // cmpctblock messages.
3641  // We send this to non-NODE NETWORK peers as well, because
3642  // they may wish to request compact blocks from us
3643  MakeAndPushMessage(pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION);
3644  }
3645 
3646  if (m_txreconciliation) {
3647  if (!peer->m_wtxid_relay || !m_txreconciliation->IsPeerRegistered(pfrom.GetId())) {
3648  // We could have optimistically pre-registered/registered the peer. In that case,
3649  // we should forget about the reconciliation state here if this wasn't followed
3650  // by WTXIDRELAY (since WTXIDRELAY can't be announced later).
3651  m_txreconciliation->ForgetPeer(pfrom.GetId());
3652  }
3653  }
3654 
3655  if (auto tx_relay = peer->GetTxRelay()) {
3656  // `TxRelay::m_tx_inventory_to_send` must be empty before the
3657  // version handshake is completed as
3658  // `TxRelay::m_next_inv_send_time` is first initialised in
3659  // `SendMessages` after the verack is received. Any transactions
3660  // received during the version handshake would otherwise
3661  // immediately be advertised without random delay, potentially
3662  // leaking the time of arrival to a spy.
3663  Assume(WITH_LOCK(
3664  tx_relay->m_tx_inventory_mutex,
3665  return tx_relay->m_tx_inventory_to_send.empty() &&
3666  tx_relay->m_next_inv_send_time == 0s));
3667  }
3668 
3669  pfrom.fSuccessfullyConnected = true;
3670  return;
3671  }
3672 
3673  if (msg_type == NetMsgType::SENDHEADERS) {
3674  peer->m_prefers_headers = true;
3675  return;
3676  }
3677 
3678  if (msg_type == NetMsgType::SENDCMPCT) {
3679  bool sendcmpct_hb{false};
3680  uint64_t sendcmpct_version{0};
3681  vRecv >> sendcmpct_hb >> sendcmpct_version;
3682 
3683  // Only support compact block relay with witnesses
3684  if (sendcmpct_version != CMPCTBLOCKS_VERSION) return;
3685 
3686  LOCK(cs_main);
3687  CNodeState* nodestate = State(pfrom.GetId());
3688  nodestate->m_provides_cmpctblocks = true;
3689  nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;
3690  // save whether peer selects us as BIP152 high-bandwidth peer
3691  // (receiving sendcmpct(1) signals high-bandwidth, sendcmpct(0) low-bandwidth)
3692  pfrom.m_bip152_highbandwidth_from = sendcmpct_hb;
3693  return;
3694  }
3695 
3696  // BIP339 defines feature negotiation of wtxidrelay, which must happen between
3697  // VERSION and VERACK to avoid relay problems from switching after a connection is up.
3698  if (msg_type == NetMsgType::WTXIDRELAY) {
3699  if (pfrom.fSuccessfullyConnected) {
3700  // Disconnect peers that send a wtxidrelay message after VERACK.
3701  LogPrint(BCLog::NET, "wtxidrelay received after verack from peer=%d; disconnecting\n", pfrom.GetId());
3702  pfrom.fDisconnect = true;
3703  return;
3704  }
3705  if (pfrom.GetCommonVersion() >= WTXID_RELAY_VERSION) {
3706  if (!peer->m_wtxid_relay) {
3707  peer->m_wtxid_relay = true;
3708  m_wtxid_relay_peers++;
3709  } else {
3710  LogPrint(BCLog::NET, "ignoring duplicate wtxidrelay from peer=%d\n", pfrom.GetId());
3711  }
3712  } else {
3713  LogPrint(BCLog::NET, "ignoring wtxidrelay due to old common version=%d from peer=%d\n", pfrom.GetCommonVersion(), pfrom.GetId());
3714  }
3715  return;
3716  }
3717 
3718  // BIP155 defines feature negotiation of addrv2 and sendaddrv2, which must happen
3719  // between VERSION and VERACK.
3720  if (msg_type == NetMsgType::SENDADDRV2) {
3721  if (pfrom.fSuccessfullyConnected) {
3722  // Disconnect peers that send a SENDADDRV2 message after VERACK.
3723  LogPrint(BCLog::NET, "sendaddrv2 received after verack from peer=%d; disconnecting\n", pfrom.GetId());
3724  pfrom.fDisconnect = true;
3725  return;
3726  }
3727  peer->m_wants_addrv2 = true;
3728  return;
3729  }
3730 
3731  // Received from a peer demonstrating readiness to announce transactions via reconciliations.
3732  // This feature negotiation must happen between VERSION and VERACK to avoid relay problems
3733  // from switching announcement protocols after the connection is up.
3734  if (msg_type == NetMsgType::SENDTXRCNCL) {
3735  if (!m_txreconciliation) {
3736  LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl from peer=%d ignored, as our node does not have txreconciliation enabled\n", pfrom.GetId());
3737  return;
3738  }
3739 
3740  if (pfrom.fSuccessfullyConnected) {
3741  LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received after verack from peer=%d; disconnecting\n", pfrom.GetId());
3742  pfrom.fDisconnect = true;
3743  return;
3744  }
3745 
3746  // Peer must not offer us reconciliations if we specified no tx relay support in VERSION.
3747  if (RejectIncomingTxs(pfrom)) {
3748  LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received from peer=%d to which we indicated no tx relay; disconnecting\n", pfrom.GetId());
3749  pfrom.fDisconnect = true;
3750  return;
3751  }
3752 
3753  // Peer must not offer us reconciliations if they specified no tx relay support in VERSION.
3754  // This flag might also be false in other cases, but the RejectIncomingTxs check above
3755  // eliminates them, so that this flag fully represents what we are looking for.
3756  const auto* tx_relay = peer->GetTxRelay();
3757  if (!tx_relay || !WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs)) {
3758  LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received from peer=%d which indicated no tx relay to us; disconnecting\n", pfrom.GetId());
3759  pfrom.fDisconnect = true;
3760  return;
3761  }
3762 
3763  uint32_t peer_txreconcl_version;
3764  uint64_t remote_salt;
3765  vRecv >> peer_txreconcl_version >> remote_salt;
3766 
3767  const ReconciliationRegisterResult result = m_txreconciliation->RegisterPeer(pfrom.GetId(), pfrom.IsInboundConn(),
3768  peer_txreconcl_version, remote_salt);
3769  switch (result) {
3771  LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "Ignore unexpected txreconciliation signal from peer=%d\n", pfrom.GetId());
3772  break;
3774  break;
3776  LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "txreconciliation protocol violation from peer=%d (sendtxrcncl received from already registered peer); disconnecting\n", pfrom.GetId());
3777  pfrom.fDisconnect = true;
3778  return;
3780  LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "txreconciliation protocol violation from peer=%d; disconnecting\n", pfrom.GetId());
3781  pfrom.fDisconnect = true;
3782  return;
3783  }
3784  return;
3785  }
3786 
3787  if (!pfrom.fSuccessfullyConnected) {
3788  LogPrint(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
3789  return;
3790  }
3791 
3792  if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
3793  const auto ser_params{
3794  msg_type == NetMsgType::ADDRV2 ?
3795  // Set V2 param so that the CNetAddr and CAddress
3796  // unserialize methods know that an address in v2 format is coming.
3799  };
3800 
3801  std::vector<CAddress> vAddr;
3802 
3803  vRecv >> ser_params(vAddr);
3804 
3805  if (!SetupAddressRelay(pfrom, *peer)) {
3806  LogPrint(BCLog::NET, "ignoring %s message from %s peer=%d\n", msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId());
3807  return;
3808  }
3809 
3810  if (vAddr.size() > MAX_ADDR_TO_SEND)
3811  {
3812  Misbehaving(*peer, 20, strprintf("%s message size = %u", msg_type, vAddr.size()));
3813  return;
3814  }
3815 
3816  // Store the new addresses
3817  std::vector<CAddress> vAddrOk;
3818  const auto current_a_time{Now<NodeSeconds>()};
3819 
3820  // Update/increment addr rate limiting bucket.
3821  const auto current_time{GetTime<std::chrono::microseconds>()};
3822  if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
3823  // Don't increment bucket if it's already full
3824  const auto time_diff = std::max(current_time - peer->m_addr_token_timestamp, 0us);
3825  const double increment = Ticks<SecondsDouble>(time_diff) * MAX_ADDR_RATE_PER_SECOND;
3826  peer->m_addr_token_bucket = std::min<double>(peer->m_addr_token_bucket + increment, MAX_ADDR_PROCESSING_TOKEN_BUCKET);
3827  }
3828  peer->m_addr_token_timestamp = current_time;
3829 
3830  const bool rate_limited = !pfrom.HasPermission(NetPermissionFlags::Addr);
3831  uint64_t num_proc = 0;
3832  uint64_t num_rate_limit = 0;
3833  Shuffle(vAddr.begin(), vAddr.end(), m_rng);
3834  for (CAddress& addr : vAddr)
3835  {
3836  if (interruptMsgProc)
3837  return;
3838 
3839  // Apply rate limiting.
3840  if (peer->m_addr_token_bucket < 1.0) {
3841  if (rate_limited) {
3842  ++num_rate_limit;
3843  continue;
3844  }
3845  } else {
3846  peer->m_addr_token_bucket -= 1.0;
3847  }
3848  // We only bother storing full nodes, though this may include
3849  // things which we would not make an outbound connection to, in
3850  // part because we may make feeler connections to them.
3851  if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices))
3852  continue;
3853 
3854  if (addr.nTime <= NodeSeconds{100000000s} || addr.nTime > current_a_time + 10min) {
3855  addr.nTime = current_a_time - 5 * 24h;
3856  }
3857  AddAddressKnown(*peer, addr);
3858  if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
3859  // Do not process banned/discouraged addresses beyond remembering we received them
3860  continue;
3861  }
3862  ++num_proc;
3863  const bool reachable{g_reachable_nets.Contains(addr)};
3864  if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent && vAddr.size() <= 10 && addr.IsRoutable()) {
3865  // Relay to a limited number of other nodes
3866  RelayAddress(pfrom.GetId(), addr, reachable);
3867  }
3868  // Do not store addresses outside our network
3869  if (reachable) {
3870  vAddrOk.push_back(addr);
3871  }
3872  }
3873  peer->m_addr_processed += num_proc;
3874  peer->m_addr_rate_limited += num_rate_limit;
3875  LogPrint(BCLog::NET, "Received addr: %u addresses (%u processed, %u rate-limited) from peer=%d\n",
3876  vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());
3877 
3878  m_addrman.Add(vAddrOk, pfrom.addr, 2h);
3879  if (vAddr.size() < 1000) peer->m_getaddr_sent = false;
3880 
3881  // AddrFetch: Require multiple addresses to avoid disconnecting on self-announcements
3882  if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) {
3883  LogPrint(BCLog::NET, "addrfetch connection completed peer=%d; disconnecting\n", pfrom.GetId());
3884  pfrom.fDisconnect = true;
3885  }
3886  return;
3887  }
3888 
3889  if (msg_type == NetMsgType::INV) {
3890  std::vector<CInv> vInv;
3891  vRecv >> vInv;
3892  if (vInv.size() > MAX_INV_SZ)
3893  {
3894  Misbehaving(*peer, 20, strprintf("inv message size = %u", vInv.size()));
3895  return;
3896  }
3897 
3898  const bool reject_tx_invs{RejectIncomingTxs(pfrom)};
3899 
3900  LOCK(cs_main);
3901 
3902  const auto current_time{GetTime<std::chrono::microseconds>()};
3903  uint256* best_block{nullptr};
3904 
3905  for (CInv& inv : vInv) {
3906  if (interruptMsgProc) return;
3907 
3908  // Ignore INVs that don't match wtxidrelay setting.
3909  // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting.
3910  // This is fine as no INV messages are involved in that process.
3911  if (peer->m_wtxid_relay) {
3912  if (inv.IsMsgTx()) continue;
3913  } else {
3914  if (inv.IsMsgWtx()) continue;
3915  }
3916 
3917  if (inv.IsMsgBlk()) {
3918  const bool fAlreadyHave = AlreadyHaveBlock(inv.hash);
3919  LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
3920 
3921  UpdateBlockAvailability(pfrom.GetId(), inv.hash);
3922  if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() && !IsBlockRequested(inv.hash)) {
3923  // Headers-first is the primary method of announcement on
3924  // the network. If a node fell back to sending blocks by
3925  // inv, it may be for a re-org, or because we haven't
3926  // completed initial headers sync. The final block hash
3927  // provided should be the highest, so send a getheaders and
3928  // then fetch the blocks we need to catch up.
3929  best_block = &inv.hash;
3930  }
3931  } else if (inv.IsGenTxMsg()) {
3932  if (reject_tx_invs) {
3933  LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.hash.ToString(), pfrom.GetId());
3934  pfrom.fDisconnect = true;
3935  return;
3936  }
3937  const GenTxid gtxid = ToGenTxid(inv);
3938  const bool fAlreadyHave = AlreadyHaveTx(gtxid);
3939  LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
3940 
3941  AddKnownTx(*peer, inv.hash);
3942  if (!fAlreadyHave && !m_chainman.IsInitialBlockDownload()) {
3943  AddTxAnnouncement(pfrom, gtxid, current_time);
3944  }
3945  } else {
3946  LogPrint(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId());
3947  }
3948  }
3949 
3950  if (best_block != nullptr) {
3951  // If we haven't started initial headers-sync with this peer, then
3952  // consider sending a getheaders now. On initial startup, there's a
3953  // reliability vs bandwidth tradeoff, where we are only trying to do
3954  // initial headers sync with one peer at a time, with a long
3955  // timeout (at which point, if the sync hasn't completed, we will
3956  // disconnect the peer and then choose another). In the meantime,
3957  // as new blocks are found, we are willing to add one new peer per
3958  // block to sync with as well, to sync quicker in the case where
3959  // our initial peer is unresponsive (but less bandwidth than we'd
3960  // use if we turned on sync with all peers).
3961  CNodeState& state{*Assert(State(pfrom.GetId()))};
3962  if (state.fSyncStarted || (!peer->m_inv_triggered_getheaders_before_sync && *best_block != m_last_block_inv_triggering_headers_sync)) {
3963  if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer)) {
3964  LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
3965  m_chainman.m_best_header->nHeight, best_block->ToString(),
3966  pfrom.GetId());
3967  }
3968  if (!state.fSyncStarted) {
3969  peer->m_inv_triggered_getheaders_before_sync = true;
3970  // Update the last block hash that triggered a new headers
3971  // sync, so that we don't turn on headers sync with more
3972  // than 1 new peer every new block.
3973  m_last_block_inv_triggering_headers_sync = *best_block;
3974  }
3975  }
3976  }
3977 
3978  return;
3979  }
3980 
3981  if (msg_type == NetMsgType::GETDATA) {
3982  std::vector<CInv> vInv;
3983  vRecv >> vInv;
3984  if (vInv.size() > MAX_INV_SZ)
3985  {
3986  Misbehaving(*peer, 20, strprintf("getdata message size = %u", vInv.size()));
3987  return;
3988  }
3989 
3990  LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId());
3991 
3992  if (vInv.size() > 0) {
3993  LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId());
3994  }
3995 
3996  {
3997  LOCK(peer->m_getdata_requests_mutex);
3998  peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end());
3999  ProcessGetData(pfrom, *peer, interruptMsgProc);
4000  }
4001 
4002  return;
4003  }
4004 
4005  if (msg_type == NetMsgType::GETBLOCKS) {
4006  CBlockLocator locator;
4007  uint256 hashStop;
4008  vRecv >> locator >> hashStop;
4009 
4010  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
4011  LogPrint(BCLog::NET, "getblocks locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
4012  pfrom.fDisconnect = true;
4013  return;
4014  }
4015 
4016  // We might have announced the currently-being-connected tip using a
4017  // compact block, which resulted in the peer sending a getblocks
4018  // request, which we would otherwise respond to without the new block.
4019  // To avoid this situation we simply verify that we are on our best
4020  // known chain now. This is super overkill, but we handle it better
4021  // for getheaders requests, and there are no known nodes which support
4022  // compact blocks but still use getblocks to request blocks.
4023  {
4024  std::shared_ptr<const CBlock> a_recent_block;
4025  {
4026  LOCK(m_most_recent_block_mutex);
4027  a_recent_block = m_most_recent_block;
4028  }
4029  BlockValidationState state;
4030  if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
4031  LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
4032  }
4033  }
4034 
4035  LOCK(cs_main);
4036 
4037  // Find the last block the caller has in the main chain
4038  const CBlockIndex* pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
4039 
4040  // Send the rest of the chain
4041  if (pindex)
4042  pindex = m_chainman.ActiveChain().Next(pindex);
4043  int nLimit = 500;
4044  LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId());
4045  for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
4046  {
4047  if (pindex->GetBlockHash() == hashStop)
4048  {
4049  LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4050  break;
4051  }
4052  // If pruning, don't inv blocks unless we have on disk and are likely to still have
4053  // for some reasonable time window (1 hour) that block relay might require.
4054  const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
4055  if (m_chainman.m_blockman.IsPruneMode() && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave)) {
4056  LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4057  break;
4058  }
4059  WITH_LOCK(peer->m_block_inv_mutex, peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash()));
4060  if (--nLimit <= 0) {
4061  // When this block is requested, we'll send an inv that'll
4062  // trigger the peer to getblocks the next batch of inventory.
4063  LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4064  WITH_LOCK(peer->m_block_inv_mutex, {peer->m_continuation_block = pindex->GetBlockHash();});
4065  break;
4066  }
4067  }
4068  return;
4069  }
4070 
4071  if (msg_type == NetMsgType::GETBLOCKTXN) {
4073  vRecv >> req;
4074 
4075  std::shared_ptr<const CBlock> recent_block;
4076  {
4077  LOCK(m_most_recent_block_mutex);
4078  if (m_most_recent_block_hash == req.blockhash)
4079  recent_block = m_most_recent_block;
4080  // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion
4081  }
4082  if (recent_block) {
4083  SendBlockTransactions(pfrom, *peer, *recent_block, req);
4084  return;
4085  }
4086 
4087  {
4088  LOCK(cs_main);
4089 
4090  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(req.blockhash);
4091  if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) {
4092  LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId());
4093  return;
4094  }
4095 
4096  if (pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) {
4097  CBlock block;
4098  const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(block, *pindex)};
4099  assert(ret);
4100 
4101  SendBlockTransactions(pfrom, *peer, block, req);
4102  return;
4103  }
4104  }
4105 
4106  // If an older block is requested (should never happen in practice,
4107  // but can happen in tests) send a block response instead of a
4108  // blocktxn response. Sending a full block response instead of a
4109  // small blocktxn response is preferable in the case where a peer
4110  // might maliciously send lots of getblocktxn requests to trigger
4111  // expensive disk reads, because it will require the peer to
4112  // actually receive all the data read from disk over the network.
4113  LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
4114  CInv inv{MSG_WITNESS_BLOCK, req.blockhash};
4115  WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv));
4116  // The message processing loop will go around again (without pausing) and we'll respond then
4117  return;
4118  }
4119 
4120  if (msg_type == NetMsgType::GETHEADERS) {
4121  CBlockLocator locator;
4122  uint256 hashStop;
4123  vRecv >> locator >> hashStop;
4124 
4125  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
4126  LogPrint(BCLog::NET, "getheaders locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
4127  pfrom.fDisconnect = true;
4128  return;
4129  }
4130 
4131  if (m_chainman.m_blockman.LoadingBlocks()) {
4132  LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d while importing/reindexing\n", pfrom.GetId());
4133  return;
4134  }
4135 
4136  LOCK(cs_main);
4137 
4138  // Note that if we were to be on a chain that forks from the checkpointed
4139  // chain, then serving those headers to a peer that has seen the
4140  // checkpointed chain would cause that peer to disconnect us. Requiring
4141  // that our chainwork exceed the minimum chain work is a protection against
4142  // being fed a bogus chain when we started up for the first time and
4143  // getting partitioned off the honest network for serving that chain to
4144  // others.
4145  if (m_chainman.ActiveTip() == nullptr ||
4146  (m_chainman.ActiveTip()->nChainWork < m_chainman.MinimumChainWork() && !pfrom.HasPermission(NetPermissionFlags::Download))) {
4147  LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work; sending empty response\n", pfrom.GetId());
4148  // Just respond with an empty headers message, to tell the peer to
4149  // go away but not treat us as unresponsive.
4150  MakeAndPushMessage(pfrom, NetMsgType::HEADERS, std::vector<CBlockHeader>());
4151  return;
4152  }
4153 
4154  CNodeState *nodestate = State(pfrom.GetId());
4155  const CBlockIndex* pindex = nullptr;
4156  if (locator.IsNull())
4157  {
4158  // If locator is null, return the hashStop block
4159  pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop);
4160  if (!pindex) {
4161  return;
4162  }
4163 
4164  if (!BlockRequestAllowed(pindex)) {
4165  LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom.GetId());
4166  return;
4167  }
4168  }
4169  else
4170  {
4171  // Find the last block the caller has in the main chain
4172  pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
4173  if (pindex)
4174  pindex = m_chainman.ActiveChain().Next(pindex);
4175  }
4176 
4177  // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
4178  std::vector<CBlock> vHeaders;
4179  int nLimit = MAX_HEADERS_RESULTS;
4180  LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId());
4181  for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
4182  {
4183  vHeaders.emplace_back(pindex->GetBlockHeader());
4184  if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
4185  break;
4186  }
4187  // pindex can be nullptr either if we sent m_chainman.ActiveChain().Tip() OR
4188  // if our peer has m_chainman.ActiveChain().Tip() (and thus we are sending an empty
4189  // headers message). In both cases it's safe to update
4190  // pindexBestHeaderSent to be our tip.
4191  //
4192  // It is important that we simply reset the BestHeaderSent value here,
4193  // and not max(BestHeaderSent, newHeaderSent). We might have announced
4194  // the currently-being-connected tip using a compact block, which
4195  // resulted in the peer sending a headers request, which we respond to
4196  // without the new block. By resetting the BestHeaderSent, we ensure we
4197  // will re-announce the new block via headers (or compact blocks again)
4198  // in the SendMessages logic.
4199  nodestate->pindexBestHeaderSent = pindex ? pindex : m_chainman.ActiveChain().Tip();
4200  MakeAndPushMessage(pfrom, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders));
4201  return;
4202  }
4203 
4204  if (msg_type == NetMsgType::TX) {
4205  if (RejectIncomingTxs(pfrom)) {
4206  LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId());
4207  pfrom.fDisconnect = true;
4208  return;
4209  }
4210 
4211  // Stop processing the transaction early if we are still in IBD since we don't
4212  // have enough information to validate it yet. Sending unsolicited transactions
4213  // is not considered a protocol violation, so don't punish the peer.
4214  if (m_chainman.IsInitialBlockDownload()) return;
4215 
4216  CTransactionRef ptx;
4217  vRecv >> TX_WITH_WITNESS(ptx);
4218  const CTransaction& tx = *ptx;
4219 
4220  const uint256& txid = ptx->GetHash();
4221  const uint256& wtxid = ptx->GetWitnessHash();
4222 
4223  const uint256& hash = peer->m_wtxid_relay ? wtxid : txid;
4224  AddKnownTx(*peer, hash);
4225 
4226  LOCK(cs_main);
4227 
4228  m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
4229  if (tx.HasWitness()) m_txrequest.ReceivedResponse(pfrom.GetId(), wtxid);
4230 
4231  // We do the AlreadyHaveTx() check using wtxid, rather than txid - in the
4232  // absence of witness malleation, this is strictly better, because the
4233  // recent rejects filter may contain the wtxid but rarely contains
4234  // the txid of a segwit transaction that has been rejected.
4235  // In the presence of witness malleation, it's possible that by only
4236  // doing the check with wtxid, we could overlook a transaction which
4237  // was confirmed with a different witness, or exists in our mempool
4238  // with a different witness, but this has limited downside:
4239  // mempool validation does its own lookup of whether we have the txid
4240  // already; and an adversary can already relay us old transactions
4241  // (older than our recency filter) if trying to DoS us, without any need
4242  // for witness malleation.
4243  if (AlreadyHaveTx(GenTxid::Wtxid(wtxid))) {
4245  // Always relay transactions received from peers with forcerelay
4246  // permission, even if they were already in the mempool, allowing
4247  // the node to function as a gateway for nodes hidden behind it.
4248  if (!m_mempool.exists(GenTxid::Txid(tx.GetHash()))) {
4249  LogPrintf("Not relaying non-mempool transaction %s (wtxid=%s) from forcerelay peer=%d\n",
4250  tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), pfrom.GetId());
4251  } else {
4252  LogPrintf("Force relaying tx %s (wtxid=%s) from peer=%d\n",
4253  tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), pfrom.GetId());
4254  RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
4255  }
4256  }
4257  // If a tx is detected by m_recent_rejects it is ignored. Because we haven't
4258  // submitted the tx to our mempool, we won't have computed a DoS
4259  // score for it or determined exactly why we consider it invalid.
4260  //
4261  // This means we won't penalize any peer subsequently relaying a DoSy
4262  // tx (even if we penalized the first peer who gave it to us) because
4263  // we have to account for m_recent_rejects showing false positives. In
4264  // other words, we shouldn't penalize a peer if we aren't *sure* they
4265  // submitted a DoSy tx.
4266  //
4267  // Note that m_recent_rejects doesn't just record DoSy or invalid
4268  // transactions, but any tx not accepted by the mempool, which may be
4269  // due to node policy (vs. consensus). So we can't blanket penalize a
4270  // peer simply for relaying a tx that our m_recent_rejects has caught,
4271  // regardless of false positives.
4272  return;
4273  }
4274 
4275  const MempoolAcceptResult result = m_chainman.ProcessTransaction(ptx);
4276  const TxValidationState& state = result.m_state;
4277 
4279  // As this version of the transaction was acceptable, we can forget about any
4280  // requests for it.
4281  m_txrequest.ForgetTxHash(tx.GetHash());
4282  m_txrequest.ForgetTxHash(tx.GetWitnessHash());
4283  RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
4284  m_orphanage.AddChildrenToWorkSet(tx);
4285 
4286  pfrom.m_last_tx_time = GetTime<std::chrono::seconds>();
4287 
4288  LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n",
4289  pfrom.GetId(),
4290  tx.GetHash().ToString(),
4291  tx.GetWitnessHash().ToString(),
4292  m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000);
4293 
4294  for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) {
4295  AddToCompactExtraTransactions(removedTx);
4296  }
4297  }
4299  {
4300  bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected
4301 
4302  // Deduplicate parent txids, so that we don't have to loop over
4303  // the same parent txid more than once down below.
4304  std::vector<uint256> unique_parents;
4305  unique_parents.reserve(tx.vin.size());
4306  for (const CTxIn& txin : tx.vin) {
4307  // We start with all parents, and then remove duplicates below.
4308  unique_parents.push_back(txin.prevout.hash);
4309  }
4310  std::sort(unique_parents.begin(), unique_parents.end());
4311  unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end());
4312  for (const uint256& parent_txid : unique_parents) {
4313  if (m_recent_rejects.contains(parent_txid)) {
4314  fRejectedParents = true;
4315  break;
4316  }
4317  }
4318  if (!fRejectedParents) {
4319  const auto current_time{GetTime<std::chrono::microseconds>()};
4320 
4321  for (const uint256& parent_txid : unique_parents) {
4322  // Here, we only have the txid (and not wtxid) of the
4323  // inputs, so we only request in txid mode, even for
4324  // wtxidrelay peers.
4325  // Eventually we should replace this with an improved
4326  // protocol for getting all unconfirmed parents.
4327  const auto gtxid{GenTxid::Txid(parent_txid)};
4328  AddKnownTx(*peer, parent_txid);
4329  if (!AlreadyHaveTx(gtxid)) AddTxAnnouncement(pfrom, gtxid, current_time);
4330  }
4331 
4332  if (m_orphanage.AddTx(ptx, pfrom.GetId())) {
4333  AddToCompactExtraTransactions(ptx);
4334  }
4335 
4336  // Once added to the orphan pool, a tx is considered AlreadyHave, and we shouldn't request it anymore.
4337  m_txrequest.ForgetTxHash(tx.GetHash());
4338  m_txrequest.ForgetTxHash(tx.GetWitnessHash());
4339 
4340  // DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789)
4341  m_orphanage.LimitOrphans(m_opts.max_orphan_txs, m_rng);
4342  } else {
4343  LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s (wtxid=%s)\n",
4344  tx.GetHash().ToString(),
4345  tx.GetWitnessHash().ToString());
4346  // We will continue to reject this tx since it has rejected
4347  // parents so avoid re-requesting it from other peers.
4348  // Here we add both the txid and the wtxid, as we know that
4349  // regardless of what witness is provided, we will not accept
4350  // this, so we don't need to allow for redownload of this txid
4351  // from any of our non-wtxidrelay peers.
4352  m_recent_rejects.insert(tx.GetHash().ToUint256());
4353  m_recent_rejects.insert(tx.GetWitnessHash().ToUint256());
4354  m_txrequest.ForgetTxHash(tx.GetHash());
4355  m_txrequest.ForgetTxHash(tx.GetWitnessHash());
4356  }
4357  } else {
4359  // We can add the wtxid of this transaction to our reject filter.
4360  // Do not add txids of witness transactions or witness-stripped
4361  // transactions to the filter, as they can have been malleated;
4362  // adding such txids to the reject filter would potentially
4363  // interfere with relay of valid transactions from peers that
4364  // do not support wtxid-based relay. See
4365  // https://github.com/bitcoin/bitcoin/issues/8279 for details.
4366  // We can remove this restriction (and always add wtxids to
4367  // the filter even for witness stripped transactions) once
4368  // wtxid-based relay is broadly deployed.
4369  // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
4370  // for concerns around weakening security of unupgraded nodes
4371  // if we start doing this too early.
4372  m_recent_rejects.insert(tx.GetWitnessHash().ToUint256());
4373  m_txrequest.ForgetTxHash(tx.GetWitnessHash());
4374  // If the transaction failed for TX_INPUTS_NOT_STANDARD,
4375  // then we know that the witness was irrelevant to the policy
4376  // failure, since this check depends only on the txid
4377  // (the scriptPubKey being spent is covered by the txid).
4378  // Add the txid to the reject filter to prevent repeated
4379  // processing of this transaction in the event that child
4380  // transactions are later received (resulting in
4381  // parent-fetching by txid via the orphan-handling logic).
4383  m_recent_rejects.insert(tx.GetHash().ToUint256());
4384  m_txrequest.ForgetTxHash(tx.GetHash());
4385  }
4386  if (RecursiveDynamicUsage(*ptx) < 100000) {
4387  AddToCompactExtraTransactions(ptx);
4388  }
4389  }
4390  }
4391 
4392  if (state.IsInvalid()) {
4393  LogPrint(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n",
4394  tx.GetHash().ToString(),
4395  tx.GetWitnessHash().ToString(),
4396  pfrom.GetId(),
4397  state.ToString());
4398  MaybePunishNodeForTx(pfrom.GetId(), state);
4399  }
4400  return;
4401  }
4402 
4403  if (msg_type == NetMsgType::CMPCTBLOCK)
4404  {
4405  // Ignore cmpctblock received while importing
4406  if (m_chainman.m_blockman.LoadingBlocks()) {
4407  LogPrint(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId());
4408  return;
4409  }
4410 
4411  CBlockHeaderAndShortTxIDs cmpctblock;
4412  vRecv >> cmpctblock;
4413 
4414  bool received_new_header = false;
4415  const auto blockhash = cmpctblock.header.GetHash();
4416 
4417  {
4418  LOCK(cs_main);
4419 
4420  const CBlockIndex* prev_block = m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock);
4421  if (!prev_block) {
4422  // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
4423  if (!m_chainman.IsInitialBlockDownload()) {
4424  MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer);
4425  }
4426  return;
4427  } else if (prev_block->nChainWork + CalculateHeadersWork({cmpctblock.header}) < GetAntiDoSWorkThreshold()) {
4428  // If we get a low-work header in a compact block, we can ignore it.
4429  LogPrint(BCLog::NET, "Ignoring low-work compact block from peer %d\n", pfrom.GetId());
4430  return;
4431  }
4432 
4433  if (!m_chainman.m_blockman.LookupBlockIndex(blockhash)) {
4434  received_new_header = true;
4435  }
4436  }
4437 
4438  const CBlockIndex *pindex = nullptr;
4439  BlockValidationState state;
4440  if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header}, /*min_pow_checked=*/true, state, &pindex)) {
4441  if (state.IsInvalid()) {
4442  MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block=*/true, "invalid header via cmpctblock");
4443  return;
4444  }
4445  }
4446 
4447  if (received_new_header) {
4448  LogInfo("Saw new cmpctblock header hash=%s peer=%d\n",
4449  blockhash.ToString(), pfrom.GetId());
4450  }
4451 
4452  bool fProcessBLOCKTXN = false;
4453 
4454  // If we end up treating this as a plain headers message, call that as well
4455  // without cs_main.
4456  bool fRevertToHeaderProcessing = false;
4457 
4458  // Keep a CBlock for "optimistic" compactblock reconstructions (see
4459  // below)
4460  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4461  bool fBlockReconstructed = false;
4462 
4463  {
4464  LOCK(cs_main);
4465  // If AcceptBlockHeader returned true, it set pindex
4466  assert(pindex);
4467  UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
4468 
4469  CNodeState *nodestate = State(pfrom.GetId());
4470 
4471  // If this was a new header with more work than our tip, update the
4472  // peer's last block announcement time
4473  if (received_new_header && pindex->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
4474  nodestate->m_last_block_announcement = GetTime();
4475  }
4476 
4477  if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
4478  return;
4479 
4480  auto range_flight = mapBlocksInFlight.equal_range(pindex->GetBlockHash());
4481  size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
4482  bool requested_block_from_this_peer{false};
4483 
4484  // Multimap ensures ordering of outstanding requests. It's either empty or first in line.
4485  bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());
4486 
4487  while (range_flight.first != range_flight.second) {
4488  if (range_flight.first->second.first == pfrom.GetId()) {
4489  requested_block_from_this_peer = true;
4490  break;
4491  }
4492  range_flight.first++;
4493  }
4494 
4495  if (pindex->nChainWork <= m_chainman.ActiveChain().Tip()->nChainWork || // We know something better
4496  pindex->nTx != 0) { // We had this block at some point, but pruned it
4497  if (requested_block_from_this_peer) {
4498  // We requested this block for some reason, but our mempool will probably be useless
4499  // so we just grab the block via normal getdata
4500  std::vector<CInv> vInv(1);
4501  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
4502  MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv);
4503  }
4504  return;
4505  }
4506 
4507  // If we're not close to tip yet, give up and let parallel block fetch work its magic
4508  if (!already_in_flight && !CanDirectFetch()) {
4509  return;
4510  }
4511 
4512  // We want to be a bit conservative just to be extra careful about DoS
4513  // possibilities in compact block processing...
4514  if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) {
4515  if ((already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK && nodestate->vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
4516  requested_block_from_this_peer) {
4517  std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
4518  if (!BlockRequested(pfrom.GetId(), *pindex, &queuedBlockIt)) {
4519  if (!(*queuedBlockIt)->partialBlock)
4520  (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool));
4521  else {
4522  // The block was already in flight using compact blocks from the same peer
4523  LogPrint(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
4524  return;
4525  }
4526  }
4527 
4528  PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
4529  ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
4530  if (status == READ_STATUS_INVALID) {
4531  RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
4532  Misbehaving(*peer, 100, "invalid compact block");
4533  return;
4534  } else if (status == READ_STATUS_FAILED) {
4535  if (first_in_flight) {
4536  // Duplicate txindexes, the block is now in-flight, so just request it
4537  std::vector<CInv> vInv(1);
4538  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
4539  MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv);
4540  } else {
4541  // Give up for this peer and wait for other peer(s)
4542  RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId());
4543  }
4544  return;
4545  }
4546 
4548  for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
4549  if (!partialBlock.IsTxAvailable(i))
4550  req.indexes.push_back(i);
4551  }
4552  if (req.indexes.empty()) {
4553  fProcessBLOCKTXN = true;
4554  } else if (first_in_flight) {
4555  // We will try to round-trip any compact blocks we get on failure,
4556  // as long as it's first...
4557  req.blockhash = pindex->GetBlockHash();
4558  MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req);
4559  } else if (pfrom.m_bip152_highbandwidth_to &&
4560  (!pfrom.IsInboundConn() ||
4561  IsBlockRequestedFromOutbound(blockhash) ||
4562  already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK - 1)) {
4563  // ... or it's a hb relay peer and:
4564  // - peer is outbound, or
4565  // - we already have an outbound attempt in flight(so we'll take what we can get), or
4566  // - it's not the final parallel download slot (which we may reserve for first outbound)
4567  req.blockhash = pindex->GetBlockHash();
4568  MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req);
4569  } else {
4570  // Give up for this peer and wait for other peer(s)
4571  RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId());
4572  }
4573  } else {
4574  // This block is either already in flight from a different
4575  // peer, or this peer has too many blocks outstanding to
4576  // download from.
4577  // Optimistically try to reconstruct anyway since we might be
4578  // able to without any round trips.
4579  PartiallyDownloadedBlock tempBlock(&m_mempool);
4580  ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
4581  if (status != READ_STATUS_OK) {
4582  // TODO: don't ignore failures
4583  return;
4584  }
4585  std::vector<CTransactionRef> dummy;
4586  status = tempBlock.FillBlock(*pblock, dummy);
4587  if (status == READ_STATUS_OK) {
4588  fBlockReconstructed = true;
4589  }
4590  }
4591  } else {
4592  if (requested_block_from_this_peer) {
4593  // We requested this block, but its far into the future, so our
4594  // mempool will probably be useless - request the block normally
4595  std::vector<CInv> vInv(1);
4596  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
4597  MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv);
4598  return;
4599  } else {
4600  // If this was an announce-cmpctblock, we want the same treatment as a header message
4601  fRevertToHeaderProcessing = true;
4602  }
4603  }
4604  } // cs_main
4605 
4606  if (fProcessBLOCKTXN) {
4607  BlockTransactions txn;
4608  txn.blockhash = blockhash;
4609  return ProcessCompactBlockTxns(pfrom, *peer, txn);
4610  }
4611 
4612  if (fRevertToHeaderProcessing) {
4613  // Headers received from HB compact block peers are permitted to be
4614  // relayed before full validation (see BIP 152), so we don't want to disconnect
4615  // the peer if the header turns out to be for an invalid block.
4616  // Note that if a peer tries to build on an invalid chain, that
4617  // will be detected and the peer will be disconnected/discouraged.
4618  return ProcessHeadersMessage(pfrom, *peer, {cmpctblock.header}, /*via_compact_block=*/true);
4619  }
4620 
4621  if (fBlockReconstructed) {
4622  // If we got here, we were able to optimistically reconstruct a
4623  // block that is in flight from some other peer.
4624  {
4625  LOCK(cs_main);
4626  mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false));
4627  }
4628  // Setting force_processing to true means that we bypass some of
4629  // our anti-DoS protections in AcceptBlock, which filters
4630  // unrequested blocks that might be trying to waste our resources
4631  // (eg disk space). Because we only try to reconstruct blocks when
4632  // we're close to caught up (via the CanDirectFetch() requirement
4633  // above, combined with the behavior of not requesting blocks until
4634  // we have a chain with at least the minimum chain work), and we ignore
4635  // compact blocks with less work than our tip, it is safe to treat
4636  // reconstructed compact blocks as having been requested.
4637  ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true);
4638  LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
4639  if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
4640  // Clear download state for this block, which is in
4641  // process from some other peer. We do this after calling
4642  // ProcessNewBlock so that a malleated cmpctblock announcement
4643  // can't be used to interfere with block relay.
4644  RemoveBlockRequest(pblock->GetHash(), std::nullopt);
4645  }
4646  }
4647  return;
4648  }
4649 
4650  if (msg_type == NetMsgType::BLOCKTXN)
4651  {
4652  // Ignore blocktxn received while importing
4653  if (m_chainman.m_blockman.LoadingBlocks()) {
4654  LogPrint(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId());
4655  return;
4656  }
4657 
4658  BlockTransactions resp;
4659  vRecv >> resp;
4660 
4661  return ProcessCompactBlockTxns(pfrom, *peer, resp);
4662  }
4663 
4664  if (msg_type == NetMsgType::HEADERS)
4665  {
4666  // Ignore headers received while importing
4667  if (m_chainman.m_blockman.LoadingBlocks()) {
4668  LogPrint(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId());
4669  return;
4670  }
4671 
4672  // Assume that this is in response to any outstanding getheaders
4673  // request we may have sent, and clear out the time of our last request
4674  peer->m_last_getheaders_timestamp = {};
4675 
4676  std::vector<CBlockHeader> headers;
4677 
4678  // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
4679  unsigned int nCount = ReadCompactSize(vRecv);
4680  if (nCount > MAX_HEADERS_RESULTS) {
4681  Misbehaving(*peer, 20, strprintf("headers message size = %u", nCount));
4682  return;
4683  }
4684  headers.resize(nCount);
4685  for (unsigned int n = 0; n < nCount; n++) {
4686  vRecv >> headers[n];
4687  ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
4688  }
4689 
4690  ProcessHeadersMessage(pfrom, *peer, std::move(headers), /*via_compact_block=*/false);
4691 
4692  // Check if the headers presync progress needs to be reported to validation.
4693  // This needs to be done without holding the m_headers_presync_mutex lock.
4694  if (m_headers_presync_should_signal.exchange(false)) {
4695  HeadersPresyncStats stats;
4696  {
4697  LOCK(m_headers_presync_mutex);
4698  auto it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
4699  if (it != m_headers_presync_stats.end()) stats = it->second;
4700  }
4701  if (stats.second) {
4702  m_chainman.ReportHeadersPresync(stats.first, stats.second->first, stats.second->second);
4703  }
4704  }
4705 
4706  return;
4707  }
4708 
4709  if (msg_type == NetMsgType::BLOCK)
4710  {
4711  // Ignore block received while importing
4712  if (m_chainman.m_blockman.LoadingBlocks()) {
4713  LogPrint(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId());
4714  return;
4715  }
4716 
4717  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4718  vRecv >> TX_WITH_WITNESS(*pblock);
4719 
4720  LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId());
4721 
4722  const CBlockIndex* prev_block{WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock))};
4723 
4724  // Check for possible mutation if it connects to something we know so we can check for DEPLOYMENT_SEGWIT being active
4725  if (prev_block && IsBlockMutated(/*block=*/*pblock,
4726  /*check_witness_root=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT))) {
4727  LogDebug(BCLog::NET, "Received mutated block from peer=%d\n", peer->m_id);
4728  Misbehaving(*peer, 100, "mutated block");
4729  WITH_LOCK(cs_main, RemoveBlockRequest(pblock->GetHash(), peer->m_id));
4730  return;
4731  }
4732 
4733  bool forceProcessing = false;
4734  const uint256 hash(pblock->GetHash());
4735  bool min_pow_checked = false;
4736  {
4737  LOCK(cs_main);
4738  // Always process the block if we requested it, since we may
4739  // need it even when it's not a candidate for a new best tip.
4740  forceProcessing = IsBlockRequested(hash);
4741  RemoveBlockRequest(hash, pfrom.GetId());
4742  // mapBlockSource is only used for punishing peers and setting
4743  // which peers send us compact blocks, so the race between here and
4744  // cs_main in ProcessNewBlock is fine.
4745  mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
4746 
4747  // Check work on this block against our anti-dos thresholds.
4748  if (prev_block && prev_block->nChainWork + CalculateHeadersWork({pblock->GetBlockHeader()}) >= GetAntiDoSWorkThreshold()) {
4749  min_pow_checked = true;
4750  }
4751  }
4752  ProcessBlock(pfrom, pblock, forceProcessing, min_pow_checked);
4753  return;
4754  }
4755 
4756  if (msg_type == NetMsgType::GETADDR) {
4757  // This asymmetric behavior for inbound and outbound connections was introduced
4758  // to prevent a fingerprinting attack: an attacker can send specific fake addresses
4759  // to users' AddrMan and later request them by sending getaddr messages.
4760  // Making nodes which are behind NAT and can only make outgoing connections ignore
4761  // the getaddr message mitigates the attack.
4762  if (!pfrom.IsInboundConn()) {
4763  LogPrint(BCLog::NET, "Ignoring \"getaddr\" from %s connection. peer=%d\n", pfrom.ConnectionTypeAsString(), pfrom.GetId());
4764  return;
4765  }
4766 
4767  // Since this must be an inbound connection, SetupAddressRelay will
4768  // never fail.
4769  Assume(SetupAddressRelay(pfrom, *peer));
4770 
4771  // Only send one GetAddr response per connection to reduce resource waste
4772  // and discourage addr stamping of INV announcements.
4773  if (peer->m_getaddr_recvd) {
4774  LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId());
4775  return;
4776  }
4777  peer->m_getaddr_recvd = true;
4778 
4779  peer->m_addrs_to_send.clear();
4780  std::vector<CAddress> vAddr;
4782  vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND, /*network=*/std::nullopt);
4783  } else {
4784  vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
4785  }
4786  for (const CAddress &addr : vAddr) {
4787  PushAddress(*peer, addr);
4788  }
4789  return;
4790  }
4791 
4792  if (msg_type == NetMsgType::MEMPOOL) {
4793  // Only process received mempool messages if we advertise NODE_BLOOM
4794  // or if the peer has mempool permissions.
4795  if (!(peer->m_our_services & NODE_BLOOM) && !pfrom.HasPermission(NetPermissionFlags::Mempool))
4796  {
4798  {
4799  LogPrint(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom.GetId());
4800  pfrom.fDisconnect = true;
4801  }
4802  return;
4803  }
4804 
4805  if (m_connman.OutboundTargetReached(false) && !pfrom.HasPermission(NetPermissionFlags::Mempool))
4806  {
4808  {
4809  LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom.GetId());
4810  pfrom.fDisconnect = true;
4811  }
4812  return;
4813  }
4814 
4815  if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4816  LOCK(tx_relay->m_tx_inventory_mutex);
4817  tx_relay->m_send_mempool = true;
4818  }
4819  return;
4820  }
4821 
4822  if (msg_type == NetMsgType::PING) {
4823  if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
4824  uint64_t nonce = 0;
4825  vRecv >> nonce;
4826  // Echo the message back with the nonce. This allows for two useful features:
4827  //
4828  // 1) A remote node can quickly check if the connection is operational
4829  // 2) Remote nodes can measure the latency of the network thread. If this node
4830  // is overloaded it won't respond to pings quickly and the remote node can
4831  // avoid sending us more work, like chain download requests.
4832  //
4833  // The nonce stops the remote getting confused between different pings: without
4834  // it, if the remote node sends a ping once per second and this node takes 5
4835  // seconds to respond to each, the 5th ping the remote sends would appear to
4836  // return very quickly.
4837  MakeAndPushMessage(pfrom, NetMsgType::PONG, nonce);
4838  }
4839  return;
4840  }
4841 
4842  if (msg_type == NetMsgType::PONG) {
4843  const auto ping_end = time_received;
4844  uint64_t nonce = 0;
4845  size_t nAvail = vRecv.in_avail();
4846  bool bPingFinished = false;
4847  std::string sProblem;
4848 
4849  if (nAvail >= sizeof(nonce)) {
4850  vRecv >> nonce;
4851 
4852  // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
4853  if (peer->m_ping_nonce_sent != 0) {
4854  if (nonce == peer->m_ping_nonce_sent) {
4855  // Matching pong received, this ping is no longer outstanding
4856  bPingFinished = true;
4857  const auto ping_time = ping_end - peer->m_ping_start.load();
4858  if (ping_time.count() >= 0) {
4859  // Let connman know about this successful ping-pong
4860  pfrom.PongReceived(ping_time);
4861  } else {
4862  // This should never happen
4863  sProblem = "Timing mishap";
4864  }
4865  } else {
4866  // Nonce mismatches are normal when pings are overlapping
4867  sProblem = "Nonce mismatch";
4868  if (nonce == 0) {
4869  // This is most likely a bug in another implementation somewhere; cancel this ping
4870  bPingFinished = true;
4871  sProblem = "Nonce zero";
4872  }
4873  }
4874  } else {
4875  sProblem = "Unsolicited pong without ping";
4876  }
4877  } else {
4878  // This is most likely a bug in another implementation somewhere; cancel this ping
4879  bPingFinished = true;
4880  sProblem = "Short payload";
4881  }
4882 
4883  if (!(sProblem.empty())) {
4884  LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
4885  pfrom.GetId(),
4886  sProblem,
4887  peer->m_ping_nonce_sent,
4888  nonce,
4889  nAvail);
4890  }
4891  if (bPingFinished) {
4892  peer->m_ping_nonce_sent = 0;
4893  }
4894  return;
4895  }
4896 
4897  if (msg_type == NetMsgType::FILTERLOAD) {
4898  if (!(peer->m_our_services & NODE_BLOOM)) {
4899  LogPrint(BCLog::NET, "filterload received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
4900  pfrom.fDisconnect = true;
4901  return;
4902  }
4903  CBloomFilter filter;
4904  vRecv >> filter;
4905 
4906  if (!filter.IsWithinSizeConstraints())
4907  {
4908  // There is no excuse for sending a too-large filter
4909  Misbehaving(*peer, 100, "too-large bloom filter");
4910  } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4911  {
4912  LOCK(tx_relay->m_bloom_filter_mutex);
4913  tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
4914  tx_relay->m_relay_txs = true;
4915  }
4916  pfrom.m_bloom_filter_loaded = true;
4917  pfrom.m_relays_txs = true;
4918  }
4919  return;
4920  }
4921 
4922  if (msg_type == NetMsgType::FILTERADD) {
4923  if (!(peer->m_our_services & NODE_BLOOM)) {
4924  LogPrint(BCLog::NET, "filteradd received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
4925  pfrom.fDisconnect = true;
4926  return;
4927  }
4928  std::vector<unsigned char> vData;
4929  vRecv >> vData;
4930 
4931  // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
4932  // and thus, the maximum size any matched object can have) in a filteradd message
4933  bool bad = false;
4934  if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
4935  bad = true;
4936  } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4937  LOCK(tx_relay->m_bloom_filter_mutex);
4938  if (tx_relay->m_bloom_filter) {
4939  tx_relay->m_bloom_filter->insert(vData);
4940  } else {
4941  bad = true;
4942  }
4943  }
4944  if (bad) {
4945  Misbehaving(*peer, 100, "bad filteradd message");
4946  }
4947  return;
4948  }
4949 
4950  if (msg_type == NetMsgType::FILTERCLEAR) {
4951  if (!(peer->m_our_services & NODE_BLOOM)) {
4952  LogPrint(BCLog::NET, "filterclear received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
4953  pfrom.fDisconnect = true;
4954  return;
4955  }
4956  auto tx_relay = peer->GetTxRelay();
4957  if (!tx_relay) return;
4958 
4959  {
4960  LOCK(tx_relay->m_bloom_filter_mutex);
4961  tx_relay->m_bloom_filter = nullptr;
4962  tx_relay->m_relay_txs = true;
4963  }
4964  pfrom.m_bloom_filter_loaded = false;
4965  pfrom.m_relays_txs = true;
4966  return;
4967  }
4968 
4969  if (msg_type == NetMsgType::FEEFILTER) {
4970  CAmount newFeeFilter = 0;
4971  vRecv >> newFeeFilter;
4972  if (MoneyRange(newFeeFilter)) {
4973  if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4974  tx_relay->m_fee_filter_received = newFeeFilter;
4975  }
4976  LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
4977  }
4978  return;
4979  }
4980 
4981  if (msg_type == NetMsgType::GETCFILTERS) {
4982  ProcessGetCFilters(pfrom, *peer, vRecv);
4983  return;
4984  }
4985 
4986  if (msg_type == NetMsgType::GETCFHEADERS) {
4987  ProcessGetCFHeaders(pfrom, *peer, vRecv);
4988  return;
4989  }
4990 
4991  if (msg_type == NetMsgType::GETCFCHECKPT) {
4992  ProcessGetCFCheckPt(pfrom, *peer, vRecv);
4993  return;
4994  }
4995 
4996  if (msg_type == NetMsgType::NOTFOUND) {
4997  std::vector<CInv> vInv;
4998  vRecv >> vInv;
5000  LOCK(::cs_main);
5001  for (CInv &inv : vInv) {
5002  if (inv.IsGenTxMsg()) {
5003  // If we receive a NOTFOUND message for a tx we requested, mark the announcement for it as
5004  // completed in TxRequestTracker.
5005  m_txrequest.ReceivedResponse(pfrom.GetId(), inv.hash);
5006  }
5007  }
5008  }
5009  return;
5010  }
5011 
5012  // Ignore unknown commands for extensibility
5013  LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
5014  return;
5015 }
5016 
5017 bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer)
5018 {
5019  {
5020  LOCK(peer.m_misbehavior_mutex);
5021 
5022  // There's nothing to do if the m_should_discourage flag isn't set
5023  if (!peer.m_should_discourage) return false;
5024 
5025  peer.m_should_discourage = false;
5026  } // peer.m_misbehavior_mutex
5027 
5029  // We never disconnect or discourage peers for bad behavior if they have NetPermissionFlags::NoBan permission
5030  LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
5031  return false;
5032  }
5033 
5034  if (pnode.IsManualConn()) {
5035  // We never disconnect or discourage manual peers for bad behavior
5036  LogPrintf("Warning: not punishing manually connected peer %d!\n", peer.m_id);
5037  return false;
5038  }
5039 
5040  if (pnode.addr.IsLocal()) {
5041  // We disconnect local peers for bad behavior but don't discourage (since that would discourage
5042  // all peers on the same local address)
5043  LogPrint(BCLog::NET, "Warning: disconnecting but not discouraging %s peer %d!\n",
5044  pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id);
5045  pnode.fDisconnect = true;
5046  return true;
5047  }
5048 
5049  // Normal case: Disconnect the peer and discourage all nodes sharing the address
5050  LogPrint(BCLog::NET, "Disconnecting and discouraging peer %d!\n", peer.m_id);
5051  if (m_banman) m_banman->Discourage(pnode.addr);
5052  m_connman.DisconnectNode(pnode.addr);
5053  return true;
5054 }
5055 
bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
{
    AssertLockHeld(g_msgproc_mutex);

    PeerRef peer = GetPeerRef(pfrom->GetId());
    if (peer == nullptr) return false;

    // Service any queued getdata requests first, so responses to earlier
    // requests go out before we take a new message off the wire.
    {
        LOCK(peer->m_getdata_requests_mutex);
        if (!peer->m_getdata_requests.empty()) {
            ProcessGetData(*pfrom, *peer, interruptMsgProc);
        }
    }

    const bool processed_orphan = ProcessOrphanTx(*peer);

    if (pfrom->fDisconnect)
        return false;

    // If we reconsidered an orphan, signal more work is available for this
    // peer without polling a new network message this round.
    if (processed_orphan) return true;

    // this maintains the order of responses
    // and prevents m_getdata_requests from growing unbounded
    {
        LOCK(peer->m_getdata_requests_mutex);
        if (!peer->m_getdata_requests.empty()) return true;
    }

    // Don't bother if send buffer is too full to respond anyway
    if (pfrom->fPauseSend) return false;

    auto poll_result{pfrom->PollMessage()};
    if (!poll_result) {
        // No message to process
        return false;
    }

    CNetMessage& msg{poll_result->first};
    bool fMoreWork = poll_result->second;

    TRACE6(net, inbound_message,
        pfrom->GetId(),
        pfrom->m_addr_name.c_str(),
        pfrom->ConnectionTypeAsString().c_str(),
        msg.m_type.c_str(),
        msg.m_recv.size(),
        msg.m_recv.data()
    );

    if (m_opts.capture_messages) {
        CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true);
    }

    try {
        ProcessMessage(*pfrom, msg.m_type, msg.m_recv, msg.m_time, interruptMsgProc);
        if (interruptMsgProc) return false;
        // If the message queued getdata requests, there is more work to do.
        {
            LOCK(peer->m_getdata_requests_mutex);
            if (!peer->m_getdata_requests.empty()) fMoreWork = true;
        }
        // Does this peer have an orphan ready to reconsider?
        // (Note: we may have provided a parent for an orphan provided
        // by another peer that was already processed; in that case,
        // the extra work may not be noticed, possibly resulting in an
        // unnecessary 100ms delay)
        if (m_orphanage.HaveTxToReconsider(peer->m_id)) fMoreWork = true;
    } catch (const std::exception& e) {
        LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name());
    } catch (...) {
        LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size);
    }

    return fMoreWork;
}
5130 
5131 void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds)
5132 {
5134 
5135  CNodeState &state = *State(pto.GetId());
5136 
5137  if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) {
5138  // This is an outbound peer subject to disconnection if they don't
5139  // announce a block with as much work as the current tip within
5140  // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
5141  // their chain has more work than ours, we should sync to it,
5142  // unless it's invalid, in which case we should find that out and
5143  // disconnect from them elsewhere).
5144  if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork) {
5145  if (state.m_chain_sync.m_timeout != 0s) {
5146  state.m_chain_sync.m_timeout = 0s;
5147  state.m_chain_sync.m_work_header = nullptr;
5148  state.m_chain_sync.m_sent_getheaders = false;
5149  }
5150  } else if (state.m_chain_sync.m_timeout == 0s || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
5151  // Our best block known by this peer is behind our tip, and we're either noticing
5152  // that for the first time, OR this peer was able to catch up to some earlier point
5153  // where we checked against our tip.
5154  // Either way, set a new timeout based on current tip.
5155  state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
5156  state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
5157  state.m_chain_sync.m_sent_getheaders = false;
5158  } else if (state.m_chain_sync.m_timeout > 0s && time_in_seconds > state.m_chain_sync.m_timeout) {
5159  // No evidence yet that our peer has synced to a chain with work equal to that
5160  // of our tip, when we first detected it was behind. Send a single getheaders
5161  // message to give the peer a chance to update us.
5162  if (state.m_chain_sync.m_sent_getheaders) {
5163  // They've run out of time to catch up!
5164  LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>");
5165  pto.fDisconnect = true;
5166  } else {
5167  assert(state.m_chain_sync.m_work_header);
5168  // Here, we assume that the getheaders message goes out,
5169  // because it'll either go out or be skipped because of a
5170  // getheaders in-flight already, in which case the peer should
5171  // still respond to us with a sufficiently high work chain tip.
5172  MaybeSendGetHeaders(pto,
5173  GetLocator(state.m_chain_sync.m_work_header->pprev),
5174  peer);
5175  LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
5176  state.m_chain_sync.m_sent_getheaders = true;
5177  // Bump the timeout to allow a response, which could clear the timeout
5178  // (if the response shows the peer has synced), reset the timeout (if
5179  // the peer syncs to the required work but not to our tip), or result
5180  // in disconnect (if we advance to the timeout and pindexBestKnownBlock
5181  // has not sufficiently progressed)
5182  state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
5183  }
5184  }
5185  }
5186 }
5187 
5188 void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now)
5189 {
5190  // If we have any extra block-relay-only peers, disconnect the youngest unless
5191  // it's given us a block -- in which case, compare with the second-youngest, and
5192  // out of those two, disconnect the peer who least recently gave us a block.
5193  // The youngest block-relay-only peer would be the extra peer we connected
5194  // to temporarily in order to sync our tip; see net.cpp.
5195  // Note that we use higher nodeid as a measure for most recent connection.
5196  if (m_connman.GetExtraBlockRelayCount() > 0) {
5197  std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0}, next_youngest_peer{-1, 0};
5198 
5199  m_connman.ForEachNode([&](CNode* pnode) {
5200  if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) return;
5201  if (pnode->GetId() > youngest_peer.first) {
5202  next_youngest_peer = youngest_peer;
5203  youngest_peer.first = pnode->GetId();
5204  youngest_peer.second = pnode->m_last_block_time;
5205  }
5206  });
5207  NodeId to_disconnect = youngest_peer.first;
5208  if (youngest_peer.second > next_youngest_peer.second) {
5209  // Our newest block-relay-only peer gave us a block more recently;
5210  // disconnect our second youngest.
5211  to_disconnect = next_youngest_peer.first;
5212  }
5213  m_connman.ForNode(to_disconnect, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
5215  // Make sure we're not getting a block right now, and that
5216  // we've been connected long enough for this eviction to happen
5217  // at all.
5218  // Note that we only request blocks from a peer if we learn of a
5219  // valid headers chain with at least as much work as our tip.
5220  CNodeState *node_state = State(pnode->GetId());
5221  if (node_state == nullptr ||
5222  (now - pnode->m_connected >= MINIMUM_CONNECT_TIME && node_state->vBlocksInFlight.empty())) {
5223  pnode->fDisconnect = true;
5224  LogPrint(BCLog::NET, "disconnecting extra block-relay-only peer=%d (last block received at time %d)\n",
5225  pnode->GetId(), count_seconds(pnode->m_last_block_time));
5226  return true;
5227  } else {
5228  LogPrint(BCLog::NET, "keeping block-relay-only peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
5229  pnode->GetId(), count_seconds(pnode->m_connected), node_state->vBlocksInFlight.size());
5230  }
5231  return false;
5232  });
5233  }
5234 
5235  // Check whether we have too many outbound-full-relay peers
5236  if (m_connman.GetExtraFullOutboundCount() > 0) {
5237  // If we have more outbound-full-relay peers than we target, disconnect one.
5238  // Pick the outbound-full-relay peer that least recently announced
5239  // us a new block, with ties broken by choosing the more recent
5240  // connection (higher node id)
5241  // Protect peers from eviction if we don't have another connection
5242  // to their network, counting both outbound-full-relay and manual peers.
5243  NodeId worst_peer = -1;
5244  int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
5245 
5246  m_connman.ForEachNode([&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_connman.GetNodesMutex()) {
5248 
5249  // Only consider outbound-full-relay peers that are not already
5250  // marked for disconnection
5251  if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) return;
5252  CNodeState *state = State(pnode->GetId());
5253  if (state == nullptr) return; // shouldn't be possible, but just in case
5254  // Don't evict our protected peers
5255  if (state->m_chain_sync.m_protect) return;
5256  // If this is the only connection on a particular network that is
5257  // OUTBOUND_FULL_RELAY or MANUAL, protect it.
5258  if (!m_connman.MultipleManualOrFullOutboundConns(pnode->addr.GetNetwork())) return;
5259  if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
5260  worst_peer = pnode->GetId();
5261  oldest_block_announcement = state->m_last_block_announcement;
5262  }
5263  });
5264  if (worst_peer != -1) {
5265  bool disconnected = m_connman.ForNode(worst_peer, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
5267 
5268  // Only disconnect a peer that has been connected to us for
5269  // some reasonable fraction of our check-frequency, to give
5270  // it time for new information to have arrived.
5271  // Also don't disconnect any peer we're trying to download a
5272  // block from.
5273  CNodeState &state = *State(pnode->GetId());
5274  if (now - pnode->m_connected > MINIMUM_CONNECT_TIME && state.vBlocksInFlight.empty()) {
5275  LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
5276  pnode->fDisconnect = true;
5277  return true;
5278  } else {
5279  LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
5280  pnode->GetId(), count_seconds(pnode->m_connected), state.vBlocksInFlight.size());
5281  return false;
5282  }
5283  });
5284  if (disconnected) {
5285  // If we disconnected an extra peer, that means we successfully
5286  // connected to at least one peer after the last time we
5287  // detected a stale tip. Don't try any more extra peers until
5288  // we next detect a stale tip, to limit the load we put on the
5289  // network from these extra connections.
5290  m_connman.SetTryNewOutboundPeer(false);
5291  }
5292  }
5293  }
5294 }
5295 
5296 void PeerManagerImpl::CheckForStaleTipAndEvictPeers()
5297 {
5298  LOCK(cs_main);
5299 
5300  auto now{GetTime<std::chrono::seconds>()};
5301 
5302  EvictExtraOutboundPeers(now);
5303 
5304  if (now > m_stale_tip_check_time) {
5305  // Check whether our tip is stale, and if so, allow using an extra
5306  // outbound peer
5307  if (!m_chainman.m_blockman.LoadingBlocks() && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) {
5308  LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n",
5309  count_seconds(now - m_last_tip_update.load()));
5310  m_connman.SetTryNewOutboundPeer(true);
5311  } else if (m_connman.GetTryNewOutboundPeer()) {
5312  m_connman.SetTryNewOutboundPeer(false);
5313  }
5314  m_stale_tip_check_time = now + STALE_CHECK_INTERVAL;
5315  }
5316 
5317  if (!m_initial_sync_finished && CanDirectFetch()) {
5318  m_connman.StartExtraBlockRelayPeers();
5319  m_initial_sync_finished = true;
5320  }
5321 }
5322 
5323 void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now)
5324 {
5325  if (m_connman.ShouldRunInactivityChecks(node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
5326  peer.m_ping_nonce_sent &&
5327  now > peer.m_ping_start.load() + TIMEOUT_INTERVAL)
5328  {
5329  // The ping timeout is using mocktime. To disable the check during
5330  // testing, increase -peertimeout.
5331  LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), peer.m_id);
5332  node_to.fDisconnect = true;
5333  return;
5334  }
5335 
5336  bool pingSend = false;
5337 
5338  if (peer.m_ping_queued) {
5339  // RPC ping request by user
5340  pingSend = true;
5341  }
5342 
5343  if (peer.m_ping_nonce_sent == 0 && now > peer.m_ping_start.load() + PING_INTERVAL) {
5344  // Ping automatically sent as a latency probe & keepalive.
5345  pingSend = true;
5346  }
5347 
5348  if (pingSend) {
5349  uint64_t nonce;
5350  do {
5351  nonce = GetRand<uint64_t>();
5352  } while (nonce == 0);
5353  peer.m_ping_queued = false;
5354  peer.m_ping_start = now;
5355  if (node_to.GetCommonVersion() > BIP0031_VERSION) {
5356  peer.m_ping_nonce_sent = nonce;
5357  MakeAndPushMessage(node_to, NetMsgType::PING, nonce);
5358  } else {
5359  // Peer is too old to support ping command with nonce, pong will never arrive.
5360  peer.m_ping_nonce_sent = 0;
5361  MakeAndPushMessage(node_to, NetMsgType::PING);
5362  }
5363  }
5364 }
5365 
5366 void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time)
5367 {
5368  // Nothing to do for non-address-relay peers
5369  if (!peer.m_addr_relay_enabled) return;
5370 
5371  LOCK(peer.m_addr_send_times_mutex);
5372  // Periodically advertise our local address to the peer.
5373  if (fListen && !m_chainman.IsInitialBlockDownload() &&
5374  peer.m_next_local_addr_send < current_time) {
5375  // If we've sent before, clear the bloom filter for the peer, so that our
5376  // self-announcement will actually go out.
5377  // This might be unnecessary if the bloom filter has already rolled
5378  // over since our last self-announcement, but there is only a small
5379  // bandwidth cost that we can incur by doing this (which happens
5380  // once a day on average).
5381  if (peer.m_next_local_addr_send != 0us) {
5382  peer.m_addr_known->reset();
5383  }
5384  if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) {
5385  CAddress local_addr{*local_service, peer.m_our_services, Now<NodeSeconds>()};
5386  PushAddress(peer, local_addr);
5387  }
5388  peer.m_next_local_addr_send = GetExponentialRand(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
5389  }
5390 
5391  // We sent an `addr` message to this peer recently. Nothing more to do.
5392  if (current_time <= peer.m_next_addr_send) return;
5393 
5394  peer.m_next_addr_send = GetExponentialRand(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
5395 
5396  if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) {
5397  // Should be impossible since we always check size before adding to
5398  // m_addrs_to_send. Recover by trimming the vector.
5399  peer.m_addrs_to_send.resize(MAX_ADDR_TO_SEND);
5400  }
5401 
5402  // Remove addr records that the peer already knows about, and add new
5403  // addrs to the m_addr_known filter on the same pass.
5404  auto addr_already_known = [&peer](const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) {
5405  bool ret = peer.m_addr_known->contains(addr.GetKey());
5406  if (!ret) peer.m_addr_known->insert(addr.GetKey());
5407  return ret;
5408  };
5409  peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(), peer.m_addrs_to_send.end(), addr_already_known),
5410  peer.m_addrs_to_send.end());
5411 
5412  // No addr messages to send
5413  if (peer.m_addrs_to_send.empty()) return;
5414 
5415  if (peer.m_wants_addrv2) {
5416  MakeAndPushMessage(node, NetMsgType::ADDRV2, CAddress::V2_NETWORK(peer.m_addrs_to_send));
5417  } else {
5418  MakeAndPushMessage(node, NetMsgType::ADDR, CAddress::V1_NETWORK(peer.m_addrs_to_send));
5419  }
5420  peer.m_addrs_to_send.clear();
5421 
5422  // we only send the big addr message once
5423  if (peer.m_addrs_to_send.capacity() > 40) {
5424  peer.m_addrs_to_send.shrink_to_fit();
5425  }
5426 }
5427 
5428 void PeerManagerImpl::MaybeSendSendHeaders(CNode& node, Peer& peer)
5429 {
5430  // Delay sending SENDHEADERS (BIP 130) until we're done with an
5431  // initial-headers-sync with this peer. Receiving headers announcements for
5432  // new blocks while trying to sync their headers chain is problematic,
5433  // because of the state tracking done.
5434  if (!peer.m_sent_sendheaders && node.GetCommonVersion() >= SENDHEADERS_VERSION) {
5435  LOCK(cs_main);
5436  CNodeState &state = *State(node.GetId());
5437  if (state.pindexBestKnownBlock != nullptr &&
5438  state.pindexBestKnownBlock->nChainWork > m_chainman.MinimumChainWork()) {
5439  // Tell our peer we prefer to receive headers rather than inv's
5440  // We send this to non-NODE NETWORK peers as well, because even
5441  // non-NODE NETWORK peers can announce blocks (such as pruning
5442  // nodes)
5443  MakeAndPushMessage(node, NetMsgType::SENDHEADERS);
5444  peer.m_sent_sendheaders = true;
5445  }
5446  }
5447 }
5448 
5449 void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::microseconds current_time)
5450 {
5451  if (m_opts.ignore_incoming_txs) return;
5452  if (pto.GetCommonVersion() < FEEFILTER_VERSION) return;
5453  // peers with the forcerelay permission should not filter txs to us
5455  // Don't send feefilter messages to outbound block-relay-only peers since they should never announce
5456  // transactions to us, regardless of feefilter state.
5457  if (pto.IsBlockOnlyConn()) return;
5458 
5459  CAmount currentFilter = m_mempool.GetMinFee().GetFeePerK();
5460 
5461  if (m_chainman.IsInitialBlockDownload()) {
5462  // Received tx-inv messages are discarded when the active
5463  // chainstate is in IBD, so tell the peer to not send them.
5464  currentFilter = MAX_MONEY;
5465  } else {
5466  static const CAmount MAX_FILTER{m_fee_filter_rounder.round(MAX_MONEY)};
5467  if (peer.m_fee_filter_sent == MAX_FILTER) {
5468  // Send the current filter if we sent MAX_FILTER previously
5469  // and made it out of IBD.
5470  peer.m_next_send_feefilter = 0us;
5471  }
5472  }
5473  if (current_time > peer.m_next_send_feefilter) {
5474  CAmount filterToSend = m_fee_filter_rounder.round(currentFilter);
5475  // We always have a fee filter of at least the min relay fee
5476  filterToSend = std::max(filterToSend, m_mempool.m_min_relay_feerate.GetFeePerK());
5477  if (filterToSend != peer.m_fee_filter_sent) {
5478  MakeAndPushMessage(pto, NetMsgType::FEEFILTER, filterToSend);
5479  peer.m_fee_filter_sent = filterToSend;
5480  }
5481  peer.m_next_send_feefilter = GetExponentialRand(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL);
5482  }
5483  // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
5484  // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
5485  else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < peer.m_next_send_feefilter &&
5486  (currentFilter < 3 * peer.m_fee_filter_sent / 4 || currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
5487  peer.m_next_send_feefilter = current_time + GetRandomDuration<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY);
5488  }
5489 }
5490 
5491 namespace {
5492 class CompareInvMempoolOrder
5493 {
5494  CTxMemPool* mp;
5495  bool m_wtxid_relay;
5496 public:
5497  explicit CompareInvMempoolOrder(CTxMemPool *_mempool, bool use_wtxid)
5498  {
5499  mp = _mempool;
5500  m_wtxid_relay = use_wtxid;
5501  }
5502 
5503  bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
5504  {
5505  /* As std::make_heap produces a max-heap, we want the entries with the
5506  * fewest ancestors/highest fee to sort later. */
5507  return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay);
5508  }
5509 };
5510 } // namespace
5511 
5512 bool PeerManagerImpl::RejectIncomingTxs(const CNode& peer) const
5513 {
5514  // block-relay-only peers may never send txs to us
5515  if (peer.IsBlockOnlyConn()) return true;
5516  if (peer.IsFeelerConn()) return true;
5517  // In -blocksonly mode, peers need the 'relay' permission to send txs to us
5518  if (m_opts.ignore_incoming_txs && !peer.HasPermission(NetPermissionFlags::Relay)) return true;
5519  return false;
5520 }
5521 
5522 bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer)
5523 {
5524  // We don't participate in addr relay with outbound block-relay-only
5525  // connections to prevent providing adversaries with the additional
5526  // information of addr traffic to infer the link.
5527  if (node.IsBlockOnlyConn()) return false;
5528 
5529  if (!peer.m_addr_relay_enabled.exchange(true)) {
5530  // During version message processing (non-block-relay-only outbound peers)
5531  // or on first addr-related message we have received (inbound peers), initialize
5532  // m_addr_known.
5533  peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
5534  }
5535 
5536  return true;
5537 }
5538 
5539 bool PeerManagerImpl::SendMessages(CNode* pto)
5540 {
5541  AssertLockHeld(g_msgproc_mutex);
5542 
5543  PeerRef peer = GetPeerRef(pto->GetId());
5544  if (!peer) return false;
5545  const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
5546 
5547  // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
5548  // disconnect misbehaving peers even before the version handshake is complete.
5549  if (MaybeDiscourageAndDisconnect(*pto, *peer)) return true;
5550 
5551  // Don't send anything until the version handshake is complete
5552  if (!pto->fSuccessfullyConnected || pto->fDisconnect)
5553  return true;
5554 
5555  const auto current_time{GetTime<std::chrono::microseconds>()};
5556 
5557  if (pto->IsAddrFetchConn() && current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
5558  LogPrint(BCLog::NET, "addrfetch connection timeout; disconnecting peer=%d\n", pto->GetId());
5559  pto->fDisconnect = true;
5560  return true;
5561  }
5562 
5563  MaybeSendPing(*pto, *peer, current_time);
5564 
5565  // MaybeSendPing may have marked peer for disconnection
5566  if (pto->fDisconnect) return true;
5567 
5568  MaybeSendAddr(*pto, *peer, current_time);
5569 
5570  MaybeSendSendHeaders(*pto, *peer);
5571 
5572  {
5573  LOCK(cs_main);
5574 
5575  CNodeState &state = *State(pto->GetId());
5576 
5577  // Start block sync
5578  if (m_chainman.m_best_header == nullptr) {
5579  m_chainman.m_best_header = m_chainman.ActiveChain().Tip();
5580  }
5581 
5582  // Determine whether we might try initial headers sync or parallel
5583  // block download from this peer -- this mostly affects behavior while
5584  // in IBD (once out of IBD, we sync from all peers).
5585  bool sync_blocks_and_headers_from_peer = false;
5586  if (state.fPreferredDownload) {
5587  sync_blocks_and_headers_from_peer = true;
5588  } else if (CanServeBlocks(*peer) && !pto->IsAddrFetchConn()) {
5589  // Typically this is an inbound peer. If we don't have any outbound
5590  // peers, or if we aren't downloading any blocks from such peers,
5591  // then allow block downloads from this peer, too.
5592  // We prefer downloading blocks from outbound peers to avoid
5593  // putting undue load on (say) some home user who is just making
5594  // outbound connections to the network, but if our only source of
5595  // the latest blocks is from an inbound peer, we have to be sure to
5596  // eventually download it (and not just wait indefinitely for an
5597  // outbound peer to have it).
5598  if (m_num_preferred_download_peers == 0 || mapBlocksInFlight.empty()) {
5599  sync_blocks_and_headers_from_peer = true;
5600  }
5601  }
5602 
5603  if (!state.fSyncStarted && CanServeBlocks(*peer) && !m_chainman.m_blockman.LoadingBlocks()) {
5604  // Only actively request headers from a single peer, unless we're close to today.
5605  if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > NodeClock::now() - 24h) {
5606  const CBlockIndex* pindexStart = m_chainman.m_best_header;
5607  /* If possible, start at the block preceding the currently
5608  best known header. This ensures that we always get a
5609  non-empty list of headers back as long as the peer
5610  is up-to-date. With a non-empty response, we can initialise
5611  the peer's known best block. This wouldn't be possible
5612  if we requested starting at m_chainman.m_best_header and
5613  got back an empty response. */
5614  if (pindexStart->pprev)
5615  pindexStart = pindexStart->pprev;
5616  if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) {
5617  LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height);
5618 
5619  state.fSyncStarted = true;
5620  peer->m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
5621  (
5622  // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
5623  // to maintain precision
5624  std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
5625  Ticks<std::chrono::seconds>(NodeClock::now() - m_chainman.m_best_header->Time()) / consensusParams.nPowTargetSpacing
5626  );
5627  nSyncStarted++;
5628  }
5629  }
5630  }
5631 
5632  //
5633  // Try sending block announcements via headers
5634  //
5635  {
5636  // If we have no more than MAX_BLOCKS_TO_ANNOUNCE in our
5637  // list of block hashes we're relaying, and our peer wants
5638  // headers announcements, then find the first header
5639  // not yet known to our peer but would connect, and send.
5640  // If no header would connect, or if we have too many
5641  // blocks, or if the peer doesn't want headers, just
5642  // add all to the inv queue.
5643  LOCK(peer->m_block_inv_mutex);
5644  std::vector<CBlock> vHeaders;
5645  bool fRevertToInv = ((!peer->m_prefers_headers &&
5646  (!state.m_requested_hb_cmpctblocks || peer->m_blocks_for_headers_relay.size() > 1)) ||
5647  peer->m_blocks_for_headers_relay.size() > MAX_BLOCKS_TO_ANNOUNCE);
5648  const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
5649  ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date
5650 
5651  if (!fRevertToInv) {
5652  bool fFoundStartingHeader = false;
5653  // Try to find first header that our peer doesn't have, and
5654  // then send all headers past that one. If we come across any
5655  // headers that aren't on m_chainman.ActiveChain(), give up.
5656  for (const uint256& hash : peer->m_blocks_for_headers_relay) {
5657  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
5658  assert(pindex);
5659  if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
5660  // Bail out if we reorged away from this block
5661  fRevertToInv = true;
5662  break;
5663  }
5664  if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
5665  // This means that the list of blocks to announce don't
5666  // connect to each other.
5667  // This shouldn't really be possible to hit during
5668  // regular operation (because reorgs should take us to
5669  // a chain that has some block not on the prior chain,
5670  // which should be caught by the prior check), but one
5671  // way this could happen is by using invalidateblock /
5672  // reconsiderblock repeatedly on the tip, causing it to
5673  // be added multiple times to m_blocks_for_headers_relay.
5674  // Robustly deal with this rare situation by reverting
5675  // to an inv.
5676  fRevertToInv = true;
5677  break;
5678  }
5679  pBestIndex = pindex;
5680  if (fFoundStartingHeader) {
5681  // add this to the headers message
5682  vHeaders.emplace_back(pindex->GetBlockHeader());
5683  } else if (PeerHasHeader(&state, pindex)) {
5684  continue; // keep looking for the first new block
5685  } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) {
5686  // Peer doesn't have this header but they do have the prior one.
5687  // Start sending headers.
5688  fFoundStartingHeader = true;
5689  vHeaders.emplace_back(pindex->GetBlockHeader());
5690  } else {
5691  // Peer doesn't have this header or the prior one -- nothing will
5692  // connect, so bail out.
5693  fRevertToInv = true;
5694  break;
5695  }
5696  }
5697  }
5698  if (!fRevertToInv && !vHeaders.empty()) {
5699  if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
5700  // We only send up to 1 block as header-and-ids, as otherwise
5701  // probably means we're doing an initial-ish-sync or they're slow
5702  LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
5703  vHeaders.front().GetHash().ToString(), pto->GetId());
5704 
5705  std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
5706  {
5707  LOCK(m_most_recent_block_mutex);
5708  if (m_most_recent_block_hash == pBestIndex->GetBlockHash()) {
5709  cached_cmpctblock_msg = NetMsg::Make(NetMsgType::CMPCTBLOCK, *m_most_recent_compact_block);
5710  }
5711  }
5712  if (cached_cmpctblock_msg.has_value()) {
5713  PushMessage(*pto, std::move(cached_cmpctblock_msg.value()));
5714  } else {
5715  CBlock block;
5716  const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(block, *pBestIndex)};
5717  assert(ret);
5718  CBlockHeaderAndShortTxIDs cmpctblock{block};
5719  MakeAndPushMessage(*pto, NetMsgType::CMPCTBLOCK, cmpctblock);
5720  }
5721  state.pindexBestHeaderSent = pBestIndex;
5722  } else if (peer->m_prefers_headers) {
5723  if (vHeaders.size() > 1) {
5724  LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
5725  vHeaders.size(),
5726  vHeaders.front().GetHash().ToString(),
5727  vHeaders.back().GetHash().ToString(), pto->GetId());
5728  } else {
5729  LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
5730  vHeaders.front().GetHash().ToString(), pto->GetId());
5731  }
5732  MakeAndPushMessage(*pto, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders));
5733  state.pindexBestHeaderSent = pBestIndex;
5734  } else
5735  fRevertToInv = true;
5736  }
5737  if (fRevertToInv) {
5738  // If falling back to using an inv, just try to inv the tip.
5739  // The last entry in m_blocks_for_headers_relay was our tip at some point
5740  // in the past.
5741  if (!peer->m_blocks_for_headers_relay.empty()) {
5742  const uint256& hashToAnnounce = peer->m_blocks_for_headers_relay.back();
5743  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce);
5744  assert(pindex);
5745 
5746  // Warn if we're announcing a block that is not on the main chain.
5747  // This should be very rare and could be optimized out.
5748  // Just log for now.
5749  if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
5750  LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
5751  hashToAnnounce.ToString(), m_chainman.ActiveChain().Tip()->GetBlockHash().ToString());
5752  }
5753 
5754  // If the peer's chain has this block, don't inv it back.
5755  if (!PeerHasHeader(&state, pindex)) {
5756  peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
5757  LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
5758  pto->GetId(), hashToAnnounce.ToString());
5759  }
5760  }
5761  }
5762  peer->m_blocks_for_headers_relay.clear();
5763  }
5764 
5765  //
5766  // Message: inventory
5767  //
5768  std::vector<CInv> vInv;
5769  {
5770  LOCK(peer->m_block_inv_mutex);
5771  vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(), INVENTORY_BROADCAST_TARGET));
5772 
5773  // Add blocks
5774  for (const uint256& hash : peer->m_blocks_for_inv_relay) {
5775  vInv.emplace_back(MSG_BLOCK, hash);
5776  if (vInv.size() == MAX_INV_SZ) {
5777  MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
5778  vInv.clear();
5779  }
5780  }
5781  peer->m_blocks_for_inv_relay.clear();
5782  }
5783 
5784  if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
5785  LOCK(tx_relay->m_tx_inventory_mutex);
5786  // Check whether periodic sends should happen
5787  bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan);
5788  if (tx_relay->m_next_inv_send_time < current_time) {
5789  fSendTrickle = true;
5790  if (pto->IsInboundConn()) {
5791  tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
5792  } else {
5793  tx_relay->m_next_inv_send_time = GetExponentialRand(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
5794  }
5795  }
5796 
5797  // Time to send but the peer has requested we not relay transactions.
5798  if (fSendTrickle) {
5799  LOCK(tx_relay->m_bloom_filter_mutex);
5800  if (!tx_relay->m_relay_txs) tx_relay->m_tx_inventory_to_send.clear();
5801  }
5802 
5803  // Respond to BIP35 mempool requests
5804  if (fSendTrickle && tx_relay->m_send_mempool) {
5805  auto vtxinfo = m_mempool.infoAll();
5806  tx_relay->m_send_mempool = false;
5807  const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()};
5808 
5809  LOCK(tx_relay->m_bloom_filter_mutex);
5810 
5811  for (const auto& txinfo : vtxinfo) {
5812  CInv inv{
5813  peer->m_wtxid_relay ? MSG_WTX : MSG_TX,
5814  peer->m_wtxid_relay ?
5815  txinfo.tx->GetWitnessHash().ToUint256() :
5816  txinfo.tx->GetHash().ToUint256(),
5817  };
5818  tx_relay->m_tx_inventory_to_send.erase(inv.hash);
5819 
5820  // Don't send transactions that peers will not put into their mempool
5821  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
5822  continue;
5823  }
5824  if (tx_relay->m_bloom_filter) {
5825  if (!tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
5826  }
5827  tx_relay->m_tx_inventory_known_filter.insert(inv.hash);
5828  vInv.push_back(inv);
5829  if (vInv.size() == MAX_INV_SZ) {
5830  MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
5831  vInv.clear();
5832  }
5833  }
5834  }
5835 
5836  // Determine transactions to relay
5837  if (fSendTrickle) {
5838  // Produce a vector with all candidates for sending
5839  std::vector<std::set<uint256>::iterator> vInvTx;
5840  vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
5841  for (std::set<uint256>::iterator it = tx_relay->m_tx_inventory_to_send.begin(); it != tx_relay->m_tx_inventory_to_send.end(); it++) {
5842  vInvTx.push_back(it);
5843  }
5844  const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()};
5845  // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
5846  // A heap is used so that not all items need sorting if only a few are being sent.
5847  CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, peer->m_wtxid_relay);
5848  std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
5849  // No reason to drain out at many times the network's capacity,
5850  // especially since we have many peers and some will draw much shorter delays.
5851  unsigned int nRelayedTransactions = 0;
5852  LOCK(tx_relay->m_bloom_filter_mutex);
5853  size_t broadcast_max{INVENTORY_BROADCAST_TARGET + (tx_relay->m_tx_inventory_to_send.size()/1000)*5};
5854  broadcast_max = std::min<size_t>(INVENTORY_BROADCAST_MAX, broadcast_max);
5855  while (!vInvTx.empty() && nRelayedTransactions < broadcast_max) {
5856  // Fetch the top element from the heap
5857  std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
5858  std::set<uint256>::iterator it = vInvTx.back();
5859  vInvTx.pop_back();
5860  uint256 hash = *it;
5861  CInv inv(peer->m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
5862  // Remove it from the to-be-sent set
5863  tx_relay->m_tx_inventory_to_send.erase(it);
5864  // Check if not in the filter already
5865  if (tx_relay->m_tx_inventory_known_filter.contains(hash)) {
5866  continue;
5867  }
5868  // Not in the mempool anymore? don't bother sending it.
5869  auto txinfo = m_mempool.info(ToGenTxid(inv));
5870  if (!txinfo.tx) {
5871  continue;
5872  }
5873  // Peer told you to not send transactions at that feerate? Don't bother sending it.
5874  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
5875  continue;
5876  }
5877  if (tx_relay->m_bloom_filter && !tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
5878  // Send
5879  vInv.push_back(inv);
5880  nRelayedTransactions++;
5881  if (vInv.size() == MAX_INV_SZ) {
5882  MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
5883  vInv.clear();
5884  }
5885  tx_relay->m_tx_inventory_known_filter.insert(hash);
5886  }
5887 
5888  // Ensure we'll respond to GETDATA requests for anything we've just announced
5889  LOCK(m_mempool.cs);
5890  tx_relay->m_last_inv_sequence = m_mempool.GetSequence();
5891  }
5892  }
5893  if (!vInv.empty())
5894  MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
5895 
5896  // Detect whether we're stalling
5897  auto stalling_timeout = m_block_stalling_timeout.load();
5898  if (state.m_stalling_since.count() && state.m_stalling_since < current_time - stalling_timeout) {
5899  // Stalling only triggers when the block download window cannot move. During normal steady state,
5900  // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
5901  // should only happen during initial block download.
5902  LogPrintf("Peer=%d%s is stalling block download, disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
5903  pto->fDisconnect = true;
5904  // Increase timeout for the next peer so that we don't disconnect multiple peers if our own
5905  // bandwidth is insufficient.
5906  const auto new_timeout = std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX);
5907  if (stalling_timeout != new_timeout && m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
5908  LogPrint(BCLog::NET, "Increased stalling timeout temporarily to %d seconds\n", count_seconds(new_timeout));
5909  }
5910  return true;
5911  }
5912  // In case there is a block that has been in flight from this peer for block_interval * (1 + 0.5 * N)
5913  // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
5914  // We compensate for other peers to prevent killing off peers due to our own downstream link
5915  // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
5916  // to unreasonably increase our timeout.
5917  if (state.vBlocksInFlight.size() > 0) {
5918  QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
5919  int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1;
5920  if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
5921  LogPrintf("Timeout downloading block %s from peer=%d%s, disconnecting\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
5922  pto->fDisconnect = true;
5923  return true;
5924  }
5925  }
5926  // Check for headers sync timeouts
5927  if (state.fSyncStarted && peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
5928  // Detect whether this is a stalling initial-headers-sync peer
5929  if (m_chainman.m_best_header->Time() <= NodeClock::now() - 24h) {
5930  if (current_time > peer->m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) {
5931  // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer,
5932  // and we have others we could be using instead.
5933  // Note: If all our peers are inbound, then we won't
5934  // disconnect our sync peer for stalling; we have bigger
5935  // problems if we can't get any outbound peers.
5937  LogPrintf("Timeout downloading headers from peer=%d%s, disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
5938  pto->fDisconnect = true;
5939  return true;
5940  } else {
5941  LogPrintf("Timeout downloading headers from noban peer=%d%s, not disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
5942  // Reset the headers sync state so that we have a
5943  // chance to try downloading from a different peer.
5944  // Note: this will also result in at least one more
5945  // getheaders message to be sent to
5946  // this peer (eventually).
5947  state.fSyncStarted = false;
5948  nSyncStarted--;
5949  peer->m_headers_sync_timeout = 0us;
5950  }
5951  }
5952  } else {
5953  // After we've caught up once, reset the timeout so we can't trigger
5954  // disconnect later.
5955  peer->m_headers_sync_timeout = std::chrono::microseconds::max();
5956  }
5957  }
5958 
5959  // Check that outbound peers have reasonable chains
5960  // GetTime() is used by this anti-DoS logic so we can test this using mocktime
5961  ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
5962 
5963  //
5964  // Message: getdata (blocks)
5965  //
5966  std::vector<CInv> vGetData;
5967  if (CanServeBlocks(*peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.IsInitialBlockDownload()) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
5968  std::vector<const CBlockIndex*> vToDownload;
5969  NodeId staller = -1;
5970  auto get_inflight_budget = [&state]() {
5971  return std::max(0, MAX_BLOCKS_IN_TRANSIT_PER_PEER - static_cast<int>(state.vBlocksInFlight.size()));
5972  };
5973 
5974  // If a snapshot chainstate is in use, we want to find its next blocks
5975  // before the background chainstate to prioritize getting to network tip.
5976  FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload, staller);
5977  if (m_chainman.BackgroundSyncInProgress() && !IsLimitedPeer(*peer)) {
5978  TryDownloadingHistoricalBlocks(
5979  *peer,
5980  get_inflight_budget(),
5981  vToDownload, m_chainman.GetBackgroundSyncTip(),
5982  Assert(m_chainman.GetSnapshotBaseBlock()));
5983  }
5984  for (const CBlockIndex *pindex : vToDownload) {
5985  uint32_t nFetchFlags = GetFetchFlags(*peer);
5986  vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash());
5987  BlockRequested(pto->GetId(), *pindex);
5988  LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
5989  pindex->nHeight, pto->GetId());
5990  }
5991  if (state.vBlocksInFlight.empty() && staller != -1) {
5992  if (State(staller)->m_stalling_since == 0us) {
5993  State(staller)->m_stalling_since = current_time;
5994  LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
5995  }
5996  }
5997  }
5998 
5999  //
6000  // Message: getdata (transactions)
6001  //
6002  std::vector<std::pair<NodeId, GenTxid>> expired;
6003  auto requestable = m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
6004  for (const auto& entry : expired) {
6005  LogPrint(BCLog::NET, "timeout of inflight %s %s from peer=%d\n", entry.second.IsWtxid() ? "wtx" : "tx",
6006  entry.second.GetHash().ToString(), entry.first);
6007  }
6008  for (const GenTxid& gtxid : requestable) {
6009  if (!AlreadyHaveTx(gtxid)) {
6010  LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx",
6011  gtxid.GetHash().ToString(), pto->GetId());
6012  vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*peer)), gtxid.GetHash());
6013  if (vGetData.size() >= MAX_GETDATA_SZ) {
6014  MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData);
6015  vGetData.clear();
6016  }
6017  m_txrequest.RequestedTx(pto->GetId(), gtxid.GetHash(), current_time + GETDATA_TX_INTERVAL);
6018  } else {
6019  // We have already seen this transaction, no need to download. This is just a belt-and-suspenders, as
6020  // this should already be called whenever a transaction becomes AlreadyHaveTx().
6021  m_txrequest.ForgetTxHash(gtxid.GetHash());
6022  }
6023  }
6024 
6025 
6026  if (!vGetData.empty())
6027  MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData);
6028  } // release cs_main
6029  MaybeSendFeefilter(*pto, *peer, current_time);
6030  return true;
6031 }
std::shared_ptr< const CTransaction > CTransactionRef
Definition: transaction.h:423
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND
Maximum rate of inventory items to send per second.
const char * GETCFILTERS
getcfilters requests compact filters for a range of blocks.
Definition: protocol.cpp:40
static Mutex g_msgproc_mutex
Mutex for anything that is only accessed via the msg processing thread.
Definition: net.h:1000
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
Definition: chain.h:174
static std::unique_ptr< PeerManager > make(CConnman &connman, AddrMan &addrman, BanMan *banman, ChainstateManager &chainman, CTxMemPool &pool, Options opts)
bool IsMsgWtx() const
Definition: protocol.h:473
enum ReadStatus_t ReadStatus
const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected...
Definition: protocol.cpp:28
static constexpr auto TXID_RELAY_DELAY
How long to delay requesting transactions via txids, if we have wtxid-relaying peers.
node::BlockManager m_blockman
A single BlockManager instance is shared across each constructed chainstate to avoid duplicating bloc...
Definition: validation.h:963
BlockFilterIndex is used to store and retrieve block filters, hashes, and headers for a range of bloc...
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for inbound peers.
int ret
const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
Definition: protocol.cpp:31
const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
Definition: protocol.cpp:20
std::atomic_bool fPauseSend
Definition: net.h:742
invalid by consensus rules
bool HaveNumChainTxs() const
Check whether this block's and all previous blocks' transactions have been downloaded (and stored to ...
Definition: chain.h:275
std::chrono::time_point< NodeClock > time_point
Definition: time.h:17
const char * BLOCKTXN
Contains a BlockTransactions.
Definition: protocol.cpp:39
AssertLockHeld(pool.cs)
static GenTxid Wtxid(const uint256 &hash)
Definition: transaction.h:435
bool IsMsgTx() const
Definition: protocol.h:471
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
static constexpr unsigned int INVENTORY_BROADCAST_MAX
Maximum number of inventory items to send per transmission.
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
const char * SENDADDRV2
The sendaddrv2 message signals support for receiving ADDRV2 messages (BIP155).
Definition: protocol.cpp:17
Definition: banman.h:58
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing)
std::vector< Byte > ParseHex(std::string_view hex_str)
Like TryParseHex, but returns an empty vector on invalid input.
Definition: strencodings.h:65
ServiceFlags
nServices flags
Definition: protocol.h:274
static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET
The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND based inc...
bool IsPruneMode() const
Whether running in -prune mode.
Definition: blockstorage.h:319
bool IsLocal() const
Definition: netaddress.cpp:399
std::optional< std::pair< CNetMessage, bool > > PollMessage() EXCLUSIVE_LOCKS_REQUIRED(!m_msg_process_queue_mutex)
Poll the next message from the processing queue of this connection.
Definition: net.cpp:3732
#define LogPrint(category,...)
Definition: logging.h:264
int64_t GetBlockTime() const
Definition: chain.h:282
assert(!tx.IsCoinBase())
NodeSeconds Time() const
Definition: chain.h:277
Describes a place in the block chain to another node such that if the other node doesn't have the sam...
Definition: block.h:123
virtual void BlockDisconnected(const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being disconnected Provides the block that was disconnected.
CBlockIndex * pprev
pointer to the index of the predecessor of this block
Definition: chain.h:156
std::vector< TxMempoolInfo > infoAll() const
Definition: txmempool.cpp:822
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data It is treated as if this was the little-endian interpretation of ...
Definition: siphash.cpp:28
virtual void StartScheduledTasks(CScheduler &scheduler)=0
Begin running background tasks, should only be called once.
uint64_t m_addr_rate_limited
unsigned int nonce
Definition: miner_tests.cpp:71
bool OutboundTargetReached(bool historicalBlockServingLimit) const EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex)
check if the outbound target is reached if param historicalBlockServingLimit is set true...
Definition: net.cpp:3618
virtual void UnitTestMisbehaving(NodeId peer_id, int howmuch)=0
std::string ToString() const
Definition: protocol.cpp:158
RecursiveMutex & GetNodesMutex() const LOCK_RETURNED(m_nodes_mutex)
bool exists(const GenTxid &gtxid) const
Definition: txmempool.h:674
Definition: block.h:68
ReconciliationRegisterResult
We don't have the previous block the checked one is built on.
Data structure to keep track of, and schedule, transaction downloads from peers.
Definition: txrequest.h:96
const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
Definition: protocol.cpp:26
uint64_t ReadCompactSize(Stream &is, bool range_check=true)
Decode a CompactSize-encoded variable-length integer.
Definition: serialize.h:352
std::atomic_bool m_has_all_wanted_services
Whether this peer provides all services that we want.
Definition: net.h:856
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
Definition: validation.h:846
std::vector< uint16_t > indexes
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
Definition: tinyformat.h:1162
bool IsOutboundOrBlockRelayConn() const
Definition: net.h:765
static const int WTXID_RELAY_VERSION
"wtxidrelay" command for wtxid-based relay starts with this version
bool IsMsgFilteredBlk() const
Definition: protocol.h:474
An in-memory indexed chain of blocks.
Definition: chain.h:446
const std::chrono::seconds m_connected
Unix epoch time at peer connection.
Definition: net.h:710
virtual void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)=0
This function is used for testing the stale tip eviction logic, see denialofservice_tests.cpp.
size_t DynamicMemoryUsage() const
Definition: txmempool.cpp:1023
void Discourage(const CNetAddr &net_addr) EXCLUSIVE_LOCKS_REQUIRED(!m_banned_mutex)
Definition: banman.cpp:124
const std::optional< std::list< CTransactionRef > > m_replaced_transactions
Mempool transactions replaced by the tx.
Definition: validation.h:142
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL
Average delay between feefilter broadcasts in seconds.
reverse_range< T > reverse_iterate(T &x)
transaction was not validated because package failed
inv message data
Definition: protocol.h:457
invalid proof of work or time too old
const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
Definition: protocol.cpp:36
A class to track orphan transactions (failed on TX_MISSING_INPUTS) Since we cannot distinguish orphan...
Definition: txorphanage.h:21
BlockFilterIndex * GetBlockFilterIndex(BlockFilterType filter_type)
Get a block filter index by type.
static constexpr int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of ActiveChain().Tip() will not be pr...
Definition: validation.h:66
constexpr auto GetRandMillis
Definition: random.h:98
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
Definition: chain.cpp:165
transaction was missing some of its inputs
bool IsMsgCmpctBlk() const
Definition: protocol.h:475
virtual void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr< const CBlock > &block)
Notifies listeners that a block which builds directly on our current tip has been received and connec...
std::vector< CAddress > GetAddresses(size_t max_addresses, size_t max_pct, std::optional< Network > network, const bool filtered=true) const
Return all or many randomly selected addresses, optionally by network.
Definition: net.cpp:3393
bool IsFeelerConn() const
Definition: net.h:808
CChain & ActiveChain() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
Definition: validation.h:1066
int in_avail() const
Definition: streams.h:216
All parent headers found, difficulty matches, timestamp >= median previous, checkpoint.
Definition: chain.h:97
bool MoneyRange(const CAmount &nValue)
Definition: amount.h:27
CBlockHeader GetBlockHeader() const
Definition: chain.h:245
static constexpr auto HEADERS_RESPONSE_TIME
How long to wait for a peer to respond to a getheaders request.
int Height() const
Return the maximal height in the chain.
Definition: chain.h:492
static constexpr unsigned int INVENTORY_BROADCAST_TARGET
Target number of tx inventory items to send per transmission.
BloomFilter is a probabilistic filter which SPV clients provide so that we can filter the transaction...
Definition: bloom.h:44
bool DeploymentActiveAfter(const CBlockIndex *pindexPrev, const Consensus::Params &params, Consensus::BuriedDeployment dep, [[maybe_unused]] VersionBitsCache &versionbitscache)
Determine if a deployment is active for the next block.
static constexpr SerParams V2_NETWORK
Definition: protocol.h:374
static constexpr unsigned int DEFAULT_MIN_RELAY_TX_FEE
Default for -minrelaytxfee, minimum relay fee for transactions.
Definition: policy.h:57
unsigned long size() const
Definition: txmempool.h:656
const char * CFHEADERS
cfheaders is a response to a getcfheaders request containing a filter header and a vector of filter h...
Definition: protocol.cpp:43
Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid...
Definition: chain.h:104
void SetCommonVersion(int greatest_common_version)
Definition: net.h:924
We're done syncing with this peer and can discard any remaining state.
uint64_t GetSequence() const EXCLUSIVE_LOCKS_REQUIRED(cs)
Definition: txmempool.h:732
Defined in BIP152.
Definition: protocol.h:448
RollingBloomFilter is a probabilistic "keep track of most recently inserted" set. ...
Definition: bloom.h:108
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL
Average delay between peer address broadcasts.
const TxValidationState m_state
Contains information about why the transaction failed.
Definition: validation.h:139
bool Contains(Network net) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Definition: netbase.h:91
static constexpr size_t MAX_PCT_ADDR_TO_SEND
the maximum percentage of addresses from our addrman to return in response to a getaddr message...
CChainParams defines various tweakable parameters of a given instance of the Bitcoin system...
Definition: chainparams.h:80
violated mempool's fee/size/descendant/RBF/etc limits
static constexpr auto NONPREF_PEER_TX_DELAY
How long to delay requesting transactions from non-preferred peers.
the block header may be on a too-little-work chain
Mutex m_subver_mutex
Definition: net.h:722
bool IsNull() const
Definition: block.h:152
inputs (covered by txid) failed policy rules
void SetTryNewOutboundPeer(bool flag)
Definition: net.cpp:2334
void ignore(size_t num_ignore)
Definition: streams.h:236
const uint32_t MSG_WITNESS_FLAG
getdata message type flags
Definition: protocol.h:434
const ResultType m_result_type
Result type.
Definition: validation.h:136
void LimitOrphans(unsigned int max_orphans, FastRandomContext &rng) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Limit the orphanage to the given maximum.
bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos) const
Functions for disk access for blocks.
uint64_t GetLocalNonce() const
Definition: net.h:903
bool SeenLocal(const CService &addr)
vote for a local address
Definition: net.cpp:315
transaction spends a coinbase too early, or violates locktime/sequence locks
static const unsigned int NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS
Window, in blocks, for connecting to NODE_NETWORK_LIMITED peers.
const char * CFILTER
cfilter is a response to a getcfilters request containing a single compact filter.
Definition: protocol.cpp:41
bool DeploymentActiveAt(const CBlockIndex &index, const Consensus::Params &params, Consensus::BuriedDeployment dep, [[maybe_unused]] VersionBitsCache &versionbitscache)
Determine if a deployment is active for this block.
static constexpr auto STALE_CHECK_INTERVAL
How frequently to check for stale tips.
const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
Definition: protocol.cpp:29
bool MultipleManualOrFullOutboundConns(Network net) const EXCLUSIVE_LOCKS_REQUIRED(m_nodes_mutex)
Definition: net.cpp:2393
State
The various states a (txhash,peer) pair can be in.
Definition: txrequest.cpp:42
static constexpr std::chrono::minutes TIMEOUT_INTERVAL
Time after which to disconnect, after waiting for a ping response (or inactivity).
Definition: net.h:62
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
Additional block download timeout per parallel downloading peer (i.e.
static const int MIN_PEER_PROTO_VERSION
disconnect from peers older than this proto version
initial value. Tx has not yet been rejected
virtual void BlockChecked(const CBlock &, const BlockValidationState &)
Notifies listeners of a block validation result.
const char * WTXIDRELAY
Indicates that a node prefers to relay transactions via wtxid, rather than txid.
Definition: protocol.cpp:46
bool GetTryNewOutboundPeer() const
Definition: net.cpp:2329
static constexpr uint32_t MAX_GETCFILTERS_SIZE
Maximum number of compact filters that may be requested with one getcfilters.
enum Network GetNetwork() const
Definition: netaddress.cpp:497
const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
Definition: protocol.cpp:24
bool empty() const
Definition: streams.h:182
const char * GETCFCHECKPT
getcfcheckpt requests evenly spaced compact filter headers, enabling parallelized download and valida...
Definition: protocol.cpp:44
std::string ToStringAddrPort() const
Definition: netaddress.cpp:889
PRESYNC means the peer has not yet demonstrated their chain has sufficient work and we're only buildi...
bool IsRelayable() const
Whether this address should be relayed to other peers even if we can't reach it ourselves.
Definition: netaddress.h:218
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT
Protect at least this many outbound peers from disconnection due to slow/ behind headers chain...
CSerializedNetMsg Make(std::string msg_type, Args &&... args)
const std::vector< CTxIn > vin
Definition: transaction.h:306
bool HaveTxToReconsider(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Does this peer have any work to do?
const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmi...
Definition: protocol.cpp:18
Stochastic address manager.
Definition: addrman.h:87
bool IsBanned(const CNetAddr &net_addr) EXCLUSIVE_LOCKS_REQUIRED(!m_banned_mutex)
Return whether net_addr is banned.
Definition: banman.cpp:89
Transaction validation functions.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE
Block download timeout base, expressed in multiples of the block interval (i.e.
bool ForNode(NodeId id, std::function< bool(CNode *pnode)> func)
Definition: net.cpp:3798
static const int SENDHEADERS_VERSION
"sendheaders" command and announcing blocks with headers starts with this version ...
Functions to serialize / deserialize common bitcoin types.
Definition: common-types.h:50
bool ProcessNewBlock(const std::shared_ptr< const CBlock > &block, bool force_processing, bool min_pow_checked, bool *new_block) LOCKS_EXCLUDED(cs_main)
Process an incoming block.
bool IsValid() const
Definition: netaddress.cpp:425
bool DisconnectNode(const std::string &node)
Definition: net.cpp:3518
int EraseTx(const Txid &txid) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase an orphan by txid.
Definition: txorphanage.cpp:57
std::function< void(const CAddress &addr, const std::string &msg_type, Span< const unsigned char > data, bool is_incoming)> CaptureMessage
Defaults to CaptureMessageToFile(), but can be overridden by unit tests.
Definition: net.cpp:3899
int GetExtraBlockRelayCount() const
Definition: net.cpp:2366
int64_t CAmount
Amount in satoshis (Can be negative)
Definition: amount.h:12
std::chrono::time_point< NodeClock, std::chrono::seconds > NodeSeconds
Definition: time.h:23
std::string TransportTypeAsString(TransportProtocolType transport_type)
Convert TransportProtocolType enum to a string value.
uint256 GetBlockHash() const
Definition: chain.h:258
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX
Maximum timeout for stalling block download.
std::string SanitizeString(std::string_view str, int rule)
Remove unsafe chars.
bool IsValid() const
Definition: validation.h:122
static constexpr auto PING_INTERVAL
Time between pings automatically sent out for latency probing and keepalive.
virtual bool HasAllDesirableServiceFlags(ServiceFlags services) const =0
Callback to determine whether the given set of service flags are sufficient for a peer to be "relevan...
BlockFilterType
Definition: blockfilter.h:92
std::string ToString(const T &t)
Locale-independent version of std::to_string.
Definition: string.h:109
bool IsBlockMutated(const CBlock &block, bool check_witness_root)
Check if a block has been mutated (with respect to its merkle root and witness commitments).
GenTxid ToGenTxid(const CInv &inv)
Convert a TX/WITNESS_TX/WTX CInv to a GenTxid.
Definition: protocol.cpp:207
initial value. Block has not yet been rejected
bool IsGenBlkMsg() const
Definition: protocol.h:483
Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends...
Definition: chain.h:108
void SetAddrLocal(const CService &addrLocalIn) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_local_mutex)
May not be called more than once.
Definition: net.cpp:574
bool HasValidProofOfWork(const std::vector< CBlockHeader > &headers, const Consensus::Params &consensusParams)
Check whether the proof of work on each block header matches the value in nBits.
static constexpr auto EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect.
Used to relay blocks as header + vector<merkle branch> to filtered nodes.
Definition: merkleblock.h:125
const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
Definition: protocol.cpp:22
virtual void ProcessMessage(CNode &pfrom, const std::string &msg_type, DataStream &vRecv, const std::chrono::microseconds time_received, const std::atomic< bool > &interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Process a single message from a peer.
CAmount m_fee_filter_received
const char * SENDTXRCNCL
Contains a 4-byte version number and an 8-byte salt.
Definition: protocol.cpp:47
virtual void InitializeNode(CNode &node, ServiceFlags our_services)=0
Initialize a peer (setup state, queue any initial messages)
std::optional< CService > GetLocalAddrForPeer(CNode &node)
Returns a local address that we should advertise to this peer.
Definition: net.cpp:238
bool BackgroundSyncInProgress() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
The state of a background sync (for net processing)
Definition: validation.h:1071
bool Add(const std::vector< CAddress > &vAddr, const CNetAddr &source, std::chrono::seconds time_penalty=0s)
Attempt to add one or more addresses to addrman&#39;s new table.
Definition: addrman.cpp:1295
static const int INVALID_CB_NO_BAN_VERSION
not banning for invalid compact blocks starts with this version
uint64_t m_addr_processed
std::atomic_bool m_bloom_filter_loaded
Whether this peer has loaded a bloom filter.
Definition: net.h:864
ArgsManager & args
Definition: bitcoind.cpp:268
Invalid by a change to consensus rules more recent than SegWit.
Scripts & signatures ok. Implies all parents are either at least VALID_SCRIPTS, or are ASSUMED_VALID...
Definition: chain.h:111
Transaction might have a witness prior to SegWit activation, or witness may have been malleated (whic...
const std::unique_ptr< Transport > m_transport
Transport serializer/deserializer.
Definition: net.h:681
ChainstateRole
This enum describes the various roles a specific Chainstate instance can take.
Definition: chain.h:25
std::vector< CTransactionRef > txn
std::chrono::microseconds GetExponentialRand(std::chrono::microseconds now, std::chrono::seconds average_interval)
Return a timestamp in the future sampled from an exponential distribution (https://en.wikipedia.org/wiki/Exponential_distribution).
Definition: random.cpp:764
this block was cached as being invalid and we didn&#39;t store the reason why
std::atomic_bool m_relays_txs
Whether we should relay transactions to this peer.
Definition: net.h:860
An input of a transaction.
Definition: transaction.h:66
bool IsDiscouraged(const CNetAddr &net_addr) EXCLUSIVE_LOCKS_REQUIRED(!m_banned_mutex)
Return whether net_addr is discouraged.
Definition: banman.cpp:83
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY
Maximum feefilter broadcast delay after significant change.
static const unsigned int MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK
Maximum number of outstanding CMPCTBLOCK requests for the same block.
#define LOCK(cs)
Definition: sync.h:257
void StartExtraBlockRelayPeers()
Definition: net.cpp:2340
const char * name
Definition: rest.cpp:49
Double ended buffer combining vector and stream-like interfaces.
Definition: streams.h:146
const char * ADDRV2
The addrv2 message relays connection information for peers on the network just like the addr message...
Definition: protocol.cpp:16
std::string ToString() const
Definition: validation.h:128
the block failed to meet one of our checkpoints
bool LookupFilterHeader(const CBlockIndex *block_index, uint256 &header_out) EXCLUSIVE_LOCKS_REQUIRED(!m_cs_headers_cache)
Get a single filter header by block.
bool Contains(const CBlockIndex *pindex) const
Efficiently check whether a block is present in this chain.
Definition: chain.h:477
A combination of a network address (CNetAddr) and a (TCP) port.
Definition: netaddress.h:530
Fast randomness source.
Definition: random.h:144
Transport protocol agnostic message container.
Definition: net.h:237
Txid hash
Definition: transaction.h:31
int64_t nPowTargetSpacing
Definition: params.h:112
void EraseForPeer(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase all orphans announced by a peer (eg, after that peer disconnects)
Definition: txorphanage.cpp:97
CBlockIndex * Next(const CBlockIndex *pindex) const
Find the successor of a block in this chain, or nullptr if the given index is not found or is the tip...
Definition: chain.h:483
const char * SENDHEADERS
Indicates that a node prefers to receive new block announcements via a "headers" message rather than ...
Definition: protocol.cpp:34
const char * MEMPOOL
The mempool message requests the TXIDs of transactions that the receiving node has verified as valid ...
Definition: protocol.cpp:27
constexpr int64_t count_microseconds(std::chrono::microseconds t)
Definition: time.h:56
static const unsigned int MAX_HEADERS_RESULTS
Number of headers sent in one getheaders result.
static constexpr SerParams V1
Definition: netaddress.h:231
bool IsProxy(const CNetAddr &addr)
Definition: netbase.cpp:610
bool IsGenTxMsg() const
Definition: protocol.h:479
virtual void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
Notifies listeners when the block chain tip advances.
bool LoadingBlocks() const
Definition: blockstorage.h:325
bool IsManualConn() const
Definition: net.h:784
virtual void FinalizeNode(const CNode &node)=0
Handle removal of a peer (clear state)
int GetExtraFullOutboundCount() const
Definition: net.cpp:2352
A CService with information about it as peer.
Definition: protocol.h:331
std::string ToString() const
virtual bool IgnoresIncomingTxs()=0
Whether this node ignores txs received over p2p.
static constexpr size_t MAX_ADDR_TO_SEND
The maximum number of address records permitted in an ADDR message.
void Connected(const CService &addr, NodeSeconds time=Now< NodeSeconds >())
We have successfully connected to this peer.
Definition: addrman.cpp:1335
static constexpr uint64_t CMPCTBLOCKS_VERSION
The compactblocks version we support.
std::vector< unsigned char > GetKey() const
Definition: netaddress.cpp:881
static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for outbound peers.
const CFeeRate m_min_relay_feerate
Definition: txmempool.h:440
uint256 hash
Definition: protocol.h:489
CBlockIndex * ActiveTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
Definition: validation.h:1068
#define LogPrintLevel(category, level,...)
Definition: logging.h:252
static const int PROTOCOL_VERSION
network protocol versioning
Result GetResult() const
Definition: validation.h:125
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY
SHA256("main address relay")[0:8].
static constexpr uint32_t MAX_GETCFHEADERS_SIZE
Maximum number of cf hashes that may be requested with one getcfheaders.
bool LookupFilterHashRange(int start_height, const CBlockIndex *stop_index, std::vector< uint256 > &hashes_out) const
Get a range of filter hashes between two heights on a chain.
const char * ADDR
The addr (IP address) message relays connection information for peers on the network.
Definition: protocol.cpp:15
bool ExpectServicesFromConn() const
Definition: net.h:820
virtual ServiceFlags GetDesirableServiceFlags(ServiceFlags services) const =0
Gets the set of service flags which are "desirable" for a given peer.
int64_t presync_height
int64_t NodeId
Definition: net.h:102
Definition: net.h:1040
Defined in BIP144.
Definition: protocol.h:449
bool GetNetworkActive() const
Definition: net.h:1119
static const int DISCOURAGEMENT_THRESHOLD
Threshold for marking a node to be discouraged, e.g.
uint64_t Finalize() const
Compute the 64-bit SipHash-2-4 of the data written so far.
Definition: siphash.cpp:77
const char * FILTERCLEAR
The filterclear message tells the receiving peer to remove a previously-set bloom filter...
Definition: protocol.cpp:33
std::string ToString() const
Definition: uint256.cpp:55
std::atomic< bool > m_bip152_highbandwidth_to
Definition: net.h:851
std::vector< uint256 > vHave
Definition: block.h:134
RecursiveMutex & GetMutex() const LOCK_RETURNED(
Alias for cs_main.
Definition: validation.h:956
virtual std::optional< std::string > FetchBlock(NodeId peer_id, const CBlockIndex &block_index)=0
Attempt to manually fetch block from a given peer.
NodeId GetId() const
Definition: net.h:899
const char * NOTFOUND
The notfound message is a reply to a getdata message which requested an object the receiving node doe...
Definition: protocol.cpp:30
NodeSeconds nTime
Always included in serialization. The behavior is unspecified if the value is not representable as ui...
Definition: protocol.h:421
CSipHasher GetDeterministicRandomizer(uint64_t id) const
Get a unique deterministic randomizer.
Definition: net.cpp:3811
const bool m_inbound_onion
Whether this peer is an inbound onion, i.e. connected via our Tor onion service.
Definition: net.h:720
Parameters that influence chain consensus.
Definition: params.h:74
static const int MAX_CMPCTBLOCK_DEPTH
Maximum depth of blocks we&#39;re willing to serve as compact blocks to peers when requested.
void PongReceived(std::chrono::microseconds ping_time)
A ping-pong round trip has completed successfully.
Definition: net.h:956
const char * BLOCK
The block message transmits a single serialized block.
Definition: protocol.cpp:25
CBlockIndex * LookupBlockIndex(const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
std::atomic_bool fDisconnect
Definition: net.h:736
std::string strSubVersion
Subversion as sent to the P2P network in version messages.
Definition: net.cpp:119
const char * FEEFILTER
The feefilter message tells the receiving peer not to inv us any txs which do not meet the specified ...
Definition: protocol.cpp:35
const char * GETCFHEADERS
getcfheaders requests a compact filter header and the filter hashes for a range of blocks...
Definition: protocol.cpp:42
CFeeRate GetMinFee(size_t sizelimit) const
Definition: txmempool.cpp:1091
std::atomic< std::chrono::seconds > m_last_tx_time
UNIX epoch time of the last transaction received from this peer that we had not yet seen (e...
Definition: net.h:877
constexpr bool IsNull() const
Definition: uint256.h:42
bool IsMsgWitnessBlk() const
Definition: protocol.h:476
fails some policy, but might be acceptable if submitted in a (different) package
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.
size_t Size() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Return how many entries exist in the orphange.
Definition: txorphanage.h:55
Validation result for a transaction evaluated by MemPoolAccept (single or package).
Definition: validation.h:127
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
Definition: sync.h:301
bool IsRoutable() const
Definition: netaddress.cpp:463
#define Assume(val)
Assume is the identity function.
Definition: check.h:89
256-bit unsigned big integer.
void AddChildrenToWorkSet(const CTransaction &tx) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Add any orphans that list a particular tx as a parent into the from peer&#39;s work set.
bool IsWtxid() const
Definition: transaction.h:436
static bool MayHaveUsefulAddressDB(ServiceFlags services)
Checks if a peer with the given service flags may be capable of having a robust address-storage DB...
Definition: protocol.h:325
std::chrono::seconds PowTargetSpacing() const
Definition: params.h:114
constexpr int64_t count_seconds(std::chrono::seconds t)
Definition: time.h:54
std::chrono::microseconds m_ping_wait
TxMempoolInfo info_for_relay(const GenTxid &gtxid, uint64_t last_sequence) const
Returns info for a transaction if its entry_sequence < last_sequence.
Definition: txmempool.cpp:861
bool CheckIncomingNonce(uint64_t nonce)
Definition: net.cpp:370
static const int SHORT_IDS_BLOCKS_VERSION
short-id-based block download starts with this version
CTransactionRef GetTxToReconsider(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Extract a transaction from a peer&#39;s work set Returns nullptr if there are no transactions to work on...
Definition: init.h:25
const CAddress addr
Definition: net.h:713
const char * GETBLOCKS
The getblocks message requests an inv message that provides block header hashes starting from a parti...
Definition: protocol.cpp:21
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS
Maximum number of transactions to consider for requesting, per peer.
static constexpr int CFCHECKPT_INTERVAL
Interval between compact filter checkpoints.
bool IsBlockOnlyConn() const
Definition: net.h:804
#define LogInfo(...)
Definition: logging.h:240
Transaction is missing a witness.
size_type size() const
Definition: streams.h:181
if(!SetupNetworking())
bool IsValid(enum BlockStatus nUpTo=BLOCK_VALID_TRANSACTIONS) const EXCLUSIVE_LOCKS_REQUIRED(
Check whether this block index entry is valid up to the passed validity level.
Definition: chain.h:311
const char * VERACK
The verack message acknowledges a previously-received version message, informing the connecting node ...
Definition: protocol.cpp:14
bool IsMsgBlk() const
Definition: protocol.h:472
uint256 GetHash() const
Definition: block.cpp:11
256-bit opaque blob.
Definition: uint256.h:106
void scheduleFromNow(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Call f once after the delta has passed.
Definition: scheduler.h:52
ServiceFlags their_services
invalid by consensus rules (excluding any below reasons)
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL
Delay between rotating the peers we relay a particular address to.
static time_point now() noexcept
Return current system time or mocked time, if set.
Definition: time.cpp:70
bool HasWitness() const
Definition: transaction.h:373
void Shuffle(I first, I last, R &&rng)
More efficient than using std::shuffle on a FastRandomContext.
Definition: random.h:265
static const unsigned int MAX_GETDATA_SZ
Limit to avoid sending big packets.
const arith_uint256 & MinimumChainWork() const
Definition: validation.h:934
ServiceFlags nServices
Serialized as uint64_t in V1, and as CompactSize in V2.
Definition: protocol.h:423
#define EXCLUSIVE_LOCKS_REQUIRED(...)
Definition: threadsafety.h:49
std::vector< CTransactionRef > vtx
Definition: block.h:72
const char * CMPCTBLOCK
Contains a CBlockHeaderAndShortTxIDs object - providing a header and list of "short txids"...
Definition: protocol.cpp:37
virtual void CheckForStaleTipAndEvictPeers()=0
Evict extra outbound peers.
the block&#39;s data didn&#39;t match the data committed to by the PoW
virtual bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) const =0
Get statistics from node state.
void WakeMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc)
Definition: net.cpp:2177
CTxMemPool stores valid-according-to-the-current-best-chain transactions that may be included in the ...
Definition: txmempool.h:299
#define LogDebug(category,...)
Definition: logging.h:260
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT
Default time during which a peer must stall block download progress before being disconnected.
#define LOCKS_EXCLUDED(...)
Definition: threadsafety.h:48
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout.
virtual void SendPings()=0
Send ping message to all peers.
std::set< uint256 > GetUnbroadcastTxs() const
Returns transactions in unbroadcast set.
Definition: txmempool.h:714
const char * VERSION
The version message provides information about the transmitting node to the receiving node at the beg...
Definition: protocol.cpp:13
uint32_t GetMappedAS(const CNetAddr &addr) const
Definition: net.cpp:3501
std::vector< std::pair< unsigned int, uint256 > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
Definition: merkleblock.h:138
The block chain is a tree shaped structure starting with the genesis block at the root...
Definition: chain.h:149
const CChainParams & Params()
Return the currently selected parameters.
bool AddTx(const CTransactionRef &tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Add a new orphan transaction.
Definition: txorphanage.cpp:20
void EraseForBlock(const CBlock &block) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase all orphans included in or invalidated by a new block.
static const unsigned int BLOCK_DOWNLOAD_WINDOW
Size of the "block download window": how far ahead of our current height do we fetch? Larger windows tolerate larger download speed differences between peer, but increase the potential degree of disordering of blocks on disk (which make reindexing and pruning harder).
static const unsigned int MAX_INV_SZ
The maximum number of entries in an &#39;inv&#39; protocol message.
virtual void RelayTransaction(const uint256 &txid, const uint256 &wtxid)=0
Relay transaction to all peers.
bool ReadRawBlockFromDisk(std::vector< uint8_t > &block, const FlatFilePos &pos) const
bool IsTxAvailable(size_t index) const
A block this one builds on is invalid.
TxMempoolInfo info(const GenTxid &gtxid) const
Definition: txmempool.cpp:852
void SetServices(const CService &addr, ServiceFlags nServices)
Update an entry&#39;s service bits.
Definition: addrman.cpp:1340
bool fLogIPs
Definition: logging.cpp:41
#define TRACE6(context, event, a, b, c, d, e, f)
Definition: trace.h:38
bool IsInitialBlockDownload() const
Check whether we are doing an initial block download (synchronizing from disk or network) ...
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
Definition: script.h:27
CBlockIndex * Tip() const
Returns the index entry for the tip of this chain, or nullptr if none.
Definition: chain.h:463
#define LIMITED_STRING(obj, n)
Definition: serialize.h:515
std::atomic< int64_t > nTimeOffset
Definition: net.h:711
const char * GETDATA
The getdata message requests one or more data objects from another node.
Definition: protocol.cpp:19
bool fListen
Definition: net.cpp:116
Fee rate in satoshis per kilovirtualbyte: CAmount / kvB.
Definition: feerate.h:32
static constexpr auto OVERLOADED_PEER_TX_DELAY
How long to delay requesting transactions from overloaded peers (see MAX_PEER_TX_REQUEST_IN_FLIGHT).
constexpr auto MakeUCharSpan(V &&v) -> decltype(UCharSpanCast(Span
Like the Span constructor, but for (const) unsigned char member types only.
Definition: span.h:304
static constexpr auto CHAIN_SYNC_TIMEOUT
Timeout for (unprotected) outbound peers to sync to our chainwork.
std::atomic_bool fSuccessfullyConnected
fSuccessfullyConnected is set to true on receiving VERACK from the peer.
Definition: net.h:733
SipHash-2-4.
Definition: siphash.h:14
static constexpr CAmount MAX_MONEY
No amount larger than this (in satoshi) is valid.
Definition: amount.h:26
#define AssertLockNotHeld(cs)
Definition: sync.h:147
bool IsInvalid() const
Definition: validation.h:123
static const unsigned int MAX_SUBVERSION_LENGTH
Maximum length of the user agent string in version message.
Definition: net.h:70
static int count
std::atomic< int > nVersion
Definition: net.h:721
Invalid by a change to consensus rules more recent than SegWit.
#define GUARDED_BY(x)
Definition: threadsafety.h:38
arith_uint256 CalculateHeadersWork(const std::vector< CBlockHeader > &headers)
Return the sum of the work on a given set of headers.
const CBlockIndex * GetBackgroundSyncTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
The tip of the background sync chain.
Definition: validation.h:1076
std::string ConnectionTypeAsString() const
Definition: net.h:953
static size_t RecursiveDynamicUsage(const CScript &script)
Definition: core_memusage.h:12
bool ShouldRunInactivityChecks(const CNode &node, std::chrono::seconds now) const
Return true if we should disconnect the peer for failing an inactivity check.
Definition: net.cpp:1960
const char * CFCHECKPT
cfcheckpt is a response to a getcfcheckpt request containing a vector of evenly spaced filter headers...
Definition: protocol.cpp:45
ReadStatus InitData(const CBlockHeaderAndShortTxIDs &cmpctblock, const std::vector< std::pair< uint256, CTransactionRef >> &extra_txn)
virtual void BlockConnected(ChainstateRole role, const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being connected.
this node does not have a mempool so can&#39;t validate the transaction
CTransactionRef get(const uint256 &hash) const
Definition: txmempool.cpp:843
static bool LogAcceptCategory(BCLog::LogFlags category, BCLog::Level level)
Return true if log accepts specified category, at the specified level.
Definition: logging.h:210
block timestamp was > 2 hours in the future (or our clock is bad)
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE
Maximum number of headers to announce when relaying blocks with headers message.
std::atomic< bool > m_bip152_highbandwidth_from
Definition: net.h:853
CBlockLocator GetLocator(const CBlockIndex *index)
Get a locator for a block index entry.
Definition: chain.cpp:50
const uint256 & ToUint256() const LIFETIMEBOUND
void RemoveUnbroadcastTx(const uint256 &txid, const bool unchecked=false)
Removes a transaction from the unbroadcast set.
Definition: txmempool.cpp:1029
bool IsAddrFetchConn() const
Definition: net.h:812
HeadersSyncState:
Definition: headerssync.h:101
arith_uint256 GetBlockProof(const CBlockIndex &block)
Definition: chain.cpp:131
A Span is an object that can refer to a contiguous sequence of objects.
Definition: solver.h:20
const char * TX
The tx message transmits a single transaction.
Definition: protocol.cpp:23
static const unsigned int MAX_LOCATOR_SZ
The maximum number of entries in a locator.
The basic transaction that is broadcasted on the network and contained in blocks. ...
Definition: transaction.h:295
int nHeight
height of the entry in the chain. The genesis block has height 0
Definition: chain.h:162
FlatFilePos GetBlockPos() const EXCLUSIVE_LOCKS_REQUIRED(
Definition: chain.h:223
Information about a peer.
Definition: net.h:676
const Consensus::Params & GetConsensus() const
Definition: chainparams.h:93
static constexpr auto MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict...
std::vector< int > vHeightInFlight
std::atomic< std::chrono::seconds > m_last_block_time
UNIX epoch time of the last block received from this peer that we had not yet seen (e...
Definition: net.h:871
void ForEachNode(const NodeFn &func)
Definition: net.h:1134
Simple class for background tasks that should be run periodically or once "after a while"...
Definition: scheduler.h:38
static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT
Maximum number of in-flight transaction requests from a peer.
CBlockIndex * GetAncestor(int height)
Efficiently find an ancestor of this block.
Definition: chain.cpp:120
full block available in blk*.dat
Definition: chain.h:117
virtual bool SendMessages(CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Send queued protocol messages to a given node.
bool Good(const CService &addr, NodeSeconds time=Now< NodeSeconds >())
Mark an address record as accessible and attempt to move it to addrman&#39;s tried table.
Definition: addrman.cpp:1300
void AddTimeData(const CNetAddr &ip, int64_t nOffsetSample)
Definition: timedata.cpp:42
static constexpr SerParams V1_NETWORK
Definition: protocol.h:373
#define LogPrintf(...)
Definition: logging.h:245
int64_t GetTime()
DEPRECATED, see GetTime.
Definition: time.cpp:97
Defined in BIP 339.
Definition: protocol.h:445
int GetCommonVersion() const
Definition: net.h:929
bool HaveTx(const GenTxid &gtxid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Check if we already have an orphan transaction (by txid or wtxid)
const std::string m_addr_name
Definition: net.h:716
COutPoint prevout
Definition: transaction.h:69
bool IsAddrV1Compatible() const
Check if the current object can be serialized in pre-ADDRv2/BIP155 format.
Definition: netaddress.cpp:478
bool HasPermission(NetPermissionFlags permission) const
Definition: net.h:729
bool IsInboundConn() const
Definition: net.h:816
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate...
Definition: cs_main.cpp:8
static constexpr int HISTORICAL_BLOCK_AGE
Age after which a block is considered historical for purposes of rate limiting block relay...
bool LookupFilterRange(int start_height, const CBlockIndex *stop_index, std::vector< BlockFilter > &filters_out) const
Get a range of filters between two heights on a chain.
static constexpr double MAX_ADDR_RATE_PER_SECOND
The maximum rate of address records we&#39;re willing to process on average.
Tx already in mempool or conflicts with a tx in the chain (if it conflicts with another tx in mempool...
void ReportHeadersPresync(const arith_uint256 &work, int64_t height, int64_t timestamp)
This is used by net_processing to report pre-synchronization progress of headers, as headers are not ...
otherwise didn&#39;t meet our local policy rules
static const int FEEFILTER_VERSION
"feefilter" tells peers to filter invs to you by fee starts with this version
void scheduleEvery(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Repeat f until the scheduler is stopped.
Definition: scheduler.cpp:108
A generic txid reference (txid or wtxid).
Definition: transaction.h:427
CAmount GetFeePerK() const
Return the fee in satoshis for a vsize of 1000 vbytes.
Definition: feerate.h:65
virtual bool ProcessMessages(CNode *pnode, std::atomic< bool > &interrupt) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Process protocol messages received from a given node.
bool GetUseAddrmanOutgoing() const
Definition: net.h:1120
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL
Average delay between local address broadcasts.
unsigned int nTx
Number of transactions in this block.
Definition: chain.h:181
SnapshotCompletionResult MaybeCompleteSnapshotValidation() EXCLUSIVE_LOCKS_REQUIRED(const CBlockIndex *GetSnapshotBaseBlock() const EXCLUSIVE_LOCKS_REQUIRED(Chainstate ActiveChainstate)() const
Once the background validation chainstate has reached the height which is the base of the UTXO snapsh...
Definition: validation.h:1065
Nodes collect new transactions into a block, hash them into a hash tree, and scan through nonce value...
Definition: block.h:21
static GenTxid Txid(const uint256 &hash)
Definition: transaction.h:434
const Wtxid & GetWitnessHash() const LIFETIMEBOUND
Definition: transaction.h:344
static constexpr auto GETDATA_TX_INTERVAL
How long to wait before downloading a transaction from an additional peer.
const uint256 & GetHash() const LIFETIMEBOUND
Definition: transaction.h:437
const char * FILTERADD
The filteradd message tells the receiving peer to add a single element to a previously-set bloom filt...
Definition: protocol.cpp:32
RecursiveMutex cs
This mutex needs to be locked when accessing mapTx or other members that are guarded by it...
Definition: txmempool.h:388
bool IsFullOutboundConn() const
Definition: net.h:780
#define Assert(val)
Identity function.
Definition: check.h:77
const Txid & GetHash() const LIFETIMEBOUND
Definition: transaction.h:343
int64_t GetBlockProofEquivalentTime(const CBlockIndex &to, const CBlockIndex &from, const CBlockIndex &tip, const Consensus::Params &params)
Return the time it would take to redo the work difference between from and to, assuming the current h...
Definition: chain.cpp:146
static const int MAX_NUM_UNCONNECTING_HEADERS_MSGS
Maximum number of unconnecting headers announcements before DoS score.
static constexpr TransactionSerParams TX_WITH_WITNESS
Definition: transaction.h:195
static constexpr TransactionSerParams TX_NO_WITNESS
Definition: transaction.h:196
ReachableNets g_reachable_nets
Definition: netbase.cpp:35
#define PT_GUARDED_BY(x)
Definition: threadsafety.h:39
const std::string & BlockFilterTypeName(BlockFilterType filter_type)
Get the human-readable name for a filter type.
const char * GETBLOCKTXN
Contains a BlockTransactionsRequest Peer should respond with "blocktxn" message.
Definition: protocol.cpp:38
bool ProcessNewBlockHeaders(const std::vector< CBlockHeader > &block, bool min_pow_checked, BlockValidationState &state, const CBlockIndex **ppindex=nullptr) LOCKS_EXCLUDED(cs_main)
Process incoming block headers.
static constexpr uint32_t TXRECONCILIATION_VERSION
Supported transaction reconciliation protocol version.
MempoolAcceptResult ProcessTransaction(const CTransactionRef &tx, bool test_accept=false) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Try to add a transaction to the memory pool.