Bitcoin Core  26.1.0
P2P Digital Currency
net_processing.cpp
Go to the documentation of this file.
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2022 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 
6 #include <net_processing.h>
7 
8 #include <addrman.h>
9 #include <banman.h>
10 #include <blockencodings.h>
11 #include <blockfilter.h>
12 #include <chainparams.h>
13 #include <consensus/amount.h>
14 #include <consensus/validation.h>
15 #include <deploymentstatus.h>
16 #include <hash.h>
17 #include <headerssync.h>
18 #include <index/blockfilterindex.h>
19 #include <kernel/mempool_entry.h>
20 #include <logging.h>
21 #include <kernel/chain.h>
22 #include <merkleblock.h>
23 #include <netbase.h>
24 #include <netmessagemaker.h>
25 #include <node/blockstorage.h>
26 #include <node/txreconciliation.h>
27 #include <policy/fees.h>
28 #include <policy/policy.h>
29 #include <policy/settings.h>
30 #include <primitives/block.h>
31 #include <primitives/transaction.h>
32 #include <random.h>
33 #include <reverse_iterator.h>
34 #include <scheduler.h>
35 #include <streams.h>
36 #include <sync.h>
37 #include <timedata.h>
38 #include <tinyformat.h>
39 #include <txmempool.h>
40 #include <txorphanage.h>
41 #include <txrequest.h>
42 #include <util/check.h> // For NDEBUG compile time check
43 #include <util/strencodings.h>
44 #include <util/trace.h>
45 #include <validation.h>
46 
47 #include <algorithm>
48 #include <atomic>
49 #include <chrono>
50 #include <future>
51 #include <memory>
52 #include <optional>
53 #include <typeinfo>
54 
/** Base allowance for the initial headers-download timeout. */
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
/** Additional headers-download timeout allowance per expected header. */
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
/** How long to wait for a peer to respond to a getheaders message. */
static constexpr auto HEADERS_RESPONSE_TIME{2min};
/** Maximum number of outbound peers protected from disconnection for being slow to keep up. */
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
/** Timeout for (unprotected) outbound peers to catch up to our chain work. */
static constexpr auto CHAIN_SYNC_TIMEOUT{20min};
/** How frequently to check whether our tip looks stale. */
static constexpr auto STALE_CHECK_INTERVAL{10min};
/** How frequently to check for (and evict) extra outbound peers. */
static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s};
/** Minimum time an outbound connection stays up before it is considered for eviction. */
static constexpr auto MINIMUM_CONNECT_TIME{30s};
/** Fixed salt for the address-relay hasher (value is arbitrary but must stay stable). */
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
/** Age limit (seconds; 30 days) past which blocks are no longer relayed to stale-tip peers. */
static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
/** Age (seconds; 7 days) past which a requested block counts as a historical request. */
static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
/** Time between automatic pings (latency measurement and keepalive). */
static constexpr auto PING_INTERVAL{2min};
/** The maximum number of entries in a block locator. */
static const unsigned int MAX_LOCATOR_SZ = 101;
/** The maximum number of entries in an inv message. */
static const unsigned int MAX_INV_SZ = 50000;
/** Maximum number of simultaneous in-flight transaction requests to a single peer. */
static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT = 100;
/** Maximum number of transaction announcements tracked per peer. */
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 5000;
/** Delay before requesting a tx announced by txid (wtxid announcers are preferred). */
static constexpr auto TXID_RELAY_DELAY{2s};
/** Delay before requesting a tx from a non-preferred peer. */
static constexpr auto NONPREF_PEER_TX_DELAY{2s};
/** Delay before requesting a tx from a peer that has many requests in flight. */
static constexpr auto OVERLOADED_PEER_TX_DELAY{2s};
/** How long to wait before re-requesting a transaction from another peer. */
static constexpr auto GETDATA_TX_INTERVAL{60s};
/** Maximum number of items in an outgoing getdata message. */
static const unsigned int MAX_GETDATA_SZ = 1000;
/** Number of blocks that can be requested at any given time from a single peer. */
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
/** Default time during which a peer must stall block download progress before being disconnected. */
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s};
/** Maximum (exponentially increased) block-stalling timeout. */
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s};
/** Maximum number of headers to send in one headers message. */
static const unsigned int MAX_HEADERS_RESULTS = 2000;
/** Maximum depth from the tip at which we serve compact blocks. */
static const int MAX_CMPCTBLOCK_DEPTH = 5;
/** Maximum depth from the tip at which we answer getblocktxn requests. */
static const int MAX_BLOCKTXN_DEPTH = 10;
/** Size of the look-ahead window for parallel block download. */
static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
/** Base block-download timeout factor (multiplied elsewhere by the expected block interval — confirm at use site). */
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
/** Additional block-download timeout factor per parallel downloading peer. */
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
/** Maximum number of headers to announce when relaying blocks via headers message. */
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
/** Maximum number of unconnecting headers messages before escalating (see handler for the penalty). */
static const int MAX_NUM_UNCONNECTING_HEADERS_MSGS = 10;
/** Minimum number of recent blocks a NODE_NETWORK_LIMITED peer is expected to serve. */
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
/** Average delay between rebroadcasts of our own local address. */
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h};
/** Average delay between address broadcasts to peers. */
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s};
/** Delay between rotating the destination peers for our own address relay. */
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h};
/** Average delay between trickled inventory transmissions to inbound peers. */
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s};
/** Average delay between trickled inventory transmissions to outbound peers. */
static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL{2s};
/** Target rate of transaction inv announcements, per second. */
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
/** Maximum number of inventory items to send per transmission. */
static constexpr unsigned int INVENTORY_BROADCAST_MAX = 1000;
// NOTE(review): INVENTORY_BROADCAST_TARGET is defined in this file but falls outside this excerpt.
static_assert(INVENTORY_BROADCAST_MAX >= INVENTORY_BROADCAST_TARGET, "INVENTORY_BROADCAST_MAX too low");
static_assert(INVENTORY_BROADCAST_MAX <= MAX_PEER_TX_ANNOUNCEMENTS, "INVENTORY_BROADCAST_MAX too high");
/** Average delay between feefilter broadcasts. */
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min};
/** Maximum delay before announcing a significant fee-filter change. */
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min};
/** Maximum number of compact filters that may be requested with one getcfilters message. */
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
/** Maximum number of filter headers that may be requested with one getcfheaders message. */
static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
/** Maximum percentage of addrman addresses to return in response to a getaddr. */
static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
/** The maximum number of address records permitted in an addr message. */
static constexpr size_t MAX_ADDR_TO_SEND{1000};
/** Maximum average rate of incoming addr records we are willing to process. */
static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1};
/** Compact-block protocol version we signal in sendcmpct (BIP152). */
static constexpr uint64_t CMPCTBLOCKS_VERSION{2};
178 
179 // Internal stuff
180 namespace {
/** State for a block that has been requested from a peer. */
struct QueuedBlock {
    /** Index entry of the requested block. */
    const CBlockIndex* pindex;
    /** Partially-reconstructed block; only allocated when requested via the
     *  compact-block path (see BlockRequested, which sets it iff `pit` is given). */
    std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
};
188 
/**
 * Per-peer state for message processing. Members are annotated with the
 * mutex that guards them; fields accessed from multiple threads without a
 * lock are atomics. Referenced via PeerRef (shared_ptr) elsewhere in this file.
 */
struct Peer {
    /** Same id as the CNode object for this peer. */
    const NodeId m_id{0};

    /** Services we offered to this peer. */
    const ServiceFlags m_our_services;
    /** Services this peer offered to us. */
    std::atomic<ServiceFlags> m_their_services{NODE_NONE};

    /** Protects misbehavior data members. */
    Mutex m_misbehavior_mutex;
    /** Accumulated misbehavior score for this peer. */
    int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
    /** Whether this peer should be disconnected and marked as discouraged. */
    bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};

    /** Protects block-inventory data members. */
    Mutex m_block_inv_mutex;
    /** Block hashes to announce to this peer via inv messages. */
    std::vector<uint256> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
    /** Block hashes we would like to announce via a headers message. */
    std::vector<uint256> m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);
    /** Hash used to continue a chain of inv announcements — presumably the last
     *  block sent in response to getblocks; TODO confirm at the use sites. */
    uint256 m_continuation_block GUARDED_BY(m_block_inv_mutex) {};

    /** Block height this peer reported in its version message. */
    std::atomic<int> m_starting_height{-1};

    /** Nonce sent with our last ping (0 when no ping is outstanding). */
    std::atomic<uint64_t> m_ping_nonce_sent{0};
    /** Time at which the last ping was sent (0us if never). */
    std::atomic<std::chrono::microseconds> m_ping_start{0us};
    /** Whether a ping has been queued (but not yet sent). */
    std::atomic<bool> m_ping_queued{false};

    /** Whether this peer relays transactions by wtxid. */
    std::atomic<bool> m_wtxid_relay{false};
    /** Earliest time at which we may send this peer our next feefilter message. */
    std::chrono::microseconds m_next_send_feefilter GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};

    /** Transaction-relay state; only instantiated (via SetTxRelay) for peers we relay txs with. */
    struct TxRelay {
        mutable RecursiveMutex m_bloom_filter_mutex;
        /** Whether we relay transactions to this peer. */
        bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
        /** BIP37 bloom filter installed by the peer (nullptr when none is set). */
        std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr};

        mutable RecursiveMutex m_tx_inventory_mutex;
        /** Filter of tx hashes this peer already knows (announced by or to them). */
        CRollingBloomFilter m_tx_inventory_known_filter GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
        /** Tx hashes queued for inv relay to this peer. */
        std::set<uint256> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
        /** Whether the peer has requested our mempool contents (BIP35). */
        bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
        /** Next scheduled time to (possibly) send tx inventory to this peer. */
        std::chrono::microseconds m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0};
        /** Sequence number recorded at the last inv send — presumably a mempool
         *  sequence used to filter already-announced txs; verify at use site. */
        uint64_t m_last_inv_sequence GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1};

        /** Minimum fee rate (BIP133 feefilter) the peer wants announced txs to meet. */
        std::atomic<CAmount> m_fee_filter_received{0};
    };

    /* Initializes a TxRelay struct for this peer. Can be called at most once for a peer. */
    TxRelay* SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex)
    {
        LOCK(m_tx_relay_mutex);
        Assume(!m_tx_relay);
        m_tx_relay = std::make_unique<Peer::TxRelay>();
        return m_tx_relay.get();
    };

    /** Returns the TxRelay state, or nullptr if tx relay was never set up for this peer. */
    TxRelay* GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex)
    {
        return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
    };

    /** Addresses queued up for relay to this peer. */
    std::vector<CAddress> m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
    /** Probabilistic filter of addresses this peer already knows; nullptr until
     *  address relay is set up (see the assert in AddAddressKnown/PushAddress). */
    std::unique_ptr<CRollingBloomFilter> m_addr_known GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
    /** Whether we participate in address relay with this peer. */
    std::atomic_bool m_addr_relay_enabled{false};
    /** Whether we have sent this peer a getaddr. */
    bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
    /** Guards the address-send schedule below. */
    mutable Mutex m_addr_send_times_mutex;
    /** Next scheduled time to send an addr message to this peer. */
    std::chrono::microseconds m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    /** Next scheduled time to advertise our own local address to this peer. */
    std::chrono::microseconds m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    /** Whether the peer asked for addrv2 (BIP155) address messages. */
    std::atomic_bool m_wants_addrv2{false};
    /** Whether this peer has already sent us a getaddr. */
    bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
    /** Token bucket limiting how many incoming addr records we process from this peer. */
    double m_addr_token_bucket GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1.0};
    /** Timestamp of the last token-bucket update. */
    std::chrono::microseconds m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){GetTime<std::chrono::microseconds>()};
    /** Count of addresses from this peer dropped due to rate limiting. */
    std::atomic<uint64_t> m_addr_rate_limited{0};
    /** Count of addresses from this peer that we processed. */
    std::atomic<uint64_t> m_addr_processed{0};

    /** Whether an inv from this peer has already triggered a getheaders before sync completed. */
    bool m_inv_triggered_getheaders_before_sync GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};

    /** Protects m_getdata_requests. */
    Mutex m_getdata_requests_mutex;
    /** Work queue of getdata items received from this peer. */
    std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);

    /** Protects m_headers_sync. */
    Mutex m_headers_sync_mutex;
    /** State for an in-progress low-work headers sync with this peer (nullptr when inactive). */
    std::unique_ptr<HeadersSyncState> m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex) GUARDED_BY(m_headers_sync_mutex) {};

    /** Whether we have sent this peer a sendheaders message. */
    std::atomic<bool> m_sent_sendheaders{false};

    /** Number of unconnecting-headers messages received from this peer. */
    int m_num_unconnecting_headers_msgs GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};

    /** Deadline for this peer's initial headers sync (0us when unset). */
    std::chrono::microseconds m_headers_sync_timeout GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us};

    /** Whether the peer signalled (via sendheaders) that it prefers headers announcements. */
    bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};

    explicit Peer(NodeId id, ServiceFlags our_services)
        : m_id{id}
        , m_our_services{our_services}
    {}

private:
    /** Guards m_tx_relay. */
    mutable Mutex m_tx_relay_mutex;

    /** Transaction relay state; nullptr unless SetTxRelay() was called for this peer. */
    std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);
};
401 
402 using PeerRef = std::shared_ptr<Peer>;
403 
/**
 * Validation-relevant per-node state. Lives in m_node_states, which is
 * GUARDED_BY(cs_main) — all access therefore requires cs_main.
 */
struct CNodeState {
    /** The best known block this peer has announced. */
    const CBlockIndex* pindexBestKnownBlock{nullptr};
    /** Hash of the last unknown block this peer has announced. */
    uint256 hashLastUnknownBlock{};
    /** The last full block we both have. */
    const CBlockIndex* pindexLastCommonBlock{nullptr};
    /** The best header we have sent to this peer. */
    const CBlockIndex* pindexBestHeaderSent{nullptr};
    /** Whether we have started headers synchronization with this peer. */
    bool fSyncStarted{false};
    /** When this peer started stalling our block download (0us if not stalling). */
    std::chrono::microseconds m_stalling_since{0us};
    /** Blocks currently in flight from this peer. */
    std::list<QueuedBlock> vBlocksInFlight;
    /** When the first entry in vBlocksInFlight started downloading (0us if none in flight). */
    std::chrono::microseconds m_downloading_since{0us};
    /** Whether this peer is preferred for block download. */
    bool fPreferredDownload{false};
    /** Whether we have requested high-bandwidth compact blocks (BIP152) from this peer. */
    bool m_requested_hb_cmpctblocks{false};
    /** Whether this peer can serve us compact blocks. */
    bool m_provides_cmpctblocks{false};

    /** State for enforcing the chain-sync timeout on outbound peers. */
    struct ChainSyncTimeoutState {
        /** Deadline for the peer to demonstrate sufficient chain work (0s when unset). */
        std::chrono::seconds m_timeout{0s};
        /** Header we expect the peer's chain to include when the timeout fires. */
        const CBlockIndex* m_work_header{nullptr};
        /** Whether a getheaders has been sent for this timeout round. */
        bool m_sent_getheaders{false};
        /** Whether this peer is protected from chain-sync disconnection. */
        bool m_protect{false};
    };

    ChainSyncTimeoutState m_chain_sync;

    /** Time of this peer's last block announcement (seconds — confirm epoch/source at use site). */
    int64_t m_last_block_announcement{0};

    /** Whether this peer is an inbound connection. */
    const bool m_is_inbound;

    CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
};
478 
/**
 * Concrete PeerManager implementation: owns per-peer state (Peer / CNodeState)
 * and implements all P2P message processing and scheduling. Negative lock
 * annotations (EXCLUSIVE_LOCKS_REQUIRED(!mutex)) mean the caller must NOT
 * hold that mutex when calling.
 */
class PeerManagerImpl final : public PeerManager
{
public:
    PeerManagerImpl(CConnman& connman, AddrMan& addrman,
                    BanMan* banman, ChainstateManager& chainman,
                    CTxMemPool& pool, Options opts);

    /** Validation-event notifications (overridden interface methods). */
    void BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
    void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
    void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void BlockChecked(const CBlock& block, const BlockValidationState& state) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);

    /** Per-node lifecycle and the main message-processing entry points. */
    void InitializeNode(CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex);
    bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
    bool SendMessages(CNode* pto) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex, g_msgproc_mutex);

    /** Miscellaneous PeerManager API. */
    void StartScheduledTasks(CScheduler& scheduler) override;
    void CheckForStaleTipAndEvictPeers() override;
    std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    bool IgnoresIncomingTxs() override { return m_opts.ignore_incoming_txs; }
    void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void RelayTransaction(const uint256& txid, const uint256& wtxid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
    void SetBestHeight(int height) override { m_best_height = height; };
    void UnitTestMisbehaving(NodeId peer_id, int howmuch) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) { Misbehaving(*Assert(GetPeerRef(peer_id)), howmuch, ""); };
    void ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
                        const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
    void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) override;

private:
    /** Consider evicting an outbound peer based on how long it has lagged behind our chain work. */
    void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex);

    /** If we have extra outbound peers, consider disconnecting one. */
    void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Scheduled task: retry the initial broadcast of our unconfirmed transactions. */
    void ReattemptInitialBroadcast(CScheduler& scheduler) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    /** Get a shared pointer to this peer's Peer object, or nullptr if not found. */
    PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    /** Remove the Peer object from m_peer_map and return it. */
    PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    /** Increment the peer's misbehavior score; may mark it for discouragement. */
    void Misbehaving(Peer& peer, int howmuch, const std::string& message);

    /** Potentially punish a node based on a block validation result. */
    bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
                                 bool via_compact_block, const std::string& message = "")
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    /** Potentially punish a node based on a transaction validation result. */
    bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

    /** Maybe disconnect and discourage a peer marked as misbehaving; returns whether it was disconnected. */
    bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer);

    /** Process orphan transactions that may now be acceptable. */
    bool ProcessOrphanTx(Peer& peer)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);

    /** Process a batch of block headers received from a peer (headers or cmpctblock path). */
    void ProcessHeadersMessage(CNode& pfrom, Peer& peer,
                               std::vector<CBlockHeader>&& headers,
                               bool via_compact_block)
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
    /** Anti-DoS checks on a headers batch (proof of work etc.). */
    bool CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer);
    /** Chain-work threshold below which incoming headers are handled as low-work. */
    arith_uint256 GetAntiDoSWorkThreshold();
    /** Handle a small batch of headers that do not connect to our block index. */
    void HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
    /** Whether each header in the batch builds on the previous one. */
    bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const;
    /** Feed headers into an in-progress low-work headers sync for this peer. */
    bool IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom,
                                            std::vector<CBlockHeader>& headers)
        EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
    /** Possibly start a low-work headers sync from the given chain start. */
    bool TryLowWorkHeadersSync(Peer& peer, CNode& pfrom,
                               const CBlockIndex* chain_start_header,
                               std::vector<CBlockHeader>& headers)
        EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);

    /** Whether the given header is an ancestor of our best header or tip. */
    bool IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Potentially send a getheaders message built from the given locator. */
    bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
    /** Potentially fetch blocks announced via these headers directly from the announcing peer. */
    void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header);
    /** Update peer/node state after a headers message has been processed. */
    void UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    /** Respond to a getblocktxn request with the requested transactions of a block. */
    void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req);

    /** Register a transaction announcement for potential later download.
     *  NOTE(review): the trailing lock annotation (and terminator) of this
     *  declaration appears truncated in this copy of the file. */
    void AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)

    /** Send our version message to this peer. */
    void PushNodeVersion(CNode& pnode, const Peer& peer);

    /** Send a ping if due (periodic interval or a queued request). */
    void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now);

    /** Send queued addr messages to this peer if due. */
    void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    /** Send a sendheaders message to this peer if appropriate. */
    void MaybeSendSendHeaders(CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    /** Relay an address to a limited number of other peers. */
    void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);

    /** Send a feefilter message to this peer if due. */
    void MaybeSendFeefilter(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    FastRandomContext m_rng GUARDED_BY(NetEventsInterface::g_msgproc_mutex);

    FeeFilterRounder m_fee_filter_rounder GUARDED_BY(NetEventsInterface::g_msgproc_mutex);

    // Subsystems this peer manager depends on (all owned elsewhere).
    const CChainParams& m_chainparams;
    CConnman& m_connman;
    AddrMan& m_addrman;
    BanMan* const m_banman;
    ChainstateManager& m_chainman;
    CTxMemPool& m_mempool;
    TxRequestTracker m_txrequest GUARDED_BY(::cs_main);
    std::unique_ptr<TxReconciliationTracker> m_txreconciliation;

    /** The height of the best chain (set via SetBestHeight). */
    std::atomic<int> m_best_height{-1};

    /** Next time to check for a stale tip. */
    std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s};

    const Options m_opts;

    bool RejectIncomingTxs(const CNode& peer) const;

    /** Whether initial block download has completed at least once. */
    bool m_initial_sync_finished GUARDED_BY(cs_main){false};

    /** Protects m_peer_map. */
    mutable Mutex m_peer_mutex;
    /** Map of all Peer objects, keyed by node id. */
    std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);

    /** Map of all CNodeState objects, keyed by node id (cs_main). */
    std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main);

    /** Look up a node's CNodeState; nullptr if unknown. */
    const CNodeState* State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    CNodeState* State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    uint32_t GetFetchFlags(const Peer& peer) const;

    /** Next scheduled inventory trickle to inbound peers (shared across inbounds). */
    std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};

    /** Number of peers with which we are currently syncing headers. */
    int nSyncStarted GUARDED_BY(cs_main) = 0;

    /** Last block inv that triggered a headers sync. */
    uint256 m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){};

    /** Source peer (and whether it may be punished) for each block being processed. */
    std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);

    /** Number of peers with wtxid relay enabled. */
    std::atomic<int> m_wtxid_relay_peers{0};

    /** Number of outbound peers protected from chain-sync disconnection. */
    int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;

    /** Number of preferred-download peers. */
    int m_num_preferred_download_peers GUARDED_BY(cs_main){0};

    /** Current block-stalling timeout (adaptive between DEFAULT and MAX). */
    std::atomic<std::chrono::seconds> m_block_stalling_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT};

    bool AlreadyHaveTx(const GenTxid& gtxid)
        EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_recent_confirmed_transactions_mutex);

    /** Filter of txids/wtxids recently rejected from the mempool; reset when the chain tip changes. */
    CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000, 0.000'001};
    uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);

    /*
     * Filter for transactions that have been recently confirmed.
     * We use this to avoid requesting transactions that have already been
     * confirmed.
     *
     * Blocks don't typically have more than 4000 transactions, so this should
     * be at least six blocks (~1 hr) worth of transactions that we can store,
     * inserting both a txid and wtxid for every observed transaction.
     * If the number of transactions appearing in a block goes up, or if we are
     * seeing getdata requests more than an hour after initial announcement, we
     * can increase this number.
     * The false positive rate of 1/1M should come out to less than 1
     * transaction per day that would be inadvertently ignored (which is the
     * same probability that we have in the reject filter).
     */
    Mutex m_recent_confirmed_transactions_mutex;
    CRollingBloomFilter m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex){48'000, 0.000'001};

    /** Compute the next inventory-trickle time for inbound peers. */
    std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now,
                                                std::chrono::seconds average_interval);


    // All of the following cache a recent block, and are protected by m_most_recent_block_mutex
    Mutex m_most_recent_block_mutex;
    std::shared_ptr<const CBlock> m_most_recent_block GUARDED_BY(m_most_recent_block_mutex);
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex);
    uint256 m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex);
    std::unique_ptr<const std::map<uint256, CTransactionRef>> m_most_recent_block_txs GUARDED_BY(m_most_recent_block_mutex);

    // Data about the low-work headers synchronization, aggregated from all peers' HeadersSyncStates.
    Mutex m_headers_presync_mutex;
    /** Per-peer presync progress: (total work, optional (height, timestamp) of the last header). */
    using HeadersPresyncStats = std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
    /** Presync statistics for each peer with an active headers presync. */
    std::map<NodeId, HeadersPresyncStats> m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex) {};
    /** Peer with the best (most-work) headers presync; -1 if none. */
    NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex) {-1};
    /** Set when presync progress should be signalled to the UI/validation layer. */
    std::atomic_bool m_headers_presync_should_signal{false};

    /** Highest block height we have fast-announced via compact blocks. */
    int m_highest_fast_announce GUARDED_BY(::cs_main){0};

    /** Whether this block hash is currently requested from any peer. */
    bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Whether this block hash is currently requested from an outbound peer. */
    bool IsBlockRequestedFromOutbound(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Remove in-flight request state for a block (from one peer, or from all if from_peer is empty). */
    void RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /* Mark a block as in flight
     * Returns false, still setting pit, if the block was already in flight from the same peer
     * pit will only be valid as long as the same cs_main lock is being held
     */
    bool BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Select the next blocks to request from this peer; reports a staller if download is blocked. */
    void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Select historical blocks (between from_tip and target_block) to request from this peer. */
    void TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex* from_tip, const CBlockIndex* target_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Shared helper for the two block-selection routines above. */
    void FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain=nullptr, NodeId* nodeStaller=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /* Multimap used to preserve insertion order */
    typedef std::multimap<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator>> BlockDownloadMap;
    BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main);

    /** When our tip was last updated. */
    std::atomic<std::chrono::seconds> m_last_tip_update{0s};

    /** Find a transaction to serve in response to a getdata request.
     *  NOTE(review): the trailing lock annotation of this declaration appears
     *  truncated in this copy of the file. */
    CTransactionRef FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid)

    void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex, NetEventsInterface::g_msgproc_mutex)

    /** Submit a block for validation, tracking its source peer. */
    void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked);

    /** Handle a blocktxn message: complete a partially-downloaded compact block. */
    void ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions)
        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex);

    /** Maybe add this peer to our set of high-bandwidth compact-block announcers (BIP152). */
    void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Peers currently selected to announce blocks via compact-block headers-and-ids. */
    std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);

    /** Number of peers from which we are currently downloading blocks. */
    int m_peers_downloading_from GUARDED_BY(cs_main) = 0;

    /** Storage for orphan transactions (missing parents). */
    TxOrphanage m_orphanage;

    void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    /** Ring buffer of extra transactions kept for compact-block reconstruction. */
    std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex);
    /** Next write position in vExtraTxnForCompact. */
    size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0;

    /** Resolve a peer's hashLastUnknownBlock into pindexBestKnownBlock where possible. */
    void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    /** Record that a peer announced the given block hash. */
    void UpdateBlockAvailability(NodeId nodeid, const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Whether we are willing to serve the given block to peers. */
    bool BlockRequestAllowed(const CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    bool AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    /** Serve a block (or filtered/compact variant) in response to a getdata inv. */
    void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv)
        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);

    /** Validate a BIP157 filter request and resolve its stop index and filter index. */
    bool PrepareBlockFilterRequest(CNode& node, Peer& peer,
                                   BlockFilterType filter_type, uint32_t start_height,
                                   const uint256& stop_hash, uint32_t max_height_diff,
                                   const CBlockIndex*& stop_index,
                                   BlockFilterIndex*& filter_index);

    /** Handle a getcfilters request (BIP157). */
    void ProcessGetCFilters(CNode& node, Peer& peer, CDataStream& vRecv);

    /** Handle a getcfheaders request (BIP157). */
    void ProcessGetCFHeaders(CNode& node, Peer& peer, CDataStream& vRecv);

    /** Handle a getcfcheckpt request (BIP157). */
    void ProcessGetCFCheckPt(CNode& node, Peer& peer, CDataStream& vRecv);

    /** Enable address relay with this peer if appropriate; returns whether it is enabled. */
    bool SetupAddressRelay(const CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    void AddAddressKnown(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
    void PushAddress(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
};
1062 
1063 const CNodeState* PeerManagerImpl::State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1064 {
1065  std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
1066  if (it == m_node_states.end())
1067  return nullptr;
1068  return &it->second;
1069 }
1070 
1072 {
1073  return const_cast<CNodeState*>(std::as_const(*this).State(pnode));
1074 }
1075 
1081 static bool IsAddrCompatible(const Peer& peer, const CAddress& addr)
1082 {
1083  return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
1084 }
1085 
1086 void PeerManagerImpl::AddAddressKnown(Peer& peer, const CAddress& addr)
1087 {
1088  assert(peer.m_addr_known);
1089  peer.m_addr_known->insert(addr.GetKey());
1090 }
1091 
1092 void PeerManagerImpl::PushAddress(Peer& peer, const CAddress& addr)
1093 {
1094  // Known checking here is only to save space from duplicates.
1095  // Before sending, we'll filter it again for known addresses that were
1096  // added after addresses were pushed.
1097  assert(peer.m_addr_known);
1098  if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) && IsAddrCompatible(peer, addr)) {
1099  if (peer.m_addrs_to_send.size() >= MAX_ADDR_TO_SEND) {
1100  peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] = addr;
1101  } else {
1102  peer.m_addrs_to_send.push_back(addr);
1103  }
1104  }
1105 }
1106 
1107 static void AddKnownTx(Peer& peer, const uint256& hash)
1108 {
1109  auto tx_relay = peer.GetTxRelay();
1110  if (!tx_relay) return;
1111 
1112  LOCK(tx_relay->m_tx_inventory_mutex);
1113  tx_relay->m_tx_inventory_known_filter.insert(hash);
1114 }
1115 
1117 static bool CanServeBlocks(const Peer& peer)
1118 {
1119  return peer.m_their_services & (NODE_NETWORK|NODE_NETWORK_LIMITED);
1120 }
1121 
1124 static bool IsLimitedPeer(const Peer& peer)
1125 {
1126  return (!(peer.m_their_services & NODE_NETWORK) &&
1127  (peer.m_their_services & NODE_NETWORK_LIMITED));
1128 }
1129 
1131 static bool CanServeWitnesses(const Peer& peer)
1132 {
1133  return peer.m_their_services & NODE_WITNESS;
1134 }
1135 
1136 std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
1137  std::chrono::seconds average_interval)
1138 {
1139  if (m_next_inv_to_inbounds.load() < now) {
1140  // If this function were called from multiple threads simultaneously
1141  // it would possible that both update the next send variable, and return a different result to their caller.
1142  // This is not possible in practice as only the net processing thread invokes this function.
1143  m_next_inv_to_inbounds = GetExponentialRand(now, average_interval);
1144  }
1145  return m_next_inv_to_inbounds;
1146 }
1147 
1148 bool PeerManagerImpl::IsBlockRequested(const uint256& hash)
1149 {
1150  return mapBlocksInFlight.count(hash);
1151 }
1152 
1153 bool PeerManagerImpl::IsBlockRequestedFromOutbound(const uint256& hash)
1154 {
1155  for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
1156  auto [nodeid, block_it] = range.first->second;
1157  CNodeState& nodestate = *Assert(State(nodeid));
1158  if (!nodestate.m_is_inbound) return true;
1159  }
1160 
1161  return false;
1162 }
1163 
void PeerManagerImpl::RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer)
{
    // Drop in-flight state for this block: either the request from `from_peer`
    // only, or (when from_peer is empty) the requests from all peers.
    auto range = mapBlocksInFlight.equal_range(hash);
    if (range.first == range.second) {
        // Block was not requested from any peer
        return;
    }

    // We should not have requested too many of this block
    Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);

    while (range.first != range.second) {
        auto [node_id, list_it] = range.first->second;

        if (from_peer && *from_peer != node_id) {
            // A specific peer was requested and this entry belongs to another one.
            range.first++;
            continue;
        }

        CNodeState& state = *Assert(State(node_id));

        if (state.vBlocksInFlight.begin() == list_it) {
            // First block on the queue was received, update the start download time for the next one
            state.m_downloading_since = std::max(state.m_downloading_since, GetTime<std::chrono::microseconds>());
        }
        state.vBlocksInFlight.erase(list_it);

        if (state.vBlocksInFlight.empty()) {
            // Last validated block on the queue for this peer was received.
            m_peers_downloading_from--;
        }
        // The peer delivered something, so clear any stalling marker.
        state.m_stalling_since = 0us;

        // erase() returns the next valid iterator, so the [first, second)
        // range stays consistent while we remove entries.
        range.first = mapBlocksInFlight.erase(range.first);
    }
}
1200 
1201 bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit)
1202 {
1203  const uint256& hash{block.GetBlockHash()};
1204 
1205  CNodeState *state = State(nodeid);
1206  assert(state != nullptr);
1207 
1208  Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);
1209 
1210  // Short-circuit most stuff in case it is from the same node
1211  for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
1212  if (range.first->second.first == nodeid) {
1213  if (pit) {
1214  *pit = &range.first->second.second;
1215  }
1216  return false;
1217  }
1218  }
1219 
1220  // Make sure it's not being fetched already from same peer.
1221  RemoveBlockRequest(hash, nodeid);
1222 
1223  std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
1224  {&block, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)});
1225  if (state->vBlocksInFlight.size() == 1) {
1226  // We're starting a block download (batch) from this peer.
1227  state->m_downloading_since = GetTime<std::chrono::microseconds>();
1228  m_peers_downloading_from++;
1229  }
1230  auto itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it)));
1231  if (pit) {
1232  *pit = &itInFlight->second.second;
1233  }
1234  return true;
1235 }
1236 
/** Consider selecting this peer as a BIP152 high-bandwidth compact-block
 *  announcer. At most 3 peers are kept in lNodesAnnouncingHeaderAndIDs; when
 *  the list is full, the oldest entry is demoted back to low-bandwidth mode.
 *  NOTE(review): this rendering appears to omit a lock-assertion line at the
 *  top of the function body -- confirm against the actual source file. */
void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
{

    // When in -blocksonly mode, never request high-bandwidth mode from peers. Our
    // mempool will not contain the transactions necessary to reconstruct the
    // compact block.
    if (m_opts.ignore_incoming_txs) return;

    CNodeState* nodestate = State(nodeid);
    if (!nodestate || !nodestate->m_provides_cmpctblocks) {
        // Don't request compact blocks if the peer has not signalled support
        return;
    }

    int num_outbound_hb_peers = 0;
    for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
        if (*it == nodeid) {
            // Already a high-bandwidth peer: just move it to the back of the
            // list (most recently selected) so it won't be the next to be demoted.
            lNodesAnnouncingHeaderAndIDs.erase(it);
            lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
            return;
        }
        CNodeState *state = State(*it);
        if (state != nullptr && !state->m_is_inbound) ++num_outbound_hb_peers;
    }
    if (nodestate->m_is_inbound) {
        // If we're adding an inbound HB peer, make sure we're not removing
        // our last outbound HB peer in the process.
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1) {
            CNodeState *remove_node = State(lNodesAnnouncingHeaderAndIDs.front());
            if (remove_node != nullptr && !remove_node->m_is_inbound) {
                // Put the HB outbound peer in the second slot, so that it
                // doesn't get removed.
                std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
            }
        }
    }
    m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
            // As per BIP152, we only get 3 of our peers to announce
            // blocks using compact encodings.
            m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this](CNode* pnodeStop){
                m_connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION));
                // save BIP152 bandwidth state: we select peer to be low-bandwidth
                pnodeStop->m_bip152_highbandwidth_to = false;
                return true;
            });
            lNodesAnnouncingHeaderAndIDs.pop_front();
        }
        m_connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*high_bandwidth=*/true, /*version=*/CMPCTBLOCKS_VERSION));
        // save BIP152 bandwidth state: we select peer to be high-bandwidth
        pfrom->m_bip152_highbandwidth_to = true;
        lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
        return true;
    });
}
1294 
/** Whether our chain tip looks stale: no tip update for more than three
 *  target block intervals while no block download is in flight that could
 *  explain the silence. */
bool PeerManagerImpl::TipMayBeStale()
{
    const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
    if (m_last_tip_update.load() == 0s) {
        // First call: start measuring the staleness window from now.
        m_last_tip_update = GetTime<std::chrono::seconds>();
    }
    return m_last_tip_update.load() < GetTime<std::chrono::seconds>() - std::chrono::seconds{consensusParams.nPowTargetSpacing * 3} && mapBlocksInFlight.empty();
}
1304 
1305 bool PeerManagerImpl::CanDirectFetch()
1306 {
1307  return m_chainman.ActiveChain().Tip()->Time() > GetAdjustedTime() - m_chainparams.GetConsensus().PowTargetSpacing() * 20;
1308 }
1309 
1310 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1311 {
1312  if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
1313  return true;
1314  if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
1315  return true;
1316  return false;
1317 }
1318 
1319 void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
1320  CNodeState *state = State(nodeid);
1321  assert(state != nullptr);
1322 
1323  if (!state->hashLastUnknownBlock.IsNull()) {
1324  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock);
1325  if (pindex && pindex->nChainWork > 0) {
1326  if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
1327  state->pindexBestKnownBlock = pindex;
1328  }
1329  state->hashLastUnknownBlock.SetNull();
1330  }
1331  }
1332 }
1333 
1334 void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
1335  CNodeState *state = State(nodeid);
1336  assert(state != nullptr);
1337 
1338  ProcessBlockAvailability(nodeid);
1339 
1340  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
1341  if (pindex && pindex->nChainWork > 0) {
1342  // An actually better block was announced.
1343  if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
1344  state->pindexBestKnownBlock = pindex;
1345  }
1346  } else {
1347  // An unknown block was announced; just assume that the latest one is the best one.
1348  state->hashLastUnknownBlock = hash;
1349  }
1350 }
1351 
1352 // Logic for calculating which blocks to download from a given peer, given our current tip.
/** Decide which blocks to request next from this peer for the active chain.
 *  Appends up to `count` block indexes to vBlocks; sets nodeStaller to the
 *  peer blocking the download window when nothing can be fetched. */
void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller)
{
    if (count == 0)
        return;

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(peer.m_id);
    assert(state != nullptr);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(peer.m_id);

    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < m_chainman.ActiveChain().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
        // This peer has nothing interesting.
        return;
    }

    if (state->pindexLastCommonBlock == nullptr) {
        // Bootstrap quickly by guessing a parent of our best tip is the forking point.
        // Guessing wrong in either direction is not a problem.
        state->pindexLastCommonBlock = m_chainman.ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight, m_chainman.ActiveChain().Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
    // of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
        return;

    const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
    // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
    // download that next block if the window were 1 larger.
    int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;

    // Delegate the actual window walk to the shared helper.
    FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd, &m_chainman.ActiveChain(), &nodeStaller);
}
1390 
/** Fill vBlocks with up to `count` historical blocks to request from this
 *  peer, walking forward from from_tip towards target_block (the assumeutxo
 *  snapshot base). Does nothing if the peer's known chain does not contain
 *  target_block. */
void PeerManagerImpl::TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex *from_tip, const CBlockIndex* target_block)
{
    Assert(from_tip);
    Assert(target_block);

    // Caller may already have filled the budget.
    if (vBlocks.size() >= count) {
        return;
    }

    vBlocks.reserve(count);
    CNodeState *state = Assert(State(peer.m_id));

    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) != target_block) {
        // This peer can't provide us the complete series of blocks leading up to the
        // assumeutxo snapshot base.
        //
        // Presumably this peer's chain has less work than our ActiveChain()'s tip, or else we
        // will eventually crash when we try to reorg to it. Let other logic
        // deal with whether we disconnect this peer.
        //
        // TODO at some point in the future, we might choose to request what blocks
        // this peer does have from the historical chain, despite it not having a
        // complete history beneath the snapshot base.
        return;
    }

    // Never walk past the snapshot base, even if the window would allow it.
    FindNextBlocks(vBlocks, peer, state, from_tip, count, std::min<int>(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW, target_block->nHeight));
}
1419 
/** Shared helper for FindNextBlocksToDownload and
 *  TryDownloadingHistoricalBlocks: walk forward from pindexWalk towards the
 *  peer's best known block, appending blocks that are neither stored nor in
 *  flight to vBlocks (up to `count` entries, never past height nWindowEnd+1).
 *  When activeChain is given, state->pindexLastCommonBlock is advanced over
 *  blocks we already have. When nodeStaller is given, it receives the peer
 *  whose in-flight block is blocking the window if nothing could be fetched. */
void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain, NodeId* nodeStaller)
{
    std::vector<const CBlockIndex*> vToFetch;
    int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    // Peer downloading the first already-in-flight block we encounter; the
    // candidate "staller" if the window prevents any progress.
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
        // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
        // as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        // Jump ahead via GetAncestor, then fill the batch backwards via pprev.
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
        // are not yet downloaded and not in flight to vBlocks. In the meantime, update
        // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
        // already part of our chain (and therefore don't need it even if pruned).
        for (const CBlockIndex* pindex : vToFetch) {
            if (!pindex->IsValid(BLOCK_VALID_TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (!CanServeWitnesses(peer) && DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) {
                // We wouldn't download this block or its descendants from this peer.
                return;
            }
            if (pindex->nStatus & BLOCK_HAVE_DATA || (activeChain && activeChain->Contains(pindex))) {
                if (activeChain && pindex->HaveNumChainTxs())
                    state->pindexLastCommonBlock = pindex;
            } else if (!IsBlockRequested(pindex->GetBlockHash())) {
                // The block is not already downloaded, and not yet in flight.
                if (pindex->nHeight > nWindowEnd) {
                    // We reached the end of the window.
                    if (vBlocks.size() == 0 && waitingfor != peer.m_id) {
                        // We aren't able to fetch anything, but we would be if the download window was one larger.
                        if (nodeStaller) *nodeStaller = waitingfor;
                    }
                    return;
                }
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
                    return;
                }
            } else if (waitingfor == -1) {
                // This is the first already-in-flight block.
                waitingfor = mapBlocksInFlight.lower_bound(pindex->GetBlockHash())->second.first;
            }
        }
    }
}
1474 
1475 } // namespace
1476 
/** Send our VERSION handshake message to the given peer. */
void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer)
{
    uint64_t my_services{peer.m_our_services};
    const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())};
    uint64_t nonce = pnode.GetLocalNonce();
    const int nNodeStartingHeight{m_best_height};
    NodeId nodeid = pnode.GetId();
    CAddress addr = pnode.addr;

    // Only echo back a routable, non-proxied, addr-v1-compatible address;
    // otherwise send an empty placeholder service address.
    CService addr_you = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ? addr : CService();
    uint64_t your_services{addr.nServices};

    const bool tx_relay{!RejectIncomingTxs(pnode)};
    // Argument order below is the wire format of the VERSION message.
    m_connman.PushMessage(&pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, my_services, nTime,
            your_services, CNetAddr::V1(addr_you), // Together the pre-version-31402 serialization of CAddress "addrYou" (without nTime)
            my_services, CNetAddr::V1(CService{}), // Together the pre-version-31402 serialization of CAddress "addrMe" (without nTime)
            nonce, strSubVersion, nNodeStartingHeight, tx_relay));

    // Only log the peer's address when -logips is enabled.
    if (fLogIPs) {
        LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, them=%s, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToStringAddrPort(), tx_relay, nodeid);
    } else {
        LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, tx_relay, nodeid);
    }
}
1501 
1502 void PeerManagerImpl::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)
1503 {
1504  AssertLockHeld(::cs_main); // For m_txrequest
1505  NodeId nodeid = node.GetId();
1506  if (!node.HasPermission(NetPermissionFlags::Relay) && m_txrequest.Count(nodeid) >= MAX_PEER_TX_ANNOUNCEMENTS) {
1507  // Too many queued announcements from this peer
1508  return;
1509  }
1510  const CNodeState* state = State(nodeid);
1511 
1512  // Decide the TxRequestTracker parameters for this announcement:
1513  // - "preferred": if fPreferredDownload is set (= outbound, or NetPermissionFlags::NoBan permission)
1514  // - "reqtime": current time plus delays for:
1515  // - NONPREF_PEER_TX_DELAY for announcements from non-preferred connections
1516  // - TXID_RELAY_DELAY for txid announcements while wtxid peers are available
1517  // - OVERLOADED_PEER_TX_DELAY for announcements from peers which have at least
1518  // MAX_PEER_TX_REQUEST_IN_FLIGHT requests in flight (and don't have NetPermissionFlags::Relay).
1519  auto delay{0us};
1520  const bool preferred = state->fPreferredDownload;
1521  if (!preferred) delay += NONPREF_PEER_TX_DELAY;
1522  if (!gtxid.IsWtxid() && m_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY;
1523  const bool overloaded = !node.HasPermission(NetPermissionFlags::Relay) &&
1524  m_txrequest.CountInFlight(nodeid) >= MAX_PEER_TX_REQUEST_IN_FLIGHT;
1525  if (overloaded) delay += OVERLOADED_PEER_TX_DELAY;
1526  m_txrequest.ReceivedInv(nodeid, gtxid, preferred, current_time + delay);
1527 }
1528 
1529 void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
1530 {
1531  LOCK(cs_main);
1532  CNodeState *state = State(node);
1533  if (state) state->m_last_block_announcement = time_in_seconds;
1534 }
1535 
/** Set up per-peer state for a new connection: a CNodeState (under cs_main)
 *  and a Peer (under m_peer_mutex). For outbound connections we initiate the
 *  handshake by sending our VERSION message immediately. */
void PeerManagerImpl::InitializeNode(CNode& node, ServiceFlags our_services)
{
    NodeId nodeid = node.GetId();
    {
        LOCK(cs_main);
        // Hint end(): new entries are appended in node-id order.
        m_node_states.emplace_hint(m_node_states.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(node.IsInboundConn()));
        // A fresh peer must not have any queued tx requests yet.
        assert(m_txrequest.Count(nodeid) == 0);
    }
    PeerRef peer = std::make_shared<Peer>(nodeid, our_services);
    {
        LOCK(m_peer_mutex);
        m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
    }
    if (!node.IsInboundConn()) {
        // Outbound: we speak first.
        PushNodeVersion(node, *peer);
    }
}
1553 
1554 void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler)
1555 {
1556  std::set<uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();
1557 
1558  for (const auto& txid : unbroadcast_txids) {
1559  CTransactionRef tx = m_mempool.get(txid);
1560 
1561  if (tx != nullptr) {
1562  RelayTransaction(txid, tx->GetWitnessHash());
1563  } else {
1564  m_mempool.RemoveUnbroadcastTx(txid, true);
1565  }
1566  }
1567 
1568  // Schedule next run for 10-15 minutes in the future.
1569  // We add randomness on every cycle to avoid the possibility of P2P fingerprinting.
1570  const std::chrono::milliseconds delta = 10min + GetRandMillis(5min);
1571  scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
1572 }
1573 
/** Tear down all per-peer state when a connection closes: remove the Peer and
 *  CNodeState entries, drop this peer's in-flight block and tx-request
 *  tracking, and update the aggregate counters. After the last peer is gone,
 *  sanity-check that all aggregate state has returned to zero. */
void PeerManagerImpl::FinalizeNode(const CNode& node)
{
    NodeId nodeid = node.GetId();
    int misbehavior{0};
    {
        LOCK(cs_main);
        {
            // We remove the PeerRef from g_peer_map here, but we don't always
            // destruct the Peer. Sometimes another thread is still holding a
            // PeerRef, so the refcount is >= 1. Be careful not to do any
            // processing here that assumes Peer won't be changed before it's
            // destructed.
            PeerRef peer = RemovePeer(nodeid);
            assert(peer != nullptr);
            misbehavior = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score);
            m_wtxid_relay_peers -= peer->m_wtxid_relay;
            assert(m_wtxid_relay_peers >= 0);
        }
        CNodeState *state = State(nodeid);
        assert(state != nullptr);

        if (state->fSyncStarted)
            nSyncStarted--;

        // Erase only this peer's entries from the global in-flight block map;
        // other peers may still be downloading the same blocks.
        for (const QueuedBlock& entry : state->vBlocksInFlight) {
            auto range = mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash());
            while (range.first != range.second) {
                auto [node_id, list_it] = range.first->second;
                if (node_id != nodeid) {
                    range.first++;
                } else {
                    range.first = mapBlocksInFlight.erase(range.first);
                }
            }
        }
        m_orphanage.EraseForPeer(nodeid);
        m_txrequest.DisconnectedPeer(nodeid);
        if (m_txreconciliation) m_txreconciliation->ForgetPeer(nodeid);
        m_num_preferred_download_peers -= state->fPreferredDownload;
        m_peers_downloading_from -= (!state->vBlocksInFlight.empty());
        assert(m_peers_downloading_from >= 0);
        m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
        assert(m_outbound_peers_with_protect_from_disconnect >= 0);

        m_node_states.erase(nodeid);

        if (m_node_states.empty()) {
            // Do a consistency check after the last peer is removed.
            assert(mapBlocksInFlight.empty());
            assert(m_num_preferred_download_peers == 0);
            assert(m_peers_downloading_from == 0);
            assert(m_outbound_peers_with_protect_from_disconnect == 0);
            assert(m_wtxid_relay_peers == 0);
            assert(m_txrequest.Size() == 0);
            assert(m_orphanage.Size() == 0);
        }
    } // cs_main
    if (node.fSuccessfullyConnected && misbehavior == 0 &&
        !node.IsBlockOnlyConn() && !node.IsInboundConn()) {
        // Only change visible addrman state for full outbound peers. We don't
        // call Connected() for feeler connections since they don't have
        // fSuccessfullyConnected set.
        m_addrman.Connected(node.addr);
    }
    {
        LOCK(m_headers_presync_mutex);
        m_headers_presync_stats.erase(nodeid);
    }
    LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
}
1644 
1645 PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const
1646 {
1647  LOCK(m_peer_mutex);
1648  auto it = m_peer_map.find(id);
1649  return it != m_peer_map.end() ? it->second : nullptr;
1650 }
1651 
1652 PeerRef PeerManagerImpl::RemovePeer(NodeId id)
1653 {
1654  PeerRef ret;
1655  LOCK(m_peer_mutex);
1656  auto it = m_peer_map.find(id);
1657  if (it != m_peer_map.end()) {
1658  ret = std::move(it->second);
1659  m_peer_map.erase(it);
1660  }
1661  return ret;
1662 }
1663 
/** Copy a snapshot of this peer's sync and relay statistics into `stats`.
 *  Returns false when the peer is unknown (no CNodeState or no Peer). */
bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const
{
    {
        LOCK(cs_main);
        const CNodeState* state = State(nodeid);
        if (state == nullptr)
            return false;
        stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
        stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
        for (const QueuedBlock& queue : state->vBlocksInFlight) {
            if (queue.pindex)
                stats.vHeightInFlight.push_back(queue.pindex->nHeight);
        }
    }

    PeerRef peer = GetPeerRef(nodeid);
    if (peer == nullptr) return false;
    stats.their_services = peer->m_their_services;
    stats.m_starting_height = peer->m_starting_height;
    // It is common for nodes with good ping times to suddenly become lagged,
    // due to a new block arriving or other large transfer.
    // Merely reporting pingtime might fool the caller into thinking the node was still responsive,
    // since pingtime does not update until the ping is complete, which might take a while.
    // So, if a ping is taking an unusually long time in flight,
    // the caller can immediately detect that this is happening.
    auto ping_wait{0us};
    if ((0 != peer->m_ping_nonce_sent) && (0 != peer->m_ping_start.load().count())) {
        ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
    }

    // Tx-relay stats only exist for peers with a tx-relay data structure.
    if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
        stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs);
        stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load();
    } else {
        stats.m_relay_txs = false;
        stats.m_fee_filter_received = 0;
    }

    stats.m_ping_wait = ping_wait;
    stats.m_addr_processed = peer->m_addr_processed.load();
    stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
    stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load();
    {
        LOCK(peer->m_headers_sync_mutex);
        if (peer->m_headers_sync) {
            stats.presync_height = peer->m_headers_sync->GetPresyncHeight();
        }
    }

    return true;
}
1715 
1716 void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx)
1717 {
1718  if (m_opts.max_extra_txs <= 0)
1719  return;
1720  if (!vExtraTxnForCompact.size())
1721  vExtraTxnForCompact.resize(m_opts.max_extra_txs);
1722  vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetWitnessHash(), tx);
1723  vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs;
1724 }
1725 
1726 void PeerManagerImpl::Misbehaving(Peer& peer, int howmuch, const std::string& message)
1727 {
1728  assert(howmuch > 0);
1729 
1730  LOCK(peer.m_misbehavior_mutex);
1731  const int score_before{peer.m_misbehavior_score};
1732  peer.m_misbehavior_score += howmuch;
1733  const int score_now{peer.m_misbehavior_score};
1734 
1735  const std::string message_prefixed = message.empty() ? "" : (": " + message);
1736  std::string warning;
1737 
1738  if (score_now >= DISCOURAGEMENT_THRESHOLD && score_before < DISCOURAGEMENT_THRESHOLD) {
1739  warning = " DISCOURAGE THRESHOLD EXCEEDED";
1740  peer.m_should_discourage = true;
1741  }
1742 
1743  LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s%s\n",
1744  peer.m_id, score_before, score_now, warning, message_prefixed);
1745 }
1746 
/** Potentially punish a peer for sending us an invalid block, depending on
 *  the validation result. Returns true if the peer was punished.
 *  NOTE(review): the `case BlockValidationResult::...` labels of this switch
 *  are not visible in this rendering of the source -- the groupings below
 *  must be confirmed against the actual file before editing. */
bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
                                              bool via_compact_block, const std::string& message)
{
    PeerRef peer{GetPeerRef(nodeid)};
    switch (state.GetResult()) {
        break;
        // We didn't try to process the block because the header chain may have
        // too little work.
        break;
    // The node is providing invalid data:
        // Full punishment, but only when the block did not arrive via compact
        // block relay (BIP152 allows relay before full validation).
        if (!via_compact_block) {
            if (peer) Misbehaving(*peer, 100, message);
            return true;
        }
        break;
        {
            LOCK(cs_main);
            CNodeState *node_state = State(nodeid);
            if (node_state == nullptr) {
                break;
            }

            // Discourage outbound (but not inbound) peers if on an invalid chain.
            // Exempt HB compact block peers. Manual connections are always protected from discouragement.
            if (!via_compact_block && !node_state->m_is_inbound) {
                if (peer) Misbehaving(*peer, 100, message);
                return true;
            }
            break;
        }
        if (peer) Misbehaving(*peer, 100, message);
        return true;
    // Conflicting (but not necessarily invalid) data or different policy:
        // TODO: Handle this much more gracefully (10 DoS points is super arbitrary)
        if (peer) Misbehaving(*peer, 10, message);
        return true;
        break;
    }
    // Even when not punishing, log the reason for debugging.
    if (message != "") {
        LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
    }
    return false;
}
1801 
/** Potentially punish a peer for sending us an invalid transaction, depending
 *  on the validation result. Returns true if the peer was punished.
 *  NOTE(review): the `case TxValidationResult::...` labels of this switch are
 *  not visible in this rendering -- confirm against the actual file. Only
 *  the consensus-invalid group results in punishment here. */
bool PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
{
    PeerRef peer{GetPeerRef(nodeid)};
    switch (state.GetResult()) {
        break;
    // The node is providing invalid data:
        if (peer) Misbehaving(*peer, 100, "");
        return true;
    // Conflicting (but not necessarily invalid) data or different policy:
        break;
    }
    return false;
}
1827 
/** Whether we should serve the given block to peers. */
bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
{
    // Blocks on the active chain are always served.
    if (m_chainman.ActiveChain().Contains(pindex)) return true;
    // Off-chain (stale/forked) blocks are only served when fully validated
    // and recent enough relative to our best header, both in wall-clock time
    // and in proof-of-work-equivalent time.
    return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (m_chainman.m_best_header != nullptr) &&
           (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) &&
           (GetBlockProofEquivalentTime(*m_chainman.m_best_header, *pindex, *m_chainman.m_best_header, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
}
1836 
/** Actively request a single block from a specific peer via GETDATA.
 *  Returns std::nullopt on success, otherwise a human-readable error string
 *  describing why the request was not sent. */
std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index)
{
    if (m_chainman.m_blockman.LoadingBlocks()) return "Loading blocks ...";

    // Ensure this peer exists and hasn't been disconnected
    PeerRef peer = GetPeerRef(peer_id);
    if (peer == nullptr) return "Peer does not exist";

    // Ignore pre-segwit peers
    if (!CanServeWitnesses(*peer)) return "Pre-SegWit peer";

    LOCK(cs_main);

    // Forget about all prior requests
    RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt);

    // Mark block as in-flight
    if (!BlockRequested(peer_id, block_index)) return "Already requested from this peer";

    // Construct message to request the block
    // MSG_WITNESS_FLAG: always request the witness-serialized block.
    const uint256& hash{block_index.GetBlockHash()};
    std::vector<CInv> invs{CInv(MSG_BLOCK | MSG_WITNESS_FLAG, hash)};

    // Send block request message to the peer
    bool success = m_connman.ForNode(peer_id, [this, &invs](CNode* node) {
        const CNetMsgMaker msgMaker(node->GetCommonVersion());
        this->m_connman.PushMessage(node, msgMaker.Make(NetMsgType::GETDATA, invs));
        return true;
    });

    if (!success) return "Peer not fully connected";

    LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
             hash.ToString(), peer_id);
    return std::nullopt;
}
1873 
/** Factory: callers depend only on the abstract PeerManager interface while
 *  the concrete PeerManagerImpl type stays private to this file. */
std::unique_ptr<PeerManager> PeerManager::make(CConnman& connman, AddrMan& addrman,
                                               BanMan* banman, ChainstateManager& chainman,
                                               CTxMemPool& pool, Options opts)
{
    return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman, pool, opts);
}
1880 
PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman,
                                 BanMan* banman, ChainstateManager& chainman,
                                 CTxMemPool& pool, Options opts)
    : m_rng{opts.deterministic_rng},
      // m_fee_filter_rounder consumes m_rng, which is initialized first
      // (member-declaration order).
      m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}, m_rng},
      m_chainparams(chainman.GetParams()),
      m_connman(connman),
      m_addrman(addrman),
      m_banman(banman),
      m_chainman(chainman),
      m_mempool(pool),
      m_opts{opts}
{
    // While Erlay support is incomplete, it must be enabled explicitly via -txreconciliation.
    // This argument can go away after Erlay support is complete.
    if (opts.reconcile_txs) {
        m_txreconciliation = std::make_unique<TxReconciliationTracker>(TXRECONCILIATION_VERSION);
    }
}
1900 
/** Register the periodic background tasks on the scheduler. */
void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler)
{
    // Stale tip checking and peer eviction are on two different timers, but we
    // don't want them to get out of sync due to drift in the scheduler, so we
    // combine them in one function and schedule at the quicker (peer-eviction)
    // timer.
    static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
    scheduler.scheduleEvery([this] { this->CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});

    // schedule next run for 10-15 minutes in the future
    // (ReattemptInitialBroadcast reschedules itself on each run)
    const std::chrono::milliseconds delta = 10min + GetRandMillis(5min);
    scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
}
1914 
/** Validation-interface callback for a newly connected block: refresh the
 *  stale-tip timer, relax the dynamic block-stalling timeout, and (for the
 *  active chainstate only) drop the block's transactions from the orphanage
 *  and tx-request tracker while recording them as recently confirmed. */
void PeerManagerImpl::BlockConnected(
    ChainstateRole role,
    const std::shared_ptr<const CBlock>& pblock,
    const CBlockIndex* pindex)
{
    // Update this for all chainstate roles so that we don't mistakenly see peers
    // helping us do background IBD as having a stale tip.
    m_last_tip_update = GetTime<std::chrono::seconds>();

    // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value
    auto stalling_timeout = m_block_stalling_timeout.load();
    Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT);
    if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) {
        const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT);
        // compare_exchange tolerates a concurrent update of the timeout.
        if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
            LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout));
        }
    }

    // The following task can be skipped since we don't maintain a mempool for
    // the ibd/background chainstate.
    if (role == ChainstateRole::BACKGROUND) {
        return;
    }
    m_orphanage.EraseForBlock(*pblock);

    {
        LOCK(m_recent_confirmed_transactions_mutex);
        for (const auto& ptx : pblock->vtx) {
            // Track both txid and (when distinct) wtxid of confirmed txs.
            m_recent_confirmed_transactions.insert(ptx->GetHash());
            if (ptx->GetHash() != ptx->GetWitnessHash()) {
                m_recent_confirmed_transactions.insert(ptx->GetWitnessHash());
            }
        }
    }
    {
        LOCK(cs_main);
        for (const auto& ptx : pblock->vtx) {
            m_txrequest.ForgetTxHash(ptx->GetHash());
            m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
        }
    }
}
1964 
/** Validation-interface callback on block disconnection (reorg). */
void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
{
    // To avoid relay problems with transactions that were previously
    // confirmed, clear our filter of recently confirmed transactions whenever
    // there's a reorg.
    // This means that in a 1-block reorg (where 1 block is disconnected and
    // then another block reconnected), our filter will drop to having only one
    // block's worth of transactions in it, but that should be fine, since
    // presumably the most common case of relaying a confirmed transaction
    // should be just after a new block containing it is found.
    LOCK(m_recent_confirmed_transactions_mutex);
    m_recent_confirmed_transactions.reset();
}
1978 
/** Fast-announce a block whose proof-of-work has been validated: push a
 *  CMPCTBLOCK directly to peers that requested high-bandwidth compact-block
 *  announcements, and cache the block for subsequent requests.
 *  NOTE(review): this rendering appears to omit a lock-assertion line inside
 *  the ForEachNode lambda -- confirm against the actual source. */
void PeerManagerImpl::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock)
{
    auto pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock);
    const CNetMsgMaker msgMaker(PROTOCOL_VERSION);

    LOCK(cs_main);

    // Never fast-announce a block at or below a height we already announced.
    if (pindex->nHeight <= m_highest_fast_announce)
        return;
    m_highest_fast_announce = pindex->nHeight;

    if (!DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) return;

    uint256 hashBlock(pblock->GetHash());
    // Serialize the message at most once, and only if some peer needs it.
    const std::shared_future<CSerializedNetMsg> lazy_ser{
        std::async(std::launch::deferred, [&] { return msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock); })};

    {
        // Cache the block's transactions by txid and wtxid for later lookup.
        auto most_recent_block_txs = std::make_unique<std::map<uint256, CTransactionRef>>();
        for (const auto& tx : pblock->vtx) {
            most_recent_block_txs->emplace(tx->GetHash(), tx);
            most_recent_block_txs->emplace(tx->GetWitnessHash(), tx);
        }

        LOCK(m_most_recent_block_mutex);
        m_most_recent_block_hash = hashBlock;
        m_most_recent_block = pblock;
        m_most_recent_compact_block = pcmpctblock;
        m_most_recent_block_txs = std::move(most_recent_block_txs);
    }

    m_connman.ForEachNode([this, pindex, &lazy_ser, &hashBlock](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {

        if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect)
            return;
        ProcessBlockAvailability(pnode->GetId());
        CNodeState &state = *State(pnode->GetId());
        // If the peer has, or we announced to them the previous block already,
        // but we don't think they have this one, go ahead and announce it
        if (state.m_requested_hb_cmpctblocks && !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {

            LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock",
                     hashBlock.ToString(), pnode->GetId());

            const CSerializedNetMsg& ser_cmpctblock{lazy_ser.get()};
            m_connman.PushMessage(pnode, ser_cmpctblock.Copy());
            state.pindexBestHeaderSent = pindex;
        }
    });
}
2034 
2039 void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
2040 {
2041  SetBestHeight(pindexNew->nHeight);
2042  SetServiceFlagsIBDCache(!fInitialDownload);
2043 
2044  // Don't relay inventory during initial block download.
2045  if (fInitialDownload) return;
2046 
2047  // Find the hashes of all blocks that weren't previously in the best chain.
2048  std::vector<uint256> vHashes;
2049  const CBlockIndex *pindexToAnnounce = pindexNew;
2050  while (pindexToAnnounce != pindexFork) {
2051  vHashes.push_back(pindexToAnnounce->GetBlockHash());
2052  pindexToAnnounce = pindexToAnnounce->pprev;
2053  if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
2054  // Limit announcements in case of a huge reorganization.
2055  // Rely on the peer's synchronization mechanism in that case.
2056  break;
2057  }
2058  }
2059 
2060  {
2061  LOCK(m_peer_mutex);
2062  for (auto& it : m_peer_map) {
2063  Peer& peer = *it.second;
2064  LOCK(peer.m_block_inv_mutex);
2065  for (const uint256& hash : reverse_iterate(vHashes)) {
2066  peer.m_blocks_for_headers_relay.push_back(hash);
2067  }
2068  }
2069  }
2070 
2071  m_connman.WakeMessageHandler();
2072 }
2073 
/** Validation callback: a block finished (dis)qualifying validation.
 *  If invalid, possibly punish the peer that supplied it; if valid and it
 *  looks like the current best candidate, consider asking the source peer
 *  for high-bandwidth compact-block announcements. Always drops the block's
 *  entry from mapBlockSource afterwards.
 */
void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationState& state)
{
    LOCK(cs_main);

    const uint256 hash(block.GetHash());
    std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);

    // If the block failed validation, we know where it came from and we're still connected
    // to that peer, maybe punish.
    if (state.IsInvalid() &&
        it != mapBlockSource.end() &&
        State(it->second.first)) {
            MaybePunishNodeForBlock(/*nodeid=*/ it->second.first, state, /*via_compact_block=*/ !it->second.second);
    }
    // Check that:
    // 1. The block is valid
    // 2. We're not in initial block download
    // 3. This is currently the best block we're aware of. We haven't updated
    //    the tip yet so we have no way to check this directly here. Instead we
    //    just check that there are currently no other blocks in flight.
    //    (count(hash) == size() holds exactly when every in-flight request is
    //    for this block's hash.)
    else if (state.IsValid() &&
             !m_chainman.IsInitialBlockDownload() &&
             mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
        if (it != mapBlockSource.end()) {
            // Reward the source: request compact-block announcements from it.
            MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
        }
    }
    if (it != mapBlockSource.end())
        mapBlockSource.erase(it);
}
2108 
2110 //
2111 // Messages
2112 //
2113 
2114 
2115 bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid)
2116 {
2117  if (m_chainman.ActiveChain().Tip()->GetBlockHash() != hashRecentRejectsChainTip) {
2118  // If the chain tip has changed previously rejected transactions
2119  // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
2120  // or a double-spend. Reset the rejects filter and give those
2121  // txs a second chance.
2122  hashRecentRejectsChainTip = m_chainman.ActiveChain().Tip()->GetBlockHash();
2123  m_recent_rejects.reset();
2124  }
2125 
2126  const uint256& hash = gtxid.GetHash();
2127 
2128  if (m_orphanage.HaveTx(gtxid)) return true;
2129 
2130  {
2131  LOCK(m_recent_confirmed_transactions_mutex);
2132  if (m_recent_confirmed_transactions.contains(hash)) return true;
2133  }
2134 
2135  return m_recent_rejects.contains(hash) || m_mempool.exists(gtxid);
2136 }
2137 
2138 bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash)
2139 {
2140  return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr;
2141 }
2142 
2143 void PeerManagerImpl::SendPings()
2144 {
2145  LOCK(m_peer_mutex);
2146  for(auto& it : m_peer_map) it.second->m_ping_queued = true;
2147 }
2148 
2149 void PeerManagerImpl::RelayTransaction(const uint256& txid, const uint256& wtxid)
2150 {
2151  LOCK(m_peer_mutex);
2152  for(auto& it : m_peer_map) {
2153  Peer& peer = *it.second;
2154  auto tx_relay = peer.GetTxRelay();
2155  if (!tx_relay) continue;
2156 
2157  LOCK(tx_relay->m_tx_inventory_mutex);
2158  // Only queue transactions for announcement once the version handshake
2159  // is completed. The time of arrival for these transactions is
2160  // otherwise at risk of leaking to a spy, if the spy is able to
2161  // distinguish transactions received during the handshake from the rest
2162  // in the announcement.
2163  if (tx_relay->m_next_inv_send_time == 0s) continue;
2164 
2165  const uint256& hash{peer.m_wtxid_relay ? wtxid : txid};
2166  if (!tx_relay->m_tx_inventory_known_filter.contains(hash)) {
2167  tx_relay->m_tx_inventory_to_send.insert(hash);
2168  }
2169  };
2170 }
2171 
/** Relay an address to a small, deterministically chosen subset of peers.
 *  @param originator  peer the address came from (never relayed back to it)
 *  @param addr        the address to relay
 *  @param fReachable  whether we consider the address's network reachable
 */
void PeerManagerImpl::RelayAddress(NodeId originator,
                                   const CAddress& addr,
                                   bool fReachable)
{
    // We choose the same nodes within a given 24h window (if the list of connected
    // nodes does not change) and we don't relay to nodes that already know an
    // address. So within 24h we will likely relay a given address once. This is to
    // prevent a peer from unjustly giving their address better propagation by sending
    // it to us repeatedly.

    if (!fReachable && !addr.IsRelayable()) return;

    // Relay to a limited number of other nodes
    // Use deterministic randomness to send to the same nodes for 24 hours
    // at a time so the m_addr_knowns of the chosen nodes prevent repeats
    const uint64_t hash_addr{CServiceHash(0, 0)(addr)};
    const auto current_time{GetTime<std::chrono::seconds>()};
    // Adding address hash makes exact rotation time different per address, while preserving periodicity.
    const uint64_t time_addr{(static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) / count_seconds(ROTATE_ADDR_RELAY_DEST_INTERVAL)};
    const CSipHasher hasher{m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY)
                                .Write(hash_addr)
                                .Write(time_addr)};

    // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers.
    unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;

    std::array<std::pair<uint64_t, Peer*>, 2> best{{{0, nullptr}, {0, nullptr}}};
    assert(nRelayNodes <= best.size());

    LOCK(m_peer_mutex);

    // Keep the nRelayNodes peers with the highest per-peer hash, maintained
    // as a small sorted array (insertion shifts lower entries down).
    for (auto& [id, peer] : m_peer_map) {
        if (peer->m_addr_relay_enabled && id != originator && IsAddrCompatible(*peer, addr)) {
            uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize();
            for (unsigned int i = 0; i < nRelayNodes; i++) {
                if (hashKey > best[i].first) {
                    std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
                    best[i] = std::make_pair(hashKey, peer.get());
                    break;
                }
            }
        }
    };

    for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
        PushAddress(*best[i].second, addr);
    }
}
2220 
/** Serve a single block GETDATA item (BLOCK / WITNESS_BLOCK / FILTERED_BLOCK
 *  / CMPCT_BLOCK) to a peer, enforcing historical-serving and pruning-related
 *  relay policies. May disconnect the peer when limits are exceeded. */
void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv)
{
    std::shared_ptr<const CBlock> a_recent_block;
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
    {
        // Snapshot the cached most-recent block so we can serve it without
        // hitting disk.
        LOCK(m_most_recent_block_mutex);
        a_recent_block = m_most_recent_block;
        a_recent_compact_block = m_most_recent_compact_block;
    }

    bool need_activate_chain = false;
    {
        LOCK(cs_main);
        const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
        if (pindex) {
            if (pindex->HaveNumChainTxs() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) &&
                    pindex->IsValid(BLOCK_VALID_TREE)) {
                // If we have the block and all of its parents, but have not yet validated it,
                // we might be in the middle of connecting it (ie in the unlock of cs_main
                // before ActivateBestChain but after AcceptBlock).
                // In this case, we need to run ActivateBestChain prior to checking the relay
                // conditions below.
                need_activate_chain = true;
            }
        }
    } // release cs_main before calling ActivateBestChain
    if (need_activate_chain) {
        BlockValidationState state;
        if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
            LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
        }
    }

    LOCK(cs_main);
    // Re-lookup: the index may have changed while cs_main was released above.
    const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
    if (!pindex) {
        return;
    }
    if (!BlockRequestAllowed(pindex)) {
        LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId());
        return;
    }
    const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
    // disconnect node in case we have reached the outbound limit for serving historical blocks
    if (m_connman.OutboundTargetReached(true) &&
        (((m_chainman.m_best_header != nullptr) && (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) &&
        !pfrom.HasPermission(NetPermissionFlags::Download) // nodes with the download permission may exceed target
    ) {
        LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId());
        pfrom.fDisconnect = true;
        return;
    }
    // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
    if (!pfrom.HasPermission(NetPermissionFlags::NoBan) && (
            (((peer.m_our_services & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) && (m_chainman.ActiveChain().Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
       )) {
        LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold, disconnect peer=%d\n", pfrom.GetId());
        //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
        pfrom.fDisconnect = true;
        return;
    }
    // Pruned nodes may have deleted the block, so check whether
    // it's available before trying to send.
    if (!(pindex->nStatus & BLOCK_HAVE_DATA)) {
        return;
    }
    std::shared_ptr<const CBlock> pblock;
    if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
        pblock = a_recent_block;
    } else if (inv.IsMsgWitnessBlk()) {
        // Fast-path: in this case it is possible to serve the block directly from disk,
        // as the network format matches the format on disk
        std::vector<uint8_t> block_data;
        if (!m_chainman.m_blockman.ReadRawBlockFromDisk(block_data, pindex->GetBlockPos())) {
            assert(!"cannot load block from disk");
        }
        m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, Span{block_data}));
        // Don't set pblock as we've sent the block
    } else {
        // Send block from disk
        std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
        if (!m_chainman.m_blockman.ReadBlockFromDisk(*pblockRead, *pindex)) {
            assert(!"cannot load block from disk");
        }
        pblock = pblockRead;
    }
    if (pblock) {
        if (inv.IsMsgBlk()) {
            // Legacy (non-witness) block request: strip witness data.
            m_connman.PushMessage(&pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
        } else if (inv.IsMsgWitnessBlk()) {
            m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
        } else if (inv.IsMsgFilteredBlk()) {
            bool sendMerkleBlock = false;
            CMerkleBlock merkleBlock;
            if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) {
                LOCK(tx_relay->m_bloom_filter_mutex);
                if (tx_relay->m_bloom_filter) {
                    sendMerkleBlock = true;
                    merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter);
                }
            }
            if (sendMerkleBlock) {
                m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
                // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                // This avoids hurting performance by pointlessly requiring a round-trip
                // Note that there is currently no way for a node to request any single transactions we didn't send here -
                // they must either disconnect and retry or request the full block.
                // Thus, the protocol spec specified allows for us to provide duplicate txn here,
                // however we MUST always provide at least what the remote peer needs
                typedef std::pair<unsigned int, uint256> PairType;
                for (PairType& pair : merkleBlock.vMatchedTxn)
                    m_connman.PushMessage(&pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
            }
            // else
                // no response
        } else if (inv.IsMsgCmpctBlk()) {
            // If a peer is asking for old blocks, we're almost guaranteed
            // they won't have a useful mempool to match against a compact block,
            // and we don't feel like constructing the object for them, so
            // instead we respond with the full, non-compact block.
            if (CanDirectFetch() && pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_CMPCTBLOCK_DEPTH) {
                if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) {
                    m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
                } else {
                    CBlockHeaderAndShortTxIDs cmpctblock{*pblock};
                    m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock));
                }
            } else {
                m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
            }
        }
    }

    {
        LOCK(peer.m_block_inv_mutex);
        // Trigger the peer node to send a getblocks request for the next batch of inventory
        if (inv.hash == peer.m_continuation_block) {
            // Send immediately. This must send even if redundant,
            // and we want it right after the last block so they don't
            // wait for other stuff first.
            std::vector<CInv> vInv;
            vInv.emplace_back(MSG_BLOCK, m_chainman.ActiveChain().Tip()->GetBlockHash());
            m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv));
            peer.m_continuation_block.SetNull();
        }
    }
}
2368 
2369 CTransactionRef PeerManagerImpl::FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid)
2370 {
2371  // If a tx was in the mempool prior to the last INV for this peer, permit the request.
2372  auto txinfo = m_mempool.info_for_relay(gtxid, tx_relay.m_last_inv_sequence);
2373  if (txinfo.tx) {
2374  return std::move(txinfo.tx);
2375  }
2376 
2377  // Or it might be from the most recent block
2378  {
2379  LOCK(m_most_recent_block_mutex);
2380  if (m_most_recent_block_txs != nullptr) {
2381  auto it = m_most_recent_block_txs->find(gtxid.GetHash());
2382  if (it != m_most_recent_block_txs->end()) return it->second;
2383  }
2384  }
2385 
2386  return {};
2387 }
2388 
/** Drain a peer's GETDATA request queue: batch-serve transaction items from
 *  the front, then at most one block item per call, and reply with NOTFOUND
 *  for any transactions we couldn't provide. Stops early when the send
 *  buffer is full or the message processor is interrupted. */
void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
{

    auto tx_relay = peer.GetTxRelay();

    std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
    std::vector<CInv> vNotFound;
    const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());

    // Process as many TX items from the front of the getdata queue as
    // possible, since they're common and it's efficient to batch process
    // them.
    while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) {
        if (interruptMsgProc) return;
        // The send buffer provides backpressure. If there's no space in
        // the buffer, pause processing until the next call.
        if (pfrom.fPauseSend) break;

        const CInv &inv = *it++;

        if (tx_relay == nullptr) {
            // Ignore GETDATA requests for transactions from block-relay-only
            // peers and peers that asked us not to announce transactions.
            continue;
        }

        CTransactionRef tx = FindTxForGetData(*tx_relay, ToGenTxid(inv));
        if (tx) {
            // WTX and WITNESS_TX imply we serialize with witness
            int nSendFlags = (inv.IsMsgTx() ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
            m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
            // Once relayed, the tx no longer needs our unconditional rebroadcast.
            m_mempool.RemoveUnbroadcastTx(tx->GetHash());
        } else {
            vNotFound.push_back(inv);
        }
    }

    // Only process one BLOCK item per call, since they're uncommon and can be
    // expensive to process.
    if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
        const CInv &inv = *it++;
        if (inv.IsGenBlkMsg()) {
            ProcessGetBlockData(pfrom, peer, inv);
        }
        // else: If the first item on the queue is an unknown type, we erase it
        // and continue processing the queue on the next call.
    }

    // Drop everything we consumed (served, not-found, or unknown-type).
    peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);

    if (!vNotFound.empty()) {
        // Let the peer know that we didn't find what it asked for, so it doesn't
        // have to wait around forever.
        // SPV clients care about this message: it's needed when they are
        // recursively walking the dependencies of relevant unconfirmed
        // transactions. SPV clients want to do that because they want to know
        // about (and store and rebroadcast and risk analyze) the dependencies
        // of transactions relevant to them, without having to download the
        // entire memory pool.
        // Also, other nodes can use these messages to automatically request a
        // transaction from some other peer that annnounced it, and stop
        // waiting for us to respond.
        // In normal operation, we often send NOTFOUND messages for parents of
        // transactions that we relay; if a peer is missing a parent, they may
        // assume we have them and request the parents from us.
        m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
    }
}
2458 
2459 uint32_t PeerManagerImpl::GetFetchFlags(const Peer& peer) const
2460 {
2461  uint32_t nFetchFlags = 0;
2462  if (CanServeWitnesses(peer)) {
2463  nFetchFlags |= MSG_WITNESS_FLAG;
2464  }
2465  return nFetchFlags;
2466 }
2467 
2468 void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req)
2469 {
2470  BlockTransactions resp(req);
2471  for (size_t i = 0; i < req.indexes.size(); i++) {
2472  if (req.indexes[i] >= block.vtx.size()) {
2473  Misbehaving(peer, 100, "getblocktxn with out-of-bounds tx indices");
2474  return;
2475  }
2476  resp.txn[i] = block.vtx[req.indexes[i]];
2477  }
2478 
2479  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2480  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCKTXN, resp));
2481 }
2482 
2483 bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer)
2484 {
2485  // Do these headers have proof-of-work matching what's claimed?
2486  if (!HasValidProofOfWork(headers, consensusParams)) {
2487  Misbehaving(peer, 100, "header with invalid proof of work");
2488  return false;
2489  }
2490 
2491  // Are these headers connected to each other?
2492  if (!CheckHeadersAreContinuous(headers)) {
2493  Misbehaving(peer, 20, "non-continuous headers sequence");
2494  return false;
2495  }
2496  return true;
2497 }
2498 
2499 arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold()
2500 {
2501  arith_uint256 near_chaintip_work = 0;
2502  LOCK(cs_main);
2503  if (m_chainman.ActiveChain().Tip() != nullptr) {
2504  const CBlockIndex *tip = m_chainman.ActiveChain().Tip();
2505  // Use a 144 block buffer, so that we'll accept headers that fork from
2506  // near our tip.
2507  near_chaintip_work = tip->nChainWork - std::min<arith_uint256>(144*GetBlockProof(*tip), tip->nChainWork);
2508  }
2509  return std::max(near_chaintip_work, m_chainman.MinimumChainWork());
2510 }
2511 
/** React to a headers message whose first header doesn't connect to anything
 *  we know: request the missing intermediate headers, record the peer's
 *  claimed tip for later download, and penalize the peer if this keeps
 *  happening. */
void PeerManagerImpl::HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer,
        const std::vector<CBlockHeader>& headers)
{
    peer.m_num_unconnecting_headers_msgs++;
    // Try to fill in the missing headers.
    const CBlockIndex* best_header{WITH_LOCK(cs_main, return m_chainman.m_best_header)};
    if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
        LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, m_num_unconnecting_headers_msgs=%d)\n",
            headers[0].GetHash().ToString(),
            headers[0].hashPrevBlock.ToString(),
            best_header->nHeight,
            pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
    }

    // Set hashLastUnknownBlock for this peer, so that if we
    // eventually get the headers - even from a different peer -
    // we can use this peer to download.
    WITH_LOCK(cs_main, UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()));

    // The peer may just be broken, so periodically assign DoS points if this
    // condition persists.
    if (peer.m_num_unconnecting_headers_msgs % MAX_NUM_UNCONNECTING_HEADERS_MSGS == 0) {
        Misbehaving(peer, 20, strprintf("%d non-connecting headers", peer.m_num_unconnecting_headers_msgs));
    }
}
2549 
2550 bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const
2551 {
2552  uint256 hashLastBlock;
2553  for (const CBlockHeader& header : headers) {
2554  if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
2555  return false;
2556  }
2557  hashLastBlock = header.GetHash();
2558  }
2559  return true;
2560 }
2561 
/** Feed a headers batch into an in-progress low-work headers sync (if any).
 *  Requests the next batch when needed, maintains per-peer presync
 *  statistics and the cached best presync peer, and on success replaces
 *  `headers` with the PoW-validated headers ready for normal processing.
 *  @return true if the headers were consumed by the sync state machine
 *          (caller should process the possibly-swapped `headers`); false if
 *          no sync was in progress or processing failed. */
bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, std::vector<CBlockHeader>& headers)
{
    if (peer.m_headers_sync) {
        // A full-sized message implies the peer may have more headers after it.
        auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == MAX_HEADERS_RESULTS);
        if (result.request_more) {
            auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
            // If we were instructed to ask for a locator, it should not be empty.
            Assume(!locator.vHave.empty());
            if (!locator.vHave.empty()) {
                // It should be impossible for the getheaders request to fail,
                // because we should have cleared the last getheaders timestamp
                // when processing the headers that triggered this call. But
                // it may be possible to bypass this via compactblock
                // processing, so check the result before logging just to be
                // safe.
                bool sent_getheaders = MaybeSendGetHeaders(pfrom, locator, peer);
                if (sent_getheaders) {
                    LogPrint(BCLog::NET, "more getheaders (from %s) to peer=%d\n",
                            locator.vHave.front().ToString(), pfrom.GetId());
                } else {
                    LogPrint(BCLog::NET, "error sending next getheaders (from %s) to continue sync with peer=%d\n",
                            locator.vHave.front().ToString(), pfrom.GetId());
                }
            }
        }

        if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) {
            peer.m_headers_sync.reset(nullptr);

            // Delete this peer's entry in m_headers_presync_stats.
            // If this is m_headers_presync_bestpeer, it will be replaced later
            // by the next peer that triggers the else{} branch below.
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats.erase(pfrom.GetId());
        } else {
            // Build statistics for this peer's sync.
            HeadersPresyncStats stats;
            stats.first = peer.m_headers_sync->GetPresyncWork();
            if (peer.m_headers_sync->GetState() == HeadersSyncState::State::PRESYNC) {
                stats.second = {peer.m_headers_sync->GetPresyncHeight(),
                    peer.m_headers_sync->GetPresyncTime()};
            }

            // Update statistics in stats.
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats[pfrom.GetId()] = stats;
            auto best_it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
            bool best_updated = false;
            if (best_it == m_headers_presync_stats.end()) {
                // If the cached best peer is outdated, iterate over all remaining ones (including
                // newly updated one) to find the best one.
                NodeId peer_best{-1};
                const HeadersPresyncStats* stat_best{nullptr};
                for (const auto& [peer, stat] : m_headers_presync_stats) {
                    if (!stat_best || stat > *stat_best) {
                        peer_best = peer;
                        stat_best = &stat;
                    }
                }
                m_headers_presync_bestpeer = peer_best;
                best_updated = (peer_best == pfrom.GetId());
            } else if (best_it->first == pfrom.GetId() || stats > best_it->second) {
                // pfrom was and remains the best peer, or pfrom just became best.
                m_headers_presync_bestpeer = pfrom.GetId();
                best_updated = true;
            }
            if (best_updated && stats.second.has_value()) {
                // If the best peer updated, and it is in its first phase, signal.
                m_headers_presync_should_signal = true;
            }
        }

        if (result.success) {
            // We only overwrite the headers passed in if processing was
            // successful.
            headers.swap(result.pow_validated_headers);
        }

        return result.success;
    }
    // Either we didn't have a sync in progress, or something went wrong
    // processing these headers, or we are returning headers to the caller to
    // process.
    return false;
}
2647 
/** Decide whether a headers chain has too little work to process directly.
 *  If so, either start a commitment-based low-work headers sync with the
 *  peer (when the message was full, so more headers are expected) or ignore
 *  the chain, and clear `headers` to stop further processing.
 *  @return true if the headers were low-work and have been handled here;
 *          false if the chain meets the work threshold. */
bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex* chain_start_header, std::vector<CBlockHeader>& headers)
{
    // Calculate the total work on this chain.
    arith_uint256 total_work = chain_start_header->nChainWork + CalculateHeadersWork(headers);

    // Our dynamic anti-DoS threshold (minimum work required on a headers chain
    // before we'll store it)
    arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();

    // Avoid DoS via low-difficulty-headers by only processing if the headers
    // are part of a chain with sufficient work.
    if (total_work < minimum_chain_work) {
        // Only try to sync with this peer if their headers message was full;
        // otherwise they don't have more headers after this so no point in
        // trying to sync their too-little-work chain.
        if (headers.size() == MAX_HEADERS_RESULTS) {
            // Note: we could advance to the last header in this set that is
            // known to us, rather than starting at the first header (which we
            // may already have); however this is unlikely to matter much since
            // ProcessHeadersMessage() already handles the case where all
            // headers in a received message are already known and are
            // ancestors of m_best_header or chainActive.Tip(), by skipping
            // this logic in that case. So even if the first header in this set
            // of headers is known, some header in this set must be new, so
            // advancing to the first unknown header would be a small effect.
            LOCK(peer.m_headers_sync_mutex);
            peer.m_headers_sync.reset(new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(),
                chain_start_header, minimum_chain_work));

            // Now a HeadersSyncState object for tracking this synchronization
            // is created, process the headers using it as normal. Failures are
            // handled inside of IsContinuationOfLowWorkHeadersSync.
            (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
        } else {
            LogPrint(BCLog::NET, "Ignoring low-work chain (height=%u) from peer=%d\n", chain_start_header->nHeight + headers.size(), pfrom.GetId());
        }

        // The peer has not yet given us a chain that meets our work threshold,
        // so we want to prevent further processing of the headers in any case.
        headers = {};
        return true;
    }

    return false;
}
2693 
2694 bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex* header)
2695 {
2696  if (header == nullptr) {
2697  return false;
2698  } else if (m_chainman.m_best_header != nullptr && header == m_chainman.m_best_header->GetAncestor(header->nHeight)) {
2699  return true;
2700  } else if (m_chainman.ActiveChain().Contains(header)) {
2701  return true;
2702  }
2703  return false;
2704 }
2705 
2706 bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer)
2707 {
2708  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2709 
2710  const auto current_time = NodeClock::now();
2711 
2712  // Only allow a new getheaders message to go out if we don't have a recent
2713  // one already in-flight
2714  if (current_time - peer.m_last_getheaders_timestamp > HEADERS_RESPONSE_TIME) {
2715  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, locator, uint256()));
2716  peer.m_last_getheaders_timestamp = current_time;
2717  return true;
2718  }
2719  return false;
2720 }
2721 
2722 /*
2723  * Given a new headers tip ending in last_header, potentially request blocks towards that tip.
2724  * We require that the given tip have at least as much work as our tip, and for
2725  * our current tip to be "close to synced" (see CanDirectFetch()).
2726  */
2727 void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header)
2728 {
2729  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2730 
2731  LOCK(cs_main);
2732  CNodeState *nodestate = State(pfrom.GetId());
2733 
2734  if (CanDirectFetch() && last_header.IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= last_header.nChainWork) {
2735  std::vector<const CBlockIndex*> vToFetch;
2736  const CBlockIndex* pindexWalk{&last_header};
2737  // Calculate all the blocks we'd need to switch to last_header, up to a limit.
2738  while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
2739  if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
2740  !IsBlockRequested(pindexWalk->GetBlockHash()) &&
2741  (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || CanServeWitnesses(peer))) {
2742  // We don't have this block, and it's not yet in flight.
2743  vToFetch.push_back(pindexWalk);
2744  }
2745  pindexWalk = pindexWalk->pprev;
2746  }
2747  // If pindexWalk still isn't on our main chain, we're looking at a
2748  // very large reorg at a time we think we're close to caught up to
2749  // the main chain -- this shouldn't really happen. Bail out on the
2750  // direct fetch and rely on parallel download instead.
2751  if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
2752  LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
2753  last_header.GetBlockHash().ToString(),
2754  last_header.nHeight);
2755  } else {
2756  std::vector<CInv> vGetData;
2757  // Download as much as possible, from earliest to latest.
2758  for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
2759  if (nodestate->vBlocksInFlight.size() >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
2760  // Can't download any more from this peer
2761  break;
2762  }
2763  uint32_t nFetchFlags = GetFetchFlags(peer);
2764  vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash());
2765  BlockRequested(pfrom.GetId(), *pindex);
2766  LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
2767  pindex->GetBlockHash().ToString(), pfrom.GetId());
2768  }
2769  if (vGetData.size() > 1) {
2770  LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
2771  last_header.GetBlockHash().ToString(),
2772  last_header.nHeight);
2773  }
2774  if (vGetData.size() > 0) {
2775  if (!m_opts.ignore_incoming_txs &&
2776  nodestate->m_provides_cmpctblocks &&
2777  vGetData.size() == 1 &&
2778  mapBlocksInFlight.size() == 1 &&
2779  last_header.pprev->IsValid(BLOCK_VALID_CHAIN)) {
2780  // In any case, we want to download using a compact block, not a regular one
2781  vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
2782  }
2783  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
2784  }
2785  }
2786  }
2787 }
2788 
/** Bookkeeping after receiving a connecting headers message from a peer:
 *  - reset the unconnecting-headers counter,
 *  - update our view of the peer's best known block,
 *  - record an announcement time if the peer gave us new, more-work headers,
 *  - during IBD, disconnect outbound peers whose complete headers chain has
 *    less work than our minimum-chain-work threshold,
 *  - possibly protect an outbound full-relay peer from the bad/lagging-chain
 *    eviction logic.
 */
void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer,
        const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
{
    // These headers connected to our block index, so reset the count of
    // consecutive unconnecting headers messages from this peer.
    if (peer.m_num_unconnecting_headers_msgs > 0) {
        LogPrint(BCLog::NET, "peer=%d: resetting m_num_unconnecting_headers_msgs (%d -> 0)\n", pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
    }
    peer.m_num_unconnecting_headers_msgs = 0;

    LOCK(cs_main);
    CNodeState *nodestate = State(pfrom.GetId());

    UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash());

    // From here, pindexBestKnownBlock should be guaranteed to be non-null,
    // because it is set in UpdateBlockAvailability. Some nullptr checks
    // are still present, however, as belt-and-suspenders.

    // A peer announcing a header with more work than our tip counts as a
    // block announcement for eviction purposes.
    if (received_new_header && last_header.nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
        nodestate->m_last_block_announcement = GetTime();
    }

    // If we're in IBD, we want outbound peers that will serve us a useful
    // chain. Disconnect peers that are on chains with insufficient work.
    if (m_chainman.IsInitialBlockDownload() && !may_have_more_headers) {
        // If the peer has no more headers to give us, then we know we have
        // their tip.
        if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
            // This peer has too little work on their headers chain to help
            // us sync -- disconnect if it is an outbound disconnection
            // candidate.
            // Note: We compare their tip to the minimum chain work (rather than
            // m_chainman.ActiveChain().Tip()) because we won't start block download
            // until we have a headers chain that has at least
            // the minimum chain work, even if a peer has a chain past our tip,
            // as an anti-DoS measure.
            if (pfrom.IsOutboundOrBlockRelayConn()) {
                LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
                pfrom.fDisconnect = true;
            }
        }
    }

    // If this is an outbound full-relay peer, check to see if we should protect
    // it from the bad/lagging chain logic.
    // Note that outbound block-relay peers are excluded from this protection, and
    // thus always subject to eviction under the bad/lagging chain logic.
    // See ChainSyncTimeoutState.
    if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
        // Protect at most MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT peers,
        // and only those whose best known block has at least as much work as
        // our tip.
        if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
            LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
            nodestate->m_chain_sync.m_protect = true;
            ++m_outbound_peers_with_protect_from_disconnect;
        }
    }
}
2849 
2850 void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
2851  std::vector<CBlockHeader>&& headers,
2852  bool via_compact_block)
2853 {
2854  size_t nCount = headers.size();
2855 
2856  if (nCount == 0) {
2857  // Nothing interesting. Stop asking this peers for more headers.
2858  // If we were in the middle of headers sync, receiving an empty headers
2859  // message suggests that the peer suddenly has nothing to give us
2860  // (perhaps it reorged to our chain). Clear download state for this peer.
2861  LOCK(peer.m_headers_sync_mutex);
2862  if (peer.m_headers_sync) {
2863  peer.m_headers_sync.reset(nullptr);
2864  LOCK(m_headers_presync_mutex);
2865  m_headers_presync_stats.erase(pfrom.GetId());
2866  }
2867  return;
2868  }
2869 
2870  // Before we do any processing, make sure these pass basic sanity checks.
2871  // We'll rely on headers having valid proof-of-work further down, as an
2872  // anti-DoS criteria (note: this check is required before passing any
2873  // headers into HeadersSyncState).
2874  if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) {
2875  // Misbehaving() calls are handled within CheckHeadersPoW(), so we can
2876  // just return. (Note that even if a header is announced via compact
2877  // block, the header itself should be valid, so this type of error can
2878  // always be punished.)
2879  return;
2880  }
2881 
2882  const CBlockIndex *pindexLast = nullptr;
2883 
2884  // We'll set already_validated_work to true if these headers are
2885  // successfully processed as part of a low-work headers sync in progress
2886  // (either in PRESYNC or REDOWNLOAD phase).
2887  // If true, this will mean that any headers returned to us (ie during
2888  // REDOWNLOAD) can be validated without further anti-DoS checks.
2889  bool already_validated_work = false;
2890 
2891  // If we're in the middle of headers sync, let it do its magic.
2892  bool have_headers_sync = false;
2893  {
2894  LOCK(peer.m_headers_sync_mutex);
2895 
2896  already_validated_work = IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
2897 
2898  // The headers we passed in may have been:
2899  // - untouched, perhaps if no headers-sync was in progress, or some
2900  // failure occurred
2901  // - erased, such as if the headers were successfully processed and no
2902  // additional headers processing needs to take place (such as if we
2903  // are still in PRESYNC)
2904  // - replaced with headers that are now ready for validation, such as
2905  // during the REDOWNLOAD phase of a low-work headers sync.
2906  // So just check whether we still have headers that we need to process,
2907  // or not.
2908  if (headers.empty()) {
2909  return;
2910  }
2911 
2912  have_headers_sync = !!peer.m_headers_sync;
2913  }
2914 
2915  // Do these headers connect to something in our block index?
2916  const CBlockIndex *chain_start_header{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock))};
2917  bool headers_connect_blockindex{chain_start_header != nullptr};
2918 
2919  if (!headers_connect_blockindex) {
2920  if (nCount <= MAX_BLOCKS_TO_ANNOUNCE) {
2921  // If this looks like it could be a BIP 130 block announcement, use
2922  // special logic for handling headers that don't connect, as this
2923  // could be benign.
2924  HandleFewUnconnectingHeaders(pfrom, peer, headers);
2925  } else {
2926  Misbehaving(peer, 10, "invalid header received");
2927  }
2928  return;
2929  }
2930 
2931  // If the headers we received are already in memory and an ancestor of
2932  // m_best_header or our tip, skip anti-DoS checks. These headers will not
2933  // use any more memory (and we are not leaking information that could be
2934  // used to fingerprint us).
2935  const CBlockIndex *last_received_header{nullptr};
2936  {
2937  LOCK(cs_main);
2938  last_received_header = m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash());
2939  if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
2940  already_validated_work = true;
2941  }
2942  }
2943 
2944  // If our peer has NetPermissionFlags::NoBan privileges, then bypass our
2945  // anti-DoS logic (this saves bandwidth when we connect to a trusted peer
2946  // on startup).
2948  already_validated_work = true;
2949  }
2950 
2951  // At this point, the headers connect to something in our block index.
2952  // Do anti-DoS checks to determine if we should process or store for later
2953  // processing.
2954  if (!already_validated_work && TryLowWorkHeadersSync(peer, pfrom,
2955  chain_start_header, headers)) {
2956  // If we successfully started a low-work headers sync, then there
2957  // should be no headers to process any further.
2958  Assume(headers.empty());
2959  return;
2960  }
2961 
2962  // At this point, we have a set of headers with sufficient work on them
2963  // which can be processed.
2964 
2965  // If we don't have the last header, then this peer will have given us
2966  // something new (if these headers are valid).
2967  bool received_new_header{last_received_header == nullptr};
2968 
2969  // Now process all the headers.
2970  BlockValidationState state;
2971  if (!m_chainman.ProcessNewBlockHeaders(headers, /*min_pow_checked=*/true, state, &pindexLast)) {
2972  if (state.IsInvalid()) {
2973  MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received");
2974  return;
2975  }
2976  }
2977  assert(pindexLast);
2978 
2979  // Consider fetching more headers if we are not using our headers-sync mechanism.
2980  if (nCount == MAX_HEADERS_RESULTS && !have_headers_sync) {
2981  // Headers message had its maximum size; the peer may have more headers.
2982  if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
2983  LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
2984  pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
2985  }
2986  }
2987 
2988  UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);
2989 
2990  // Consider immediately downloading blocks.
2991  HeadersDirectFetchBlocks(pfrom, peer, *pindexLast);
2992 
2993  return;
2994 }
2995 
2996 bool PeerManagerImpl::ProcessOrphanTx(Peer& peer)
2997 {
2998  AssertLockHeld(g_msgproc_mutex);
2999  LOCK(cs_main);
3000 
3001  CTransactionRef porphanTx = nullptr;
3002 
3003  while (CTransactionRef porphanTx = m_orphanage.GetTxToReconsider(peer.m_id)) {
3004  const MempoolAcceptResult result = m_chainman.ProcessTransaction(porphanTx);
3005  const TxValidationState& state = result.m_state;
3006  const uint256& orphanHash = porphanTx->GetHash();
3007  const uint256& orphan_wtxid = porphanTx->GetWitnessHash();
3008 
3010  LogPrint(BCLog::TXPACKAGES, " accepted orphan tx %s (wtxid=%s)\n", orphanHash.ToString(), orphan_wtxid.ToString());
3011  LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n",
3012  peer.m_id,
3013  orphanHash.ToString(),
3014  orphan_wtxid.ToString(),
3015  m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000);
3016  RelayTransaction(orphanHash, porphanTx->GetWitnessHash());
3017  m_orphanage.AddChildrenToWorkSet(*porphanTx);
3018  m_orphanage.EraseTx(orphanHash);
3019  for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) {
3020  AddToCompactExtraTransactions(removedTx);
3021  }
3022  return true;
3023  } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
3024  if (state.IsInvalid()) {
3025  LogPrint(BCLog::TXPACKAGES, " invalid orphan tx %s (wtxid=%s) from peer=%d. %s\n",
3026  orphanHash.ToString(),
3027  orphan_wtxid.ToString(),
3028  peer.m_id,
3029  state.ToString());
3030  LogPrint(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n",
3031  orphanHash.ToString(),
3032  orphan_wtxid.ToString(),
3033  peer.m_id,
3034  state.ToString());
3035  // Maybe punish peer that gave us an invalid orphan tx
3036  MaybePunishNodeForTx(peer.m_id, state);
3037  }
3038  // Has inputs but not accepted to mempool
3039  // Probably non-standard or insufficient fee
3040  LogPrint(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s)\n", orphanHash.ToString(), orphan_wtxid.ToString());
3042  // We can add the wtxid of this transaction to our reject filter.
3043  // Do not add txids of witness transactions or witness-stripped
3044  // transactions to the filter, as they can have been malleated;
3045  // adding such txids to the reject filter would potentially
3046  // interfere with relay of valid transactions from peers that
3047  // do not support wtxid-based relay. See
3048  // https://github.com/bitcoin/bitcoin/issues/8279 for details.
3049  // We can remove this restriction (and always add wtxids to
3050  // the filter even for witness stripped transactions) once
3051  // wtxid-based relay is broadly deployed.
3052  // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
3053  // for concerns around weakening security of unupgraded nodes
3054  // if we start doing this too early.
3055  m_recent_rejects.insert(porphanTx->GetWitnessHash());
3056  // If the transaction failed for TX_INPUTS_NOT_STANDARD,
3057  // then we know that the witness was irrelevant to the policy
3058  // failure, since this check depends only on the txid
3059  // (the scriptPubKey being spent is covered by the txid).
3060  // Add the txid to the reject filter to prevent repeated
3061  // processing of this transaction in the event that child
3062  // transactions are later received (resulting in
3063  // parent-fetching by txid via the orphan-handling logic).
3064  if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && porphanTx->GetWitnessHash() != porphanTx->GetHash()) {
3065  // We only add the txid if it differs from the wtxid, to
3066  // avoid wasting entries in the rolling bloom filter.
3067  m_recent_rejects.insert(porphanTx->GetHash());
3068  }
3069  }
3070  m_orphanage.EraseTx(orphanHash);
3071  return true;
3072  }
3073  }
3074 
3075  return false;
3076 }
3077 
3078 bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer,
3079  BlockFilterType filter_type, uint32_t start_height,
3080  const uint256& stop_hash, uint32_t max_height_diff,
3081  const CBlockIndex*& stop_index,
3082  BlockFilterIndex*& filter_index)
3083 {
3084  const bool supported_filter_type =
3085  (filter_type == BlockFilterType::BASIC &&
3086  (peer.m_our_services & NODE_COMPACT_FILTERS));
3087  if (!supported_filter_type) {
3088  LogPrint(BCLog::NET, "peer %d requested unsupported block filter type: %d\n",
3089  node.GetId(), static_cast<uint8_t>(filter_type));
3090  node.fDisconnect = true;
3091  return false;
3092  }
3093 
3094  {
3095  LOCK(cs_main);
3096  stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash);
3097 
3098  // Check that the stop block exists and the peer would be allowed to fetch it.
3099  if (!stop_index || !BlockRequestAllowed(stop_index)) {
3100  LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
3101  node.GetId(), stop_hash.ToString());
3102  node.fDisconnect = true;
3103  return false;
3104  }
3105  }
3106 
3107  uint32_t stop_height = stop_index->nHeight;
3108  if (start_height > stop_height) {
3109  LogPrint(BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with "
3110  "start height %d and stop height %d\n",
3111  node.GetId(), start_height, stop_height);
3112  node.fDisconnect = true;
3113  return false;
3114  }
3115  if (stop_height - start_height >= max_height_diff) {
3116  LogPrint(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n",
3117  node.GetId(), stop_height - start_height + 1, max_height_diff);
3118  node.fDisconnect = true;
3119  return false;
3120  }
3121 
3122  filter_index = GetBlockFilterIndex(filter_type);
3123  if (!filter_index) {
3124  LogPrint(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type));
3125  return false;
3126  }
3127 
3128  return true;
3129 }
3130 
3131 void PeerManagerImpl::ProcessGetCFilters(CNode& node,Peer& peer, CDataStream& vRecv)
3132 {
3133  uint8_t filter_type_ser;
3134  uint32_t start_height;
3135  uint256 stop_hash;
3136 
3137  vRecv >> filter_type_ser >> start_height >> stop_hash;
3138 
3139  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3140 
3141  const CBlockIndex* stop_index;
3142  BlockFilterIndex* filter_index;
3143  if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash,
3144  MAX_GETCFILTERS_SIZE, stop_index, filter_index)) {
3145  return;
3146  }
3147 
3148  std::vector<BlockFilter> filters;
3149  if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
3150  LogPrint(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
3151  BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
3152  return;
3153  }
3154 
3155  for (const auto& filter : filters) {
3156  CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion())
3157  .Make(NetMsgType::CFILTER, filter);
3158  m_connman.PushMessage(&node, std::move(msg));
3159  }
3160 }
3161 
3162 void PeerManagerImpl::ProcessGetCFHeaders(CNode& node, Peer& peer, CDataStream& vRecv)
3163 {
3164  uint8_t filter_type_ser;
3165  uint32_t start_height;
3166  uint256 stop_hash;
3167 
3168  vRecv >> filter_type_ser >> start_height >> stop_hash;
3169 
3170  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3171 
3172  const CBlockIndex* stop_index;
3173  BlockFilterIndex* filter_index;
3174  if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash,
3175  MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
3176  return;
3177  }
3178 
3179  uint256 prev_header;
3180  if (start_height > 0) {
3181  const CBlockIndex* const prev_block =
3182  stop_index->GetAncestor(static_cast<int>(start_height - 1));
3183  if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
3184  LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
3185  BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString());
3186  return;
3187  }
3188  }
3189 
3190  std::vector<uint256> filter_hashes;
3191  if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) {
3192  LogPrint(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
3193  BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
3194  return;
3195  }
3196 
3197  CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion())
3198  .Make(NetMsgType::CFHEADERS,
3199  filter_type_ser,
3200  stop_index->GetBlockHash(),
3201  prev_header,
3202  filter_hashes);
3203  m_connman.PushMessage(&node, std::move(msg));
3204 }
3205 
3206 void PeerManagerImpl::ProcessGetCFCheckPt(CNode& node, Peer& peer, CDataStream& vRecv)
3207 {
3208  uint8_t filter_type_ser;
3209  uint256 stop_hash;
3210 
3211  vRecv >> filter_type_ser >> stop_hash;
3212 
3213  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3214 
3215  const CBlockIndex* stop_index;
3216  BlockFilterIndex* filter_index;
3217  if (!PrepareBlockFilterRequest(node, peer, filter_type, /*start_height=*/0, stop_hash,
3218  /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
3219  stop_index, filter_index)) {
3220  return;
3221  }
3222 
3223  std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
3224 
3225  // Populate headers.
3226  const CBlockIndex* block_index = stop_index;
3227  for (int i = headers.size() - 1; i >= 0; i--) {
3228  int height = (i + 1) * CFCHECKPT_INTERVAL;
3229  block_index = block_index->GetAncestor(height);
3230 
3231  if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
3232  LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
3233  BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString());
3234  return;
3235  }
3236  }
3237 
3238  CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion())
3239  .Make(NetMsgType::CFCHECKPT,
3240  filter_type_ser,
3241  stop_index->GetBlockHash(),
3242  headers);
3243  m_connman.PushMessage(&node, std::move(msg));
3244 }
3245 
3246 void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked)
3247 {
3248  bool new_block{false};
3249  m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked, &new_block);
3250  if (new_block) {
3251  node.m_last_block_time = GetTime<std::chrono::seconds>();
3252  // In case this block came from a different peer than we requested
3253  // from, we can erase the block request now anyway (as we just stored
3254  // this block to disk).
3255  LOCK(cs_main);
3256  RemoveBlockRequest(block->GetHash(), std::nullopt);
3257  } else {
3258  LOCK(cs_main);
3259  mapBlockSource.erase(block->GetHash());
3260  }
3261 }
3262 
/** Handle a BLOCKTXN message: the transactions we requested via GETBLOCKTXN to
 *  complete reconstruction of a compact block. Attempts to fill in the partial
 *  block and, on success, hands the completed block to ProcessBlock().
 */
void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions)
{
    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
    bool fBlockRead{false};
    const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
    {
        LOCK(cs_main);

        auto range_flight = mapBlocksInFlight.equal_range(block_transactions.blockhash);
        size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
        bool requested_block_from_this_peer{false};

        // Multimap ensures ordering of outstanding requests. It's either empty or first in line.
        bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());

        // Look for an in-flight entry from this peer that still has a
        // partial block outstanding; leave range_flight.first pointing at it.
        while (range_flight.first != range_flight.second) {
            auto [node_id, block_it] = range_flight.first->second;
            if (node_id == pfrom.GetId() && block_it->partialBlock) {
                requested_block_from_this_peer = true;
                break;
            }
            range_flight.first++;
        }

        if (!requested_block_from_this_peer) {
            LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId());
            return;
        }

        PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock;
        ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn);
        if (status == READ_STATUS_INVALID) {
            RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
            Misbehaving(peer, 100, "invalid compact block/non-matching block transactions");
            return;
        } else if (status == READ_STATUS_FAILED) {
            if (first_in_flight) {
                // Might have collided, fall back to getdata now :(
                std::vector<CInv> invs;
                invs.emplace_back(MSG_BLOCK | GetFetchFlags(peer), block_transactions.blockhash);
                m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
            } else {
                // Another peer's request for this block is ahead of ours;
                // drop our in-flight entry and wait for that download.
                RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId());
                LogPrint(BCLog::NET, "Peer %d sent us a compact block but it failed to reconstruct, waiting on first download to complete\n", pfrom.GetId());
                return;
            }
        } else {
            // Block is either okay, or possibly we received
            // READ_STATUS_CHECKBLOCK_FAILED.
            // Note that CheckBlock can only fail for one of a few reasons:
            // 1. bad-proof-of-work (impossible here, because we've already
            //    accepted the header)
            // 2. merkleroot doesn't match the transactions given (already
            //    caught in FillBlock with READ_STATUS_FAILED, so
            //    impossible here)
            // 3. the block is otherwise invalid (eg invalid coinbase,
            //    block is too big, too many legacy sigops, etc).
            // So if CheckBlock failed, #3 is the only possibility.
            // Under BIP 152, we don't discourage the peer unless proof of work is
            // invalid (we don't require all the stateless checks to have
            // been run). This is handled below, so just treat this as
            // though the block was successfully read, and rely on the
            // handling in ProcessNewBlock to ensure the block index is
            // updated, etc.
            RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // it is now an empty pointer
            fBlockRead = true;
            // mapBlockSource is used for potentially punishing peers and
            // updating which peers send us compact blocks, so the race
            // between here and cs_main in ProcessNewBlock is fine.
            // BIP 152 permits peers to relay compact blocks after validating
            // the header only; we should not punish peers if the block turns
            // out to be invalid.
            mapBlockSource.emplace(block_transactions.blockhash, std::make_pair(pfrom.GetId(), false));
        }
    } // Don't hold cs_main when we call into ProcessNewBlock
    if (fBlockRead) {
        // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
        // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
        // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
        // disk-space attacks), but this should be safe due to the
        // protections in the compact block handler -- see related comment
        // in compact block optimistic reconstruction handling.
        ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true);
    }
    return;
}
3349 
3350 void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
3351  const std::chrono::microseconds time_received,
3352  const std::atomic<bool>& interruptMsgProc)
3353 {
3354  AssertLockHeld(g_msgproc_mutex);
3355 
3356  LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
3357 
3358  PeerRef peer = GetPeerRef(pfrom.GetId());
3359  if (peer == nullptr) return;
3360 
3361  if (msg_type == NetMsgType::VERSION) {
3362  if (pfrom.nVersion != 0) {
3363  LogPrint(BCLog::NET, "redundant version message from peer=%d\n", pfrom.GetId());
3364  return;
3365  }
3366 
3367  int64_t nTime;
3368  CService addrMe;
3369  uint64_t nNonce = 1;
3370  ServiceFlags nServices;
3371  int nVersion;
3372  std::string cleanSubVer;
3373  int starting_height = -1;
3374  bool fRelay = true;
3375 
3376  vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
3377  if (nTime < 0) {
3378  nTime = 0;
3379  }
3380  vRecv.ignore(8); // Ignore the addrMe service bits sent by the peer
3381  vRecv >> CNetAddr::V1(addrMe);
3382  if (!pfrom.IsInboundConn())
3383  {
3384  m_addrman.SetServices(pfrom.addr, nServices);
3385  }
3386  if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices))
3387  {
3388  LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom.GetId(), nServices, GetDesirableServiceFlags(nServices));
3389  pfrom.fDisconnect = true;
3390  return;
3391  }
3392 
3393  if (nVersion < MIN_PEER_PROTO_VERSION) {
3394  // disconnect from peers older than this proto version
3395  LogPrint(BCLog::NET, "peer=%d using obsolete version %i; disconnecting\n", pfrom.GetId(), nVersion);
3396  pfrom.fDisconnect = true;
3397  return;
3398  }
3399 
3400  if (!vRecv.empty()) {
3401  // The version message includes information about the sending node which we don't use:
3402  // - 8 bytes (service bits)
3403  // - 16 bytes (ipv6 address)
3404  // - 2 bytes (port)
3405  vRecv.ignore(26);
3406  vRecv >> nNonce;
3407  }
3408  if (!vRecv.empty()) {
3409  std::string strSubVer;
3410  vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
3411  cleanSubVer = SanitizeString(strSubVer);
3412  }
3413  if (!vRecv.empty()) {
3414  vRecv >> starting_height;
3415  }
3416  if (!vRecv.empty())
3417  vRecv >> fRelay;
3418  // Disconnect if we connected to ourself
3419  if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce))
3420  {
3421  LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToStringAddrPort());
3422  pfrom.fDisconnect = true;
3423  return;
3424  }
3425 
3426  if (pfrom.IsInboundConn() && addrMe.IsRoutable())
3427  {
3428  SeenLocal(addrMe);
3429  }
3430 
3431  // Inbound peers send us their version message when they connect.
3432  // We send our version message in response.
3433  if (pfrom.IsInboundConn()) {
3434  PushNodeVersion(pfrom, *peer);
3435  }
3436 
3437  // Change version
3438  const int greatest_common_version = std::min(nVersion, PROTOCOL_VERSION);
3439  pfrom.SetCommonVersion(greatest_common_version);
3440  pfrom.nVersion = nVersion;
3441 
3442  const CNetMsgMaker msg_maker(greatest_common_version);
3443 
3444  if (greatest_common_version >= WTXID_RELAY_VERSION) {
3445  m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::WTXIDRELAY));
3446  }
3447 
3448  // Signal ADDRv2 support (BIP155).
3449  if (greatest_common_version >= 70016) {
3450  // BIP155 defines addrv2 and sendaddrv2 for all protocol versions, but some
3451  // implementations reject messages they don't know. As a courtesy, don't send
3452  // it to nodes with a version before 70016, as no software is known to support
3453  // BIP155 that doesn't announce at least that protocol version number.
3454  m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2));
3455  }
3456 
3458  peer->m_their_services = nServices;
3459  pfrom.SetAddrLocal(addrMe);
3460  {
3461  LOCK(pfrom.m_subver_mutex);
3462  pfrom.cleanSubVer = cleanSubVer;
3463  }
3464  peer->m_starting_height = starting_height;
3465 
3466  // Only initialize the Peer::TxRelay m_relay_txs data structure if:
3467  // - this isn't an outbound block-relay-only connection, and
3468  // - this isn't an outbound feeler connection, and
3469  // - fRelay=true (the peer wishes to receive transaction announcements)
3470  // or we're offering NODE_BLOOM to this peer. NODE_BLOOM means that
3471  // the peer may turn on transaction relay later.
3472  if (!pfrom.IsBlockOnlyConn() &&
3473  !pfrom.IsFeelerConn() &&
3474  (fRelay || (peer->m_our_services & NODE_BLOOM))) {
3475  auto* const tx_relay = peer->SetTxRelay();
3476  {
3477  LOCK(tx_relay->m_bloom_filter_mutex);
3478  tx_relay->m_relay_txs = fRelay; // set to true after we get the first filter* message
3479  }
3480  if (fRelay) pfrom.m_relays_txs = true;
3481  }
3482 
3483  if (greatest_common_version >= WTXID_RELAY_VERSION && m_txreconciliation) {
3484  // Per BIP-330, we announce txreconciliation support if:
3485  // - protocol version per the peer's VERSION message supports WTXID_RELAY;
3486  // - transaction relay is supported per the peer's VERSION message
3487  // - this is not a block-relay-only connection and not a feeler
3488  // - this is not an addr fetch connection;
3489  // - we are not in -blocksonly mode.
3490  const auto* tx_relay = peer->GetTxRelay();
3491  if (tx_relay && WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs) &&
3492  !pfrom.IsAddrFetchConn() && !m_opts.ignore_incoming_txs) {
3493  const uint64_t recon_salt = m_txreconciliation->PreRegisterPeer(pfrom.GetId());
3494  m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDTXRCNCL,
3495  TXRECONCILIATION_VERSION, recon_salt));
3496  }
3497  }
3498 
3499  m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK));
3500 
3501  // Potentially mark this peer as a preferred download peer.
3502  {
3503  LOCK(cs_main);
3504  CNodeState* state = State(pfrom.GetId());
3505  state->fPreferredDownload = (!pfrom.IsInboundConn() || pfrom.HasPermission(NetPermissionFlags::NoBan)) && !pfrom.IsAddrFetchConn() && CanServeBlocks(*peer);
3506  m_num_preferred_download_peers += state->fPreferredDownload;
3507  }
3508 
3509  // Attempt to initialize address relay for outbound peers and use result
3510  // to decide whether to send GETADDR, so that we don't send it to
3511  // inbound or outbound block-relay-only peers.
3512  bool send_getaddr{false};
3513  if (!pfrom.IsInboundConn()) {
3514  send_getaddr = SetupAddressRelay(pfrom, *peer);
3515  }
3516  if (send_getaddr) {
3517  // Do a one-time address fetch to help populate/update our addrman.
3518  // If we're starting up for the first time, our addrman may be pretty
3519  // empty, so this mechanism is important to help us connect to the network.
3520  // We skip this for block-relay-only peers. We want to avoid
3521  // potentially leaking addr information and we do not want to
3522  // indicate to the peer that we will participate in addr relay.
3523  m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make(NetMsgType::GETADDR));
3524  peer->m_getaddr_sent = true;
3525  // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND addresses in response
3526  // (bypassing the MAX_ADDR_PROCESSING_TOKEN_BUCKET limit).
3527  peer->m_addr_token_bucket += MAX_ADDR_TO_SEND;
3528  }
3529 
3530  if (!pfrom.IsInboundConn()) {
3531  // For non-inbound connections, we update the addrman to record
3532  // connection success so that addrman will have an up-to-date
3533  // notion of which peers are online and available.
3534  //
3535  // While we strive to not leak information about block-relay-only
3536  // connections via the addrman, not moving an address to the tried
3537  // table is also potentially detrimental because new-table entries
3538  // are subject to eviction in the event of addrman collisions. We
3539  // mitigate the information-leak by never calling
3540  // AddrMan::Connected() on block-relay-only peers; see
3541  // FinalizeNode().
3542  //
3543  // This moves an address from New to Tried table in Addrman,
3544  // resolves tried-table collisions, etc.
3545  m_addrman.Good(pfrom.addr);
3546  }
3547 
3548  std::string remoteAddr;
3549  if (fLogIPs)
3550  remoteAddr = ", peeraddr=" + pfrom.addr.ToStringAddrPort();
3551 
3552  const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)};
3553  LogPrint(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s%s\n",
3554  cleanSubVer, pfrom.nVersion,
3555  peer->m_starting_height, addrMe.ToStringAddrPort(), fRelay, pfrom.GetId(),
3556  remoteAddr, (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));
3557 
3558  int64_t nTimeOffset = nTime - GetTime();
3559  pfrom.nTimeOffset = nTimeOffset;
3560  if (!pfrom.IsInboundConn()) {
3561  // Don't use timedata samples from inbound peers to make it
3562  // harder for others to tamper with our adjusted time.
3563  AddTimeData(pfrom.addr, nTimeOffset);
3564  }
3565 
3566  // If the peer is old enough to have the old alert system, send it the final alert.
3567  if (greatest_common_version <= 70012) {
3568  const auto finalAlert{ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50")};
3569  m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make("alert", Span{finalAlert}));
3570  }
3571 
3572  // Feeler connections exist only to verify if address is online.
3573  if (pfrom.IsFeelerConn()) {
3574  LogPrint(BCLog::NET, "feeler connection completed peer=%d; disconnecting\n", pfrom.GetId());
3575  pfrom.fDisconnect = true;
3576  }
3577  return;
3578  }
3579 
3580  if (pfrom.nVersion == 0) {
3581  // Must have a version message before anything else
3582  LogPrint(BCLog::NET, "non-version message before version handshake. Message \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
3583  return;
3584  }
3585 
3586  // At this point, the outgoing message serialization version can't change.
3587  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
3588 
3589  if (msg_type == NetMsgType::VERACK) {
3590  if (pfrom.fSuccessfullyConnected) {
3591  LogPrint(BCLog::NET, "ignoring redundant verack message from peer=%d\n", pfrom.GetId());
3592  return;
3593  }
3594 
 3595  // Log successful connections unconditionally for outbound, but not for inbound as those
3596  // can be triggered by an attacker at high rate.
3598  const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)};
3599  LogPrintf("New %s %s peer connected: version: %d, blocks=%d, peer=%d%s%s\n",
3600  pfrom.ConnectionTypeAsString(),
3601  TransportTypeAsString(pfrom.m_transport->GetInfo().transport_type),
3602  pfrom.nVersion.load(), peer->m_starting_height,
3603  pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToStringAddrPort()) : ""),
3604  (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));
3605  }
3606 
3607  if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) {
3608  // Tell our peer we are willing to provide version 2 cmpctblocks.
3609  // However, we do not request new block announcements using
3610  // cmpctblock messages.
3611  // We send this to non-NODE NETWORK peers as well, because
3612  // they may wish to request compact blocks from us
3613  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION));
3614  }
3615 
3616  if (m_txreconciliation) {
3617  if (!peer->m_wtxid_relay || !m_txreconciliation->IsPeerRegistered(pfrom.GetId())) {
3618  // We could have optimistically pre-registered/registered the peer. In that case,
3619  // we should forget about the reconciliation state here if this wasn't followed
3620  // by WTXIDRELAY (since WTXIDRELAY can't be announced later).
3621  m_txreconciliation->ForgetPeer(pfrom.GetId());
3622  }
3623  }
3624 
3625  if (auto tx_relay = peer->GetTxRelay()) {
3626  // `TxRelay::m_tx_inventory_to_send` must be empty before the
3627  // version handshake is completed as
3628  // `TxRelay::m_next_inv_send_time` is first initialised in
3629  // `SendMessages` after the verack is received. Any transactions
3630  // received during the version handshake would otherwise
3631  // immediately be advertised without random delay, potentially
3632  // leaking the time of arrival to a spy.
3633  Assume(WITH_LOCK(
3634  tx_relay->m_tx_inventory_mutex,
3635  return tx_relay->m_tx_inventory_to_send.empty() &&
3636  tx_relay->m_next_inv_send_time == 0s));
3637  }
3638 
3639  pfrom.fSuccessfullyConnected = true;
3640  return;
3641  }
3642 
3643  if (msg_type == NetMsgType::SENDHEADERS) {
3644  peer->m_prefers_headers = true;
3645  return;
3646  }
3647 
3648  if (msg_type == NetMsgType::SENDCMPCT) {
3649  bool sendcmpct_hb{false};
3650  uint64_t sendcmpct_version{0};
3651  vRecv >> sendcmpct_hb >> sendcmpct_version;
3652 
3653  // Only support compact block relay with witnesses
3654  if (sendcmpct_version != CMPCTBLOCKS_VERSION) return;
3655 
3656  LOCK(cs_main);
3657  CNodeState* nodestate = State(pfrom.GetId());
3658  nodestate->m_provides_cmpctblocks = true;
3659  nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;
3660  // save whether peer selects us as BIP152 high-bandwidth peer
3661  // (receiving sendcmpct(1) signals high-bandwidth, sendcmpct(0) low-bandwidth)
3662  pfrom.m_bip152_highbandwidth_from = sendcmpct_hb;
3663  return;
3664  }
3665 
3666  // BIP339 defines feature negotiation of wtxidrelay, which must happen between
3667  // VERSION and VERACK to avoid relay problems from switching after a connection is up.
3668  if (msg_type == NetMsgType::WTXIDRELAY) {
3669  if (pfrom.fSuccessfullyConnected) {
3670  // Disconnect peers that send a wtxidrelay message after VERACK.
3671  LogPrint(BCLog::NET, "wtxidrelay received after verack from peer=%d; disconnecting\n", pfrom.GetId());
3672  pfrom.fDisconnect = true;
3673  return;
3674  }
3675  if (pfrom.GetCommonVersion() >= WTXID_RELAY_VERSION) {
3676  if (!peer->m_wtxid_relay) {
3677  peer->m_wtxid_relay = true;
3678  m_wtxid_relay_peers++;
3679  } else {
3680  LogPrint(BCLog::NET, "ignoring duplicate wtxidrelay from peer=%d\n", pfrom.GetId());
3681  }
3682  } else {
3683  LogPrint(BCLog::NET, "ignoring wtxidrelay due to old common version=%d from peer=%d\n", pfrom.GetCommonVersion(), pfrom.GetId());
3684  }
3685  return;
3686  }
3687 
3688  // BIP155 defines feature negotiation of addrv2 and sendaddrv2, which must happen
3689  // between VERSION and VERACK.
3690  if (msg_type == NetMsgType::SENDADDRV2) {
3691  if (pfrom.fSuccessfullyConnected) {
3692  // Disconnect peers that send a SENDADDRV2 message after VERACK.
3693  LogPrint(BCLog::NET, "sendaddrv2 received after verack from peer=%d; disconnecting\n", pfrom.GetId());
3694  pfrom.fDisconnect = true;
3695  return;
3696  }
3697  peer->m_wants_addrv2 = true;
3698  return;
3699  }
3700 
3701  // Received from a peer demonstrating readiness to announce transactions via reconciliations.
3702  // This feature negotiation must happen between VERSION and VERACK to avoid relay problems
3703  // from switching announcement protocols after the connection is up.
3704  if (msg_type == NetMsgType::SENDTXRCNCL) {
3705  if (!m_txreconciliation) {
3706  LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl from peer=%d ignored, as our node does not have txreconciliation enabled\n", pfrom.GetId());
3707  return;
3708  }
3709 
3710  if (pfrom.fSuccessfullyConnected) {
3711  LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received after verack from peer=%d; disconnecting\n", pfrom.GetId());
3712  pfrom.fDisconnect = true;
3713  return;
3714  }
3715 
3716  // Peer must not offer us reconciliations if we specified no tx relay support in VERSION.
3717  if (RejectIncomingTxs(pfrom)) {
3718  LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received from peer=%d to which we indicated no tx relay; disconnecting\n", pfrom.GetId());
3719  pfrom.fDisconnect = true;
3720  return;
3721  }
3722 
3723  // Peer must not offer us reconciliations if they specified no tx relay support in VERSION.
3724  // This flag might also be false in other cases, but the RejectIncomingTxs check above
3725  // eliminates them, so that this flag fully represents what we are looking for.
3726  const auto* tx_relay = peer->GetTxRelay();
3727  if (!tx_relay || !WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs)) {
3728  LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received from peer=%d which indicated no tx relay to us; disconnecting\n", pfrom.GetId());
3729  pfrom.fDisconnect = true;
3730  return;
3731  }
3732 
3733  uint32_t peer_txreconcl_version;
3734  uint64_t remote_salt;
3735  vRecv >> peer_txreconcl_version >> remote_salt;
3736 
3737  const ReconciliationRegisterResult result = m_txreconciliation->RegisterPeer(pfrom.GetId(), pfrom.IsInboundConn(),
3738  peer_txreconcl_version, remote_salt);
3739  switch (result) {
3741  LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "Ignore unexpected txreconciliation signal from peer=%d\n", pfrom.GetId());
3742  break;
3744  break;
3746  LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "txreconciliation protocol violation from peer=%d (sendtxrcncl received from already registered peer); disconnecting\n", pfrom.GetId());
3747  pfrom.fDisconnect = true;
3748  return;
3750  LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "txreconciliation protocol violation from peer=%d; disconnecting\n", pfrom.GetId());
3751  pfrom.fDisconnect = true;
3752  return;
3753  }
3754  return;
3755  }
3756 
3757  if (!pfrom.fSuccessfullyConnected) {
3758  LogPrint(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
3759  return;
3760  }
3761 
3762  if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
3763  const auto ser_params{
3764  msg_type == NetMsgType::ADDRV2 ?
3765  // Set V2 param so that the CNetAddr and CAddress
3766  // unserialize methods know that an address in v2 format is coming.
3769  };
3770 
3771  std::vector<CAddress> vAddr;
3772 
3773  vRecv >> WithParams(ser_params, vAddr);
3774 
3775  if (!SetupAddressRelay(pfrom, *peer)) {
3776  LogPrint(BCLog::NET, "ignoring %s message from %s peer=%d\n", msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId());
3777  return;
3778  }
3779 
3780  if (vAddr.size() > MAX_ADDR_TO_SEND)
3781  {
3782  Misbehaving(*peer, 20, strprintf("%s message size = %u", msg_type, vAddr.size()));
3783  return;
3784  }
3785 
3786  // Store the new addresses
3787  std::vector<CAddress> vAddrOk;
3788  const auto current_a_time{Now<NodeSeconds>()};
3789 
3790  // Update/increment addr rate limiting bucket.
3791  const auto current_time{GetTime<std::chrono::microseconds>()};
3792  if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
3793  // Don't increment bucket if it's already full
3794  const auto time_diff = std::max(current_time - peer->m_addr_token_timestamp, 0us);
3795  const double increment = Ticks<SecondsDouble>(time_diff) * MAX_ADDR_RATE_PER_SECOND;
3796  peer->m_addr_token_bucket = std::min<double>(peer->m_addr_token_bucket + increment, MAX_ADDR_PROCESSING_TOKEN_BUCKET);
3797  }
3798  peer->m_addr_token_timestamp = current_time;
3799 
3800  const bool rate_limited = !pfrom.HasPermission(NetPermissionFlags::Addr);
3801  uint64_t num_proc = 0;
3802  uint64_t num_rate_limit = 0;
3803  Shuffle(vAddr.begin(), vAddr.end(), m_rng);
3804  for (CAddress& addr : vAddr)
3805  {
3806  if (interruptMsgProc)
3807  return;
3808 
3809  // Apply rate limiting.
3810  if (peer->m_addr_token_bucket < 1.0) {
3811  if (rate_limited) {
3812  ++num_rate_limit;
3813  continue;
3814  }
3815  } else {
3816  peer->m_addr_token_bucket -= 1.0;
3817  }
3818  // We only bother storing full nodes, though this may include
3819  // things which we would not make an outbound connection to, in
3820  // part because we may make feeler connections to them.
3822  continue;
3823 
3824  if (addr.nTime <= NodeSeconds{100000000s} || addr.nTime > current_a_time + 10min) {
3825  addr.nTime = current_a_time - 5 * 24h;
3826  }
3827  AddAddressKnown(*peer, addr);
3828  if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
3829  // Do not process banned/discouraged addresses beyond remembering we received them
3830  continue;
3831  }
3832  ++num_proc;
3833  const bool reachable{g_reachable_nets.Contains(addr)};
3834  if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent && vAddr.size() <= 10 && addr.IsRoutable()) {
3835  // Relay to a limited number of other nodes
3836  RelayAddress(pfrom.GetId(), addr, reachable);
3837  }
3838  // Do not store addresses outside our network
3839  if (reachable) {
3840  vAddrOk.push_back(addr);
3841  }
3842  }
3843  peer->m_addr_processed += num_proc;
3844  peer->m_addr_rate_limited += num_rate_limit;
3845  LogPrint(BCLog::NET, "Received addr: %u addresses (%u processed, %u rate-limited) from peer=%d\n",
3846  vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());
3847 
3848  m_addrman.Add(vAddrOk, pfrom.addr, 2h);
3849  if (vAddr.size() < 1000) peer->m_getaddr_sent = false;
3850 
3851  // AddrFetch: Require multiple addresses to avoid disconnecting on self-announcements
3852  if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) {
3853  LogPrint(BCLog::NET, "addrfetch connection completed peer=%d; disconnecting\n", pfrom.GetId());
3854  pfrom.fDisconnect = true;
3855  }
3856  return;
3857  }
3858 
3859  if (msg_type == NetMsgType::INV) {
3860  std::vector<CInv> vInv;
3861  vRecv >> vInv;
3862  if (vInv.size() > MAX_INV_SZ)
3863  {
3864  Misbehaving(*peer, 20, strprintf("inv message size = %u", vInv.size()));
3865  return;
3866  }
3867 
3868  const bool reject_tx_invs{RejectIncomingTxs(pfrom)};
3869 
3870  LOCK(cs_main);
3871 
3872  const auto current_time{GetTime<std::chrono::microseconds>()};
3873  uint256* best_block{nullptr};
3874 
3875  for (CInv& inv : vInv) {
3876  if (interruptMsgProc) return;
3877 
3878  // Ignore INVs that don't match wtxidrelay setting.
3879  // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting.
3880  // This is fine as no INV messages are involved in that process.
3881  if (peer->m_wtxid_relay) {
3882  if (inv.IsMsgTx()) continue;
3883  } else {
3884  if (inv.IsMsgWtx()) continue;
3885  }
3886 
3887  if (inv.IsMsgBlk()) {
3888  const bool fAlreadyHave = AlreadyHaveBlock(inv.hash);
3889  LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
3890 
3891  UpdateBlockAvailability(pfrom.GetId(), inv.hash);
3892  if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() && !IsBlockRequested(inv.hash)) {
3893  // Headers-first is the primary method of announcement on
3894  // the network. If a node fell back to sending blocks by
3895  // inv, it may be for a re-org, or because we haven't
3896  // completed initial headers sync. The final block hash
3897  // provided should be the highest, so send a getheaders and
3898  // then fetch the blocks we need to catch up.
3899  best_block = &inv.hash;
3900  }
3901  } else if (inv.IsGenTxMsg()) {
3902  if (reject_tx_invs) {
3903  LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.hash.ToString(), pfrom.GetId());
3904  pfrom.fDisconnect = true;
3905  return;
3906  }
3907  const GenTxid gtxid = ToGenTxid(inv);
3908  const bool fAlreadyHave = AlreadyHaveTx(gtxid);
3909  LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
3910 
3911  AddKnownTx(*peer, inv.hash);
3912  if (!fAlreadyHave && !m_chainman.IsInitialBlockDownload()) {
3913  AddTxAnnouncement(pfrom, gtxid, current_time);
3914  }
3915  } else {
3916  LogPrint(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId());
3917  }
3918  }
3919 
3920  if (best_block != nullptr) {
3921  // If we haven't started initial headers-sync with this peer, then
3922  // consider sending a getheaders now. On initial startup, there's a
3923  // reliability vs bandwidth tradeoff, where we are only trying to do
3924  // initial headers sync with one peer at a time, with a long
3925  // timeout (at which point, if the sync hasn't completed, we will
3926  // disconnect the peer and then choose another). In the meantime,
3927  // as new blocks are found, we are willing to add one new peer per
3928  // block to sync with as well, to sync quicker in the case where
3929  // our initial peer is unresponsive (but less bandwidth than we'd
3930  // use if we turned on sync with all peers).
3931  CNodeState& state{*Assert(State(pfrom.GetId()))};
3932  if (state.fSyncStarted || (!peer->m_inv_triggered_getheaders_before_sync && *best_block != m_last_block_inv_triggering_headers_sync)) {
3933  if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer)) {
3934  LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
3935  m_chainman.m_best_header->nHeight, best_block->ToString(),
3936  pfrom.GetId());
3937  }
3938  if (!state.fSyncStarted) {
3939  peer->m_inv_triggered_getheaders_before_sync = true;
3940  // Update the last block hash that triggered a new headers
3941  // sync, so that we don't turn on headers sync with more
3942  // than 1 new peer every new block.
3943  m_last_block_inv_triggering_headers_sync = *best_block;
3944  }
3945  }
3946  }
3947 
3948  return;
3949  }
3950 
3951  if (msg_type == NetMsgType::GETDATA) {
3952  std::vector<CInv> vInv;
3953  vRecv >> vInv;
3954  if (vInv.size() > MAX_INV_SZ)
3955  {
3956  Misbehaving(*peer, 20, strprintf("getdata message size = %u", vInv.size()));
3957  return;
3958  }
3959 
3960  LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId());
3961 
3962  if (vInv.size() > 0) {
3963  LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId());
3964  }
3965 
3966  {
3967  LOCK(peer->m_getdata_requests_mutex);
3968  peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end());
3969  ProcessGetData(pfrom, *peer, interruptMsgProc);
3970  }
3971 
3972  return;
3973  }
3974 
3975  if (msg_type == NetMsgType::GETBLOCKS) {
3976  CBlockLocator locator;
3977  uint256 hashStop;
3978  vRecv >> locator >> hashStop;
3979 
3980  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
3981  LogPrint(BCLog::NET, "getblocks locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
3982  pfrom.fDisconnect = true;
3983  return;
3984  }
3985 
3986  // We might have announced the currently-being-connected tip using a
3987  // compact block, which resulted in the peer sending a getblocks
3988  // request, which we would otherwise respond to without the new block.
3989  // To avoid this situation we simply verify that we are on our best
3990  // known chain now. This is super overkill, but we handle it better
3991  // for getheaders requests, and there are no known nodes which support
3992  // compact blocks but still use getblocks to request blocks.
3993  {
3994  std::shared_ptr<const CBlock> a_recent_block;
3995  {
3996  LOCK(m_most_recent_block_mutex);
3997  a_recent_block = m_most_recent_block;
3998  }
3999  BlockValidationState state;
4000  if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
4001  LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
4002  }
4003  }
4004 
4005  LOCK(cs_main);
4006 
4007  // Find the last block the caller has in the main chain
4008  const CBlockIndex* pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
4009 
4010  // Send the rest of the chain
4011  if (pindex)
4012  pindex = m_chainman.ActiveChain().Next(pindex);
4013  int nLimit = 500;
4014  LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId());
4015  for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
4016  {
4017  if (pindex->GetBlockHash() == hashStop)
4018  {
4019  LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4020  break;
4021  }
4022  // If pruning, don't inv blocks unless we have on disk and are likely to still have
4023  // for some reasonable time window (1 hour) that block relay might require.
4024  const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
4025  if (m_chainman.m_blockman.IsPruneMode() && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave)) {
4026  LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4027  break;
4028  }
4029  WITH_LOCK(peer->m_block_inv_mutex, peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash()));
4030  if (--nLimit <= 0) {
4031  // When this block is requested, we'll send an inv that'll
4032  // trigger the peer to getblocks the next batch of inventory.
4033  LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4034  WITH_LOCK(peer->m_block_inv_mutex, {peer->m_continuation_block = pindex->GetBlockHash();});
4035  break;
4036  }
4037  }
4038  return;
4039  }
4040 
4041  if (msg_type == NetMsgType::GETBLOCKTXN) {
4043  vRecv >> req;
4044 
4045  std::shared_ptr<const CBlock> recent_block;
4046  {
4047  LOCK(m_most_recent_block_mutex);
4048  if (m_most_recent_block_hash == req.blockhash)
4049  recent_block = m_most_recent_block;
4050  // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion
4051  }
4052  if (recent_block) {
4053  SendBlockTransactions(pfrom, *peer, *recent_block, req);
4054  return;
4055  }
4056 
4057  {
4058  LOCK(cs_main);
4059 
4060  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(req.blockhash);
4061  if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) {
4062  LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId());
4063  return;
4064  }
4065 
4066  if (pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) {
4067  CBlock block;
4068  const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(block, *pindex)};
4069  assert(ret);
4070 
4071  SendBlockTransactions(pfrom, *peer, block, req);
4072  return;
4073  }
4074  }
4075 
4076  // If an older block is requested (should never happen in practice,
4077  // but can happen in tests) send a block response instead of a
4078  // blocktxn response. Sending a full block response instead of a
4079  // small blocktxn response is preferable in the case where a peer
4080  // might maliciously send lots of getblocktxn requests to trigger
4081  // expensive disk reads, because it will require the peer to
4082  // actually receive all the data read from disk over the network.
4083  LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
4084  CInv inv{MSG_WITNESS_BLOCK, req.blockhash};
4085  WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv));
4086  // The message processing loop will go around again (without pausing) and we'll respond then
4087  return;
4088  }
4089 
4090  if (msg_type == NetMsgType::GETHEADERS) {
4091  CBlockLocator locator;
4092  uint256 hashStop;
4093  vRecv >> locator >> hashStop;
4094 
4095  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
4096  LogPrint(BCLog::NET, "getheaders locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
4097  pfrom.fDisconnect = true;
4098  return;
4099  }
4100 
4101  if (m_chainman.m_blockman.LoadingBlocks()) {
4102  LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d while importing/reindexing\n", pfrom.GetId());
4103  return;
4104  }
4105 
4106  LOCK(cs_main);
4107 
4108  // Note that if we were to be on a chain that forks from the checkpointed
4109  // chain, then serving those headers to a peer that has seen the
4110  // checkpointed chain would cause that peer to disconnect us. Requiring
4111  // that our chainwork exceed the minimum chain work is a protection against
4112  // being fed a bogus chain when we started up for the first time and
4113  // getting partitioned off the honest network for serving that chain to
4114  // others.
4115  if (m_chainman.ActiveTip() == nullptr ||
4116  (m_chainman.ActiveTip()->nChainWork < m_chainman.MinimumChainWork() && !pfrom.HasPermission(NetPermissionFlags::Download))) {
4117  LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work; sending empty response\n", pfrom.GetId());
4118  // Just respond with an empty headers message, to tell the peer to
4119  // go away but not treat us as unresponsive.
4120  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, std::vector<CBlock>()));
4121  return;
4122  }
4123 
4124  CNodeState *nodestate = State(pfrom.GetId());
4125  const CBlockIndex* pindex = nullptr;
4126  if (locator.IsNull())
4127  {
4128  // If locator is null, return the hashStop block
4129  pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop);
4130  if (!pindex) {
4131  return;
4132  }
4133 
4134  if (!BlockRequestAllowed(pindex)) {
4135  LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom.GetId());
4136  return;
4137  }
4138  }
4139  else
4140  {
4141  // Find the last block the caller has in the main chain
4142  pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
4143  if (pindex)
4144  pindex = m_chainman.ActiveChain().Next(pindex);
4145  }
4146 
4147  // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
4148  std::vector<CBlock> vHeaders;
4149  int nLimit = MAX_HEADERS_RESULTS;
4150  LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId());
4151  for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
4152  {
4153  vHeaders.emplace_back(pindex->GetBlockHeader());
4154  if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
4155  break;
4156  }
4157  // pindex can be nullptr either if we sent m_chainman.ActiveChain().Tip() OR
4158  // if our peer has m_chainman.ActiveChain().Tip() (and thus we are sending an empty
4159  // headers message). In both cases it's safe to update
4160  // pindexBestHeaderSent to be our tip.
4161  //
4162  // It is important that we simply reset the BestHeaderSent value here,
4163  // and not max(BestHeaderSent, newHeaderSent). We might have announced
4164  // the currently-being-connected tip using a compact block, which
4165  // resulted in the peer sending a headers request, which we respond to
4166  // without the new block. By resetting the BestHeaderSent, we ensure we
4167  // will re-announce the new block via headers (or compact blocks again)
4168  // in the SendMessages logic.
4169  nodestate->pindexBestHeaderSent = pindex ? pindex : m_chainman.ActiveChain().Tip();
4170  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
4171  return;
4172  }
4173 
4174  if (msg_type == NetMsgType::TX) {
4175  if (RejectIncomingTxs(pfrom)) {
4176  LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId());
4177  pfrom.fDisconnect = true;
4178  return;
4179  }
4180 
4181  // Stop processing the transaction early if we are still in IBD since we don't
4182  // have enough information to validate it yet. Sending unsolicited transactions
4183  // is not considered a protocol violation, so don't punish the peer.
4184  if (m_chainman.IsInitialBlockDownload()) return;
4185 
4186  CTransactionRef ptx;
4187  vRecv >> ptx;
4188  const CTransaction& tx = *ptx;
4189 
4190  const uint256& txid = ptx->GetHash();
4191  const uint256& wtxid = ptx->GetWitnessHash();
4192 
4193  const uint256& hash = peer->m_wtxid_relay ? wtxid : txid;
4194  AddKnownTx(*peer, hash);
4195 
4196  LOCK(cs_main);
4197 
4198  m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
4199  if (tx.HasWitness()) m_txrequest.ReceivedResponse(pfrom.GetId(), wtxid);
4200 
4201  // We do the AlreadyHaveTx() check using wtxid, rather than txid - in the
4202  // absence of witness malleation, this is strictly better, because the
4203  // recent rejects filter may contain the wtxid but rarely contains
4204  // the txid of a segwit transaction that has been rejected.
4205  // In the presence of witness malleation, it's possible that by only
4206  // doing the check with wtxid, we could overlook a transaction which
4207  // was confirmed with a different witness, or exists in our mempool
4208  // with a different witness, but this has limited downside:
4209  // mempool validation does its own lookup of whether we have the txid
4210  // already; and an adversary can already relay us old transactions
4211  // (older than our recency filter) if trying to DoS us, without any need
4212  // for witness malleation.
4213  if (AlreadyHaveTx(GenTxid::Wtxid(wtxid))) {
4215  // Always relay transactions received from peers with forcerelay
4216  // permission, even if they were already in the mempool, allowing
4217  // the node to function as a gateway for nodes hidden behind it.
4218  if (!m_mempool.exists(GenTxid::Txid(tx.GetHash()))) {
4219  LogPrintf("Not relaying non-mempool transaction %s (wtxid=%s) from forcerelay peer=%d\n",
4220  tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), pfrom.GetId());
4221  } else {
4222  LogPrintf("Force relaying tx %s (wtxid=%s) from peer=%d\n",
4223  tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), pfrom.GetId());
4224  RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
4225  }
4226  }
4227  // If a tx is detected by m_recent_rejects it is ignored. Because we haven't
4228  // submitted the tx to our mempool, we won't have computed a DoS
4229  // score for it or determined exactly why we consider it invalid.
4230  //
4231  // This means we won't penalize any peer subsequently relaying a DoSy
4232  // tx (even if we penalized the first peer who gave it to us) because
4233  // we have to account for m_recent_rejects showing false positives. In
4234  // other words, we shouldn't penalize a peer if we aren't *sure* they
4235  // submitted a DoSy tx.
4236  //
4237  // Note that m_recent_rejects doesn't just record DoSy or invalid
4238  // transactions, but any tx not accepted by the mempool, which may be
4239  // due to node policy (vs. consensus). So we can't blanket penalize a
4240  // peer simply for relaying a tx that our m_recent_rejects has caught,
4241  // regardless of false positives.
4242  return;
4243  }
4244 
4245  const MempoolAcceptResult result = m_chainman.ProcessTransaction(ptx);
4246  const TxValidationState& state = result.m_state;
4247 
4249  // As this version of the transaction was acceptable, we can forget about any
4250  // requests for it.
4251  m_txrequest.ForgetTxHash(tx.GetHash());
4252  m_txrequest.ForgetTxHash(tx.GetWitnessHash());
4253  RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
4254  m_orphanage.AddChildrenToWorkSet(tx);
4255 
4256  pfrom.m_last_tx_time = GetTime<std::chrono::seconds>();
4257 
4258  LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n",
4259  pfrom.GetId(),
4260  tx.GetHash().ToString(),
4261  tx.GetWitnessHash().ToString(),
4262  m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000);
4263 
4264  for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) {
4265  AddToCompactExtraTransactions(removedTx);
4266  }
4267  }
4269  {
4270  bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected
4271 
4272  // Deduplicate parent txids, so that we don't have to loop over
4273  // the same parent txid more than once down below.
4274  std::vector<uint256> unique_parents;
4275  unique_parents.reserve(tx.vin.size());
4276  for (const CTxIn& txin : tx.vin) {
4277  // We start with all parents, and then remove duplicates below.
4278  unique_parents.push_back(txin.prevout.hash);
4279  }
4280  std::sort(unique_parents.begin(), unique_parents.end());
4281  unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end());
4282  for (const uint256& parent_txid : unique_parents) {
4283  if (m_recent_rejects.contains(parent_txid)) {
4284  fRejectedParents = true;
4285  break;
4286  }
4287  }
4288  if (!fRejectedParents) {
4289  const auto current_time{GetTime<std::chrono::microseconds>()};
4290 
4291  for (const uint256& parent_txid : unique_parents) {
4292  // Here, we only have the txid (and not wtxid) of the
4293  // inputs, so we only request in txid mode, even for
4294  // wtxidrelay peers.
4295  // Eventually we should replace this with an improved
4296  // protocol for getting all unconfirmed parents.
4297  const auto gtxid{GenTxid::Txid(parent_txid)};
4298  AddKnownTx(*peer, parent_txid);
4299  if (!AlreadyHaveTx(gtxid)) AddTxAnnouncement(pfrom, gtxid, current_time);
4300  }
4301 
4302  if (m_orphanage.AddTx(ptx, pfrom.GetId())) {
4303  AddToCompactExtraTransactions(ptx);
4304  }
4305 
4306  // Once added to the orphan pool, a tx is considered AlreadyHave, and we shouldn't request it anymore.
4307  m_txrequest.ForgetTxHash(tx.GetHash());
4308  m_txrequest.ForgetTxHash(tx.GetWitnessHash());
4309 
4310  // DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789)
4311  m_orphanage.LimitOrphans(m_opts.max_orphan_txs);
4312  } else {
4313  LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s (wtxid=%s)\n",
4314  tx.GetHash().ToString(),
4315  tx.GetWitnessHash().ToString());
4316  // We will continue to reject this tx since it has rejected
4317  // parents so avoid re-requesting it from other peers.
4318  // Here we add both the txid and the wtxid, as we know that
4319  // regardless of what witness is provided, we will not accept
4320  // this, so we don't need to allow for redownload of this txid
4321  // from any of our non-wtxidrelay peers.
4322  m_recent_rejects.insert(tx.GetHash());
4323  m_recent_rejects.insert(tx.GetWitnessHash());
4324  m_txrequest.ForgetTxHash(tx.GetHash());
4325  m_txrequest.ForgetTxHash(tx.GetWitnessHash());
4326  }
4327  } else {
4329  // We can add the wtxid of this transaction to our reject filter.
4330  // Do not add txids of witness transactions or witness-stripped
4331  // transactions to the filter, as they can have been malleated;
4332  // adding such txids to the reject filter would potentially
4333  // interfere with relay of valid transactions from peers that
4334  // do not support wtxid-based relay. See
4335  // https://github.com/bitcoin/bitcoin/issues/8279 for details.
4336  // We can remove this restriction (and always add wtxids to
4337  // the filter even for witness stripped transactions) once
4338  // wtxid-based relay is broadly deployed.
4339  // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
4340  // for concerns around weakening security of unupgraded nodes
4341  // if we start doing this too early.
4342  m_recent_rejects.insert(tx.GetWitnessHash());
4343  m_txrequest.ForgetTxHash(tx.GetWitnessHash());
4344  // If the transaction failed for TX_INPUTS_NOT_STANDARD,
4345  // then we know that the witness was irrelevant to the policy
4346  // failure, since this check depends only on the txid
4347  // (the scriptPubKey being spent is covered by the txid).
4348  // Add the txid to the reject filter to prevent repeated
4349  // processing of this transaction in the event that child
4350  // transactions are later received (resulting in
4351  // parent-fetching by txid via the orphan-handling logic).
4353  m_recent_rejects.insert(tx.GetHash());
4354  m_txrequest.ForgetTxHash(tx.GetHash());
4355  }
4356  if (RecursiveDynamicUsage(*ptx) < 100000) {
4357  AddToCompactExtraTransactions(ptx);
4358  }
4359  }
4360  }
4361 
4362  if (state.IsInvalid()) {
4363  LogPrint(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n",
4364  tx.GetHash().ToString(),
4365  tx.GetWitnessHash().ToString(),
4366  pfrom.GetId(),
4367  state.ToString());
4368  MaybePunishNodeForTx(pfrom.GetId(), state);
4369  }
4370  return;
4371  }
4372 
4373  if (msg_type == NetMsgType::CMPCTBLOCK)
4374  {
4375  // Ignore cmpctblock received while importing
4376  if (m_chainman.m_blockman.LoadingBlocks()) {
4377  LogPrint(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId());
4378  return;
4379  }
4380 
4381  CBlockHeaderAndShortTxIDs cmpctblock;
4382  vRecv >> cmpctblock;
4383 
4384  bool received_new_header = false;
4385  const auto blockhash = cmpctblock.header.GetHash();
4386 
4387  {
4388  LOCK(cs_main);
4389 
4390  const CBlockIndex* prev_block = m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock);
4391  if (!prev_block) {
4392  // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
4393  if (!m_chainman.IsInitialBlockDownload()) {
4394  MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer);
4395  }
4396  return;
4397  } else if (prev_block->nChainWork + CalculateHeadersWork({cmpctblock.header}) < GetAntiDoSWorkThreshold()) {
4398  // If we get a low-work header in a compact block, we can ignore it.
4399  LogPrint(BCLog::NET, "Ignoring low-work compact block from peer %d\n", pfrom.GetId());
4400  return;
4401  }
4402 
4403  if (!m_chainman.m_blockman.LookupBlockIndex(blockhash)) {
4404  received_new_header = true;
4405  }
4406  }
4407 
4408  const CBlockIndex *pindex = nullptr;
4409  BlockValidationState state;
4410  if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header}, /*min_pow_checked=*/true, state, &pindex)) {
4411  if (state.IsInvalid()) {
4412  MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block=*/true, "invalid header via cmpctblock");
4413  return;
4414  }
4415  }
4416 
4417  if (received_new_header) {
4418  LogPrintfCategory(BCLog::NET, "Saw new cmpctblock header hash=%s peer=%d\n",
4419  blockhash.ToString(), pfrom.GetId());
4420  }
4421 
4422  bool fProcessBLOCKTXN = false;
4423 
4424  // If we end up treating this as a plain headers message, call that as well
4425  // without cs_main.
4426  bool fRevertToHeaderProcessing = false;
4427 
4428  // Keep a CBlock for "optimistic" compactblock reconstructions (see
4429  // below)
4430  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4431  bool fBlockReconstructed = false;
4432 
4433  {
4434  LOCK(cs_main);
4435  // If AcceptBlockHeader returned true, it set pindex
4436  assert(pindex);
4437  UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
4438 
4439  CNodeState *nodestate = State(pfrom.GetId());
4440 
4441  // If this was a new header with more work than our tip, update the
4442  // peer's last block announcement time
4443  if (received_new_header && pindex->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
4444  nodestate->m_last_block_announcement = GetTime();
4445  }
4446 
4447  if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
4448  return;
4449 
4450  auto range_flight = mapBlocksInFlight.equal_range(pindex->GetBlockHash());
4451  size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
4452  bool requested_block_from_this_peer{false};
4453 
4454  // Multimap ensures ordering of outstanding requests. It's either empty or first in line.
4455  bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());
4456 
4457  while (range_flight.first != range_flight.second) {
4458  if (range_flight.first->second.first == pfrom.GetId()) {
4459  requested_block_from_this_peer = true;
4460  break;
4461  }
4462  range_flight.first++;
4463  }
4464 
4465  if (pindex->nChainWork <= m_chainman.ActiveChain().Tip()->nChainWork || // We know something better
4466  pindex->nTx != 0) { // We had this block at some point, but pruned it
4467  if (requested_block_from_this_peer) {
4468  // We requested this block for some reason, but our mempool will probably be useless
4469  // so we just grab the block via normal getdata
4470  std::vector<CInv> vInv(1);
4471  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
4472  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
4473  }
4474  return;
4475  }
4476 
4477  // If we're not close to tip yet, give up and let parallel block fetch work its magic
4478  if (!already_in_flight && !CanDirectFetch()) {
4479  return;
4480  }
4481 
4482  // We want to be a bit conservative just to be extra careful about DoS
4483  // possibilities in compact block processing...
4484  if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) {
4485  if ((already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK && nodestate->vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
4486  requested_block_from_this_peer) {
4487  std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
4488  if (!BlockRequested(pfrom.GetId(), *pindex, &queuedBlockIt)) {
4489  if (!(*queuedBlockIt)->partialBlock)
4490  (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool));
4491  else {
4492  // The block was already in flight using compact blocks from the same peer
4493  LogPrint(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
4494  return;
4495  }
4496  }
4497 
4498  PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
4499  ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
4500  if (status == READ_STATUS_INVALID) {
4501  RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
4502  Misbehaving(*peer, 100, "invalid compact block");
4503  return;
4504  } else if (status == READ_STATUS_FAILED) {
4505  if (first_in_flight) {
4506  // Duplicate txindexes, the block is now in-flight, so just request it
4507  std::vector<CInv> vInv(1);
4508  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
4509  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
4510  } else {
4511  // Give up for this peer and wait for other peer(s)
4512  RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId());
4513  }
4514  return;
4515  }
4516 
4518  for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
4519  if (!partialBlock.IsTxAvailable(i))
4520  req.indexes.push_back(i);
4521  }
4522  if (req.indexes.empty()) {
4523  fProcessBLOCKTXN = true;
4524  } else if (first_in_flight) {
4525  // We will try to round-trip any compact blocks we get on failure,
4526  // as long as it's first...
4527  req.blockhash = pindex->GetBlockHash();
4528  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
4529  } else if (pfrom.m_bip152_highbandwidth_to &&
4530  (!pfrom.IsInboundConn() ||
4531  IsBlockRequestedFromOutbound(blockhash) ||
4532  already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK - 1)) {
4533  // ... or it's a hb relay peer and:
4534  // - peer is outbound, or
4535  // - we already have an outbound attempt in flight(so we'll take what we can get), or
4536  // - it's not the final parallel download slot (which we may reserve for first outbound)
4537  req.blockhash = pindex->GetBlockHash();
4538  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
4539  } else {
4540  // Give up for this peer and wait for other peer(s)
4541  RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId());
4542  }
4543  } else {
4544  // This block is either already in flight from a different
4545  // peer, or this peer has too many blocks outstanding to
4546  // download from.
4547  // Optimistically try to reconstruct anyway since we might be
4548  // able to without any round trips.
4549  PartiallyDownloadedBlock tempBlock(&m_mempool);
4550  ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
4551  if (status != READ_STATUS_OK) {
4552  // TODO: don't ignore failures
4553  return;
4554  }
4555  std::vector<CTransactionRef> dummy;
4556  status = tempBlock.FillBlock(*pblock, dummy);
4557  if (status == READ_STATUS_OK) {
4558  fBlockReconstructed = true;
4559  }
4560  }
4561  } else {
4562  if (requested_block_from_this_peer) {
4563  // We requested this block, but its far into the future, so our
4564  // mempool will probably be useless - request the block normally
4565  std::vector<CInv> vInv(1);
4566  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
4567  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
4568  return;
4569  } else {
4570  // If this was an announce-cmpctblock, we want the same treatment as a header message
4571  fRevertToHeaderProcessing = true;
4572  }
4573  }
4574  } // cs_main
4575 
4576  if (fProcessBLOCKTXN) {
4577  BlockTransactions txn;
4578  txn.blockhash = blockhash;
4579  return ProcessCompactBlockTxns(pfrom, *peer, txn);
4580  }
4581 
4582  if (fRevertToHeaderProcessing) {
4583  // Headers received from HB compact block peers are permitted to be
4584  // relayed before full validation (see BIP 152), so we don't want to disconnect
4585  // the peer if the header turns out to be for an invalid block.
4586  // Note that if a peer tries to build on an invalid chain, that
4587  // will be detected and the peer will be disconnected/discouraged.
4588  return ProcessHeadersMessage(pfrom, *peer, {cmpctblock.header}, /*via_compact_block=*/true);
4589  }
4590 
4591  if (fBlockReconstructed) {
4592  // If we got here, we were able to optimistically reconstruct a
4593  // block that is in flight from some other peer.
4594  {
4595  LOCK(cs_main);
4596  mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false));
4597  }
4598  // Setting force_processing to true means that we bypass some of
4599  // our anti-DoS protections in AcceptBlock, which filters
4600  // unrequested blocks that might be trying to waste our resources
4601  // (eg disk space). Because we only try to reconstruct blocks when
4602  // we're close to caught up (via the CanDirectFetch() requirement
4603  // above, combined with the behavior of not requesting blocks until
4604  // we have a chain with at least the minimum chain work), and we ignore
4605  // compact blocks with less work than our tip, it is safe to treat
4606  // reconstructed compact blocks as having been requested.
4607  ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true);
4608  LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
4609  if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
4610  // Clear download state for this block, which is in
4611  // process from some other peer. We do this after calling
4612  // ProcessNewBlock so that a malleated cmpctblock announcement
4613  // can't be used to interfere with block relay.
4614  RemoveBlockRequest(pblock->GetHash(), std::nullopt);
4615  }
4616  }
4617  return;
4618  }
4619 
4620  if (msg_type == NetMsgType::BLOCKTXN)
4621  {
4622  // Ignore blocktxn received while importing
4623  if (m_chainman.m_blockman.LoadingBlocks()) {
4624  LogPrint(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId());
4625  return;
4626  }
4627 
4628  BlockTransactions resp;
4629  vRecv >> resp;
4630 
4631  return ProcessCompactBlockTxns(pfrom, *peer, resp);
4632  }
4633 
4634  if (msg_type == NetMsgType::HEADERS)
4635  {
4636  // Ignore headers received while importing
4637  if (m_chainman.m_blockman.LoadingBlocks()) {
4638  LogPrint(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId());
4639  return;
4640  }
4641 
4642  // Assume that this is in response to any outstanding getheaders
4643  // request we may have sent, and clear out the time of our last request
4644  peer->m_last_getheaders_timestamp = {};
4645 
4646  std::vector<CBlockHeader> headers;
4647 
4648  // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
4649  unsigned int nCount = ReadCompactSize(vRecv);
4650  if (nCount > MAX_HEADERS_RESULTS) {
4651  Misbehaving(*peer, 20, strprintf("headers message size = %u", nCount));
4652  return;
4653  }
4654  headers.resize(nCount);
4655  for (unsigned int n = 0; n < nCount; n++) {
4656  vRecv >> headers[n];
4657  ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
4658  }
4659 
4660  ProcessHeadersMessage(pfrom, *peer, std::move(headers), /*via_compact_block=*/false);
4661 
4662  // Check if the headers presync progress needs to be reported to validation.
4663  // This needs to be done without holding the m_headers_presync_mutex lock.
4664  if (m_headers_presync_should_signal.exchange(false)) {
4665  HeadersPresyncStats stats;
4666  {
4667  LOCK(m_headers_presync_mutex);
4668  auto it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
4669  if (it != m_headers_presync_stats.end()) stats = it->second;
4670  }
4671  if (stats.second) {
4672  m_chainman.ReportHeadersPresync(stats.first, stats.second->first, stats.second->second);
4673  }
4674  }
4675 
4676  return;
4677  }
4678 
4679  if (msg_type == NetMsgType::BLOCK)
4680  {
4681  // Ignore block received while importing
4682  if (m_chainman.m_blockman.LoadingBlocks()) {
4683  LogPrint(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId());
4684  return;
4685  }
4686 
4687  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4688  vRecv >> *pblock;
4689 
4690  LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId());
4691 
4692  const CBlockIndex* prev_block{WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock))};
4693 
4694  // Check for possible mutation if it connects to something we know so we can check for DEPLOYMENT_SEGWIT being active
4695  if (prev_block && IsBlockMutated(/*block=*/*pblock,
4696  /*check_witness_root=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT))) {
4697  LogPrint(BCLog::NET, "Received mutated block from peer=%d\n", peer->m_id);
4698  Misbehaving(*peer, 100, "mutated block");
4699  WITH_LOCK(cs_main, RemoveBlockRequest(pblock->GetHash(), peer->m_id));
4700  return;
4701  }
4702 
4703  bool forceProcessing = false;
4704  const uint256 hash(pblock->GetHash());
4705  bool min_pow_checked = false;
4706  {
4707  LOCK(cs_main);
4708  // Always process the block if we requested it, since we may
4709  // need it even when it's not a candidate for a new best tip.
4710  forceProcessing = IsBlockRequested(hash);
4711  RemoveBlockRequest(hash, pfrom.GetId());
4712  // mapBlockSource is only used for punishing peers and setting
4713  // which peers send us compact blocks, so the race between here and
4714  // cs_main in ProcessNewBlock is fine.
4715  mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
4716 
4717  // Check work on this block against our anti-dos thresholds.
4718  if (prev_block && prev_block->nChainWork + CalculateHeadersWork({pblock->GetBlockHeader()}) >= GetAntiDoSWorkThreshold()) {
4719  min_pow_checked = true;
4720  }
4721  }
4722  ProcessBlock(pfrom, pblock, forceProcessing, min_pow_checked);
4723  return;
4724  }
4725 
4726  if (msg_type == NetMsgType::GETADDR) {
4727  // This asymmetric behavior for inbound and outbound connections was introduced
4728  // to prevent a fingerprinting attack: an attacker can send specific fake addresses
4729  // to users' AddrMan and later request them by sending getaddr messages.
4730  // Making nodes which are behind NAT and can only make outgoing connections ignore
4731  // the getaddr message mitigates the attack.
4732  if (!pfrom.IsInboundConn()) {
4733  LogPrint(BCLog::NET, "Ignoring \"getaddr\" from %s connection. peer=%d\n", pfrom.ConnectionTypeAsString(), pfrom.GetId());
4734  return;
4735  }
4736 
4737  // Since this must be an inbound connection, SetupAddressRelay will
4738  // never fail.
4739  Assume(SetupAddressRelay(pfrom, *peer));
4740 
4741  // Only send one GetAddr response per connection to reduce resource waste
4742  // and discourage addr stamping of INV announcements.
4743  if (peer->m_getaddr_recvd) {
4744  LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId());
4745  return;
4746  }
4747  peer->m_getaddr_recvd = true;
4748 
4749  peer->m_addrs_to_send.clear();
4750  std::vector<CAddress> vAddr;
4752  vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND, /*network=*/std::nullopt);
4753  } else {
4754  vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
4755  }
4756  for (const CAddress &addr : vAddr) {
4757  PushAddress(*peer, addr);
4758  }
4759  return;
4760  }
4761 
4762  if (msg_type == NetMsgType::MEMPOOL) {
4763  // Only process received mempool messages if we advertise NODE_BLOOM
4764  // or if the peer has mempool permissions.
4765  if (!(peer->m_our_services & NODE_BLOOM) && !pfrom.HasPermission(NetPermissionFlags::Mempool))
4766  {
4768  {
4769  LogPrint(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom.GetId());
4770  pfrom.fDisconnect = true;
4771  }
4772  return;
4773  }
4774 
4775  if (m_connman.OutboundTargetReached(false) && !pfrom.HasPermission(NetPermissionFlags::Mempool))
4776  {
4778  {
4779  LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom.GetId());
4780  pfrom.fDisconnect = true;
4781  }
4782  return;
4783  }
4784 
4785  if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4786  LOCK(tx_relay->m_tx_inventory_mutex);
4787  tx_relay->m_send_mempool = true;
4788  }
4789  return;
4790  }
4791 
4792  if (msg_type == NetMsgType::PING) {
4793  if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
4794  uint64_t nonce = 0;
4795  vRecv >> nonce;
4796  // Echo the message back with the nonce. This allows for two useful features:
4797  //
4798  // 1) A remote node can quickly check if the connection is operational
4799  // 2) Remote nodes can measure the latency of the network thread. If this node
4800  // is overloaded it won't respond to pings quickly and the remote node can
4801  // avoid sending us more work, like chain download requests.
4802  //
4803  // The nonce stops the remote getting confused between different pings: without
4804  // it, if the remote node sends a ping once per second and this node takes 5
4805  // seconds to respond to each, the 5th ping the remote sends would appear to
4806  // return very quickly.
4807  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
4808  }
4809  return;
4810  }
4811 
4812  if (msg_type == NetMsgType::PONG) {
4813  const auto ping_end = time_received;
4814  uint64_t nonce = 0;
4815  size_t nAvail = vRecv.in_avail();
4816  bool bPingFinished = false;
4817  std::string sProblem;
4818 
4819  if (nAvail >= sizeof(nonce)) {
4820  vRecv >> nonce;
4821 
4822  // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
4823  if (peer->m_ping_nonce_sent != 0) {
4824  if (nonce == peer->m_ping_nonce_sent) {
4825  // Matching pong received, this ping is no longer outstanding
4826  bPingFinished = true;
4827  const auto ping_time = ping_end - peer->m_ping_start.load();
4828  if (ping_time.count() >= 0) {
4829  // Let connman know about this successful ping-pong
4830  pfrom.PongReceived(ping_time);
4831  } else {
4832  // This should never happen
4833  sProblem = "Timing mishap";
4834  }
4835  } else {
4836  // Nonce mismatches are normal when pings are overlapping
4837  sProblem = "Nonce mismatch";
4838  if (nonce == 0) {
4839  // This is most likely a bug in another implementation somewhere; cancel this ping
4840  bPingFinished = true;
4841  sProblem = "Nonce zero";
4842  }
4843  }
4844  } else {
4845  sProblem = "Unsolicited pong without ping";
4846  }
4847  } else {
4848  // This is most likely a bug in another implementation somewhere; cancel this ping
4849  bPingFinished = true;
4850  sProblem = "Short payload";
4851  }
4852 
4853  if (!(sProblem.empty())) {
4854  LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
4855  pfrom.GetId(),
4856  sProblem,
4857  peer->m_ping_nonce_sent,
4858  nonce,
4859  nAvail);
4860  }
4861  if (bPingFinished) {
4862  peer->m_ping_nonce_sent = 0;
4863  }
4864  return;
4865  }
4866 
4867  if (msg_type == NetMsgType::FILTERLOAD) {
4868  if (!(peer->m_our_services & NODE_BLOOM)) {
4869  LogPrint(BCLog::NET, "filterload received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
4870  pfrom.fDisconnect = true;
4871  return;
4872  }
4873  CBloomFilter filter;
4874  vRecv >> filter;
4875 
4876  if (!filter.IsWithinSizeConstraints())
4877  {
4878  // There is no excuse for sending a too-large filter
4879  Misbehaving(*peer, 100, "too-large bloom filter");
4880  } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4881  {
4882  LOCK(tx_relay->m_bloom_filter_mutex);
4883  tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
4884  tx_relay->m_relay_txs = true;
4885  }
4886  pfrom.m_bloom_filter_loaded = true;
4887  pfrom.m_relays_txs = true;
4888  }
4889  return;
4890  }
4891 
4892  if (msg_type == NetMsgType::FILTERADD) {
4893  if (!(peer->m_our_services & NODE_BLOOM)) {
4894  LogPrint(BCLog::NET, "filteradd received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
4895  pfrom.fDisconnect = true;
4896  return;
4897  }
4898  std::vector<unsigned char> vData;
4899  vRecv >> vData;
4900 
4901  // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
4902  // and thus, the maximum size any matched object can have) in a filteradd message
4903  bool bad = false;
4904  if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
4905  bad = true;
4906  } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4907  LOCK(tx_relay->m_bloom_filter_mutex);
4908  if (tx_relay->m_bloom_filter) {
4909  tx_relay->m_bloom_filter->insert(vData);
4910  } else {
4911  bad = true;
4912  }
4913  }
4914  if (bad) {
4915  Misbehaving(*peer, 100, "bad filteradd message");
4916  }
4917  return;
4918  }
4919 
4920  if (msg_type == NetMsgType::FILTERCLEAR) {
4921  if (!(peer->m_our_services & NODE_BLOOM)) {
4922  LogPrint(BCLog::NET, "filterclear received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
4923  pfrom.fDisconnect = true;
4924  return;
4925  }
4926  auto tx_relay = peer->GetTxRelay();
4927  if (!tx_relay) return;
4928 
4929  {
4930  LOCK(tx_relay->m_bloom_filter_mutex);
4931  tx_relay->m_bloom_filter = nullptr;
4932  tx_relay->m_relay_txs = true;
4933  }
4934  pfrom.m_bloom_filter_loaded = false;
4935  pfrom.m_relays_txs = true;
4936  return;
4937  }
4938 
4939  if (msg_type == NetMsgType::FEEFILTER) {
4940  CAmount newFeeFilter = 0;
4941  vRecv >> newFeeFilter;
4942  if (MoneyRange(newFeeFilter)) {
4943  if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4944  tx_relay->m_fee_filter_received = newFeeFilter;
4945  }
4946  LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
4947  }
4948  return;
4949  }
4950 
4951  if (msg_type == NetMsgType::GETCFILTERS) {
4952  ProcessGetCFilters(pfrom, *peer, vRecv);
4953  return;
4954  }
4955 
4956  if (msg_type == NetMsgType::GETCFHEADERS) {
4957  ProcessGetCFHeaders(pfrom, *peer, vRecv);
4958  return;
4959  }
4960 
4961  if (msg_type == NetMsgType::GETCFCHECKPT) {
4962  ProcessGetCFCheckPt(pfrom, *peer, vRecv);
4963  return;
4964  }
4965 
4966  if (msg_type == NetMsgType::NOTFOUND) {
4967  std::vector<CInv> vInv;
4968  vRecv >> vInv;
4970  LOCK(::cs_main);
4971  for (CInv &inv : vInv) {
4972  if (inv.IsGenTxMsg()) {
4973  // If we receive a NOTFOUND message for a tx we requested, mark the announcement for it as
4974  // completed in TxRequestTracker.
4975  m_txrequest.ReceivedResponse(pfrom.GetId(), inv.hash);
4976  }
4977  }
4978  }
4979  return;
4980  }
4981 
4982  // Ignore unknown commands for extensibility
4983  LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
4984  return;
4985 }
4986 
4987 bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer)
4988 {
4989  {
4990  LOCK(peer.m_misbehavior_mutex);
4991 
4992  // There's nothing to do if the m_should_discourage flag isn't set
4993  if (!peer.m_should_discourage) return false;
4994 
4995  peer.m_should_discourage = false;
4996  } // peer.m_misbehavior_mutex
4997 
4999  // We never disconnect or discourage peers for bad behavior if they have NetPermissionFlags::NoBan permission
5000  LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
5001  return false;
5002  }
5003 
5004  if (pnode.IsManualConn()) {
5005  // We never disconnect or discourage manual peers for bad behavior
5006  LogPrintf("Warning: not punishing manually connected peer %d!\n", peer.m_id);
5007  return false;
5008  }
5009 
5010  if (pnode.addr.IsLocal()) {
5011  // We disconnect local peers for bad behavior but don't discourage (since that would discourage
5012  // all peers on the same local address)
5013  LogPrint(BCLog::NET, "Warning: disconnecting but not discouraging %s peer %d!\n",
5014  pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id);
5015  pnode.fDisconnect = true;
5016  return true;
5017  }
5018 
5019  // Normal case: Disconnect the peer and discourage all nodes sharing the address
5020  LogPrint(BCLog::NET, "Disconnecting and discouraging peer %d!\n", peer.m_id);
5021  if (m_banman) m_banman->Discourage(pnode.addr);
5022  m_connman.DisconnectNode(pnode.addr);
5023  return true;
5024 }
5025 
// Drain pending work for this peer: serve queued getdata requests, reconsider
// any orphan transactions ready for this peer, then poll and process one
// network message. Returns true when more work remains for this peer (so the
// caller should schedule another pass), false otherwise.
bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
{
    AssertLockHeld(g_msgproc_mutex);

    PeerRef peer = GetPeerRef(pfrom->GetId());
    if (peer == nullptr) return false;

    {
        LOCK(peer->m_getdata_requests_mutex);
        // Serve any outstanding getdata requests before looking at new messages.
        if (!peer->m_getdata_requests.empty()) {
            ProcessGetData(*pfrom, *peer, interruptMsgProc);
        }
    }

    const bool processed_orphan = ProcessOrphanTx(*peer);

    if (pfrom->fDisconnect)
        return false;

    // An orphan was reconsidered; report more work so the caller comes back
    // before we pull a fresh message off the wire.
    if (processed_orphan) return true;

    // this maintains the order of responses
    // and prevents m_getdata_requests to grow unbounded
    {
        LOCK(peer->m_getdata_requests_mutex);
        if (!peer->m_getdata_requests.empty()) return true;
    }

    // Don't bother if send buffer is too full to respond anyway
    if (pfrom->fPauseSend) return false;

    auto poll_result{pfrom->PollMessage()};
    if (!poll_result) {
        // No message to process
        return false;
    }

    CNetMessage& msg{poll_result->first};
    bool fMoreWork = poll_result->second;

    // Tracepoint for observability (e.g. bpftrace); no behavioral effect.
    TRACE6(net, inbound_message,
        pfrom->GetId(),
        pfrom->m_addr_name.c_str(),
        pfrom->ConnectionTypeAsString().c_str(),
        msg.m_type.c_str(),
        msg.m_recv.size(),
        msg.m_recv.data()
    );

    if (m_opts.capture_messages) {
        CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true);
    }

    msg.SetVersion(pfrom->GetCommonVersion());

    try {
        ProcessMessage(*pfrom, msg.m_type, msg.m_recv, msg.m_time, interruptMsgProc);
        if (interruptMsgProc) return false;
        {
            LOCK(peer->m_getdata_requests_mutex);
            // The message we just processed may have queued getdata work.
            if (!peer->m_getdata_requests.empty()) fMoreWork = true;
        }
        // Does this peer have an orphan ready to reconsider?
        // (Note: we may have provided a parent for an orphan provided
        // by another peer that was already processed; in that case,
        // the extra work may not be noticed, possibly resulting in an
        // unnecessary 100ms delay)
        if (m_orphanage.HaveTxToReconsider(peer->m_id)) fMoreWork = true;
    } catch (const std::exception& e) {
        // A malformed or unexpected message must never take the node down;
        // log and move on.
        LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name());
    } catch (...) {
        LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size);
    }

    return fMoreWork;
}
5102 
5103 void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds)
5104 {
5106 
5107  CNodeState &state = *State(pto.GetId());
5108 
5109  if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) {
5110  // This is an outbound peer subject to disconnection if they don't
5111  // announce a block with as much work as the current tip within
5112  // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
5113  // their chain has more work than ours, we should sync to it,
5114  // unless it's invalid, in which case we should find that out and
5115  // disconnect from them elsewhere).
5116  if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork) {
5117  if (state.m_chain_sync.m_timeout != 0s) {
5118  state.m_chain_sync.m_timeout = 0s;
5119  state.m_chain_sync.m_work_header = nullptr;
5120  state.m_chain_sync.m_sent_getheaders = false;
5121  }
5122  } else if (state.m_chain_sync.m_timeout == 0s || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
5123  // Our best block known by this peer is behind our tip, and we're either noticing
5124  // that for the first time, OR this peer was able to catch up to some earlier point
5125  // where we checked against our tip.
5126  // Either way, set a new timeout based on current tip.
5127  state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
5128  state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
5129  state.m_chain_sync.m_sent_getheaders = false;
5130  } else if (state.m_chain_sync.m_timeout > 0s && time_in_seconds > state.m_chain_sync.m_timeout) {
5131  // No evidence yet that our peer has synced to a chain with work equal to that
5132  // of our tip, when we first detected it was behind. Send a single getheaders
5133  // message to give the peer a chance to update us.
5134  if (state.m_chain_sync.m_sent_getheaders) {
5135  // They've run out of time to catch up!
5136  LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>");
5137  pto.fDisconnect = true;
5138  } else {
5139  assert(state.m_chain_sync.m_work_header);
5140  // Here, we assume that the getheaders message goes out,
5141  // because it'll either go out or be skipped because of a
5142  // getheaders in-flight already, in which case the peer should
5143  // still respond to us with a sufficiently high work chain tip.
5144  MaybeSendGetHeaders(pto,
5145  GetLocator(state.m_chain_sync.m_work_header->pprev),
5146  peer);
5147  LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
5148  state.m_chain_sync.m_sent_getheaders = true;
5149  // Bump the timeout to allow a response, which could clear the timeout
5150  // (if the response shows the peer has synced), reset the timeout (if
5151  // the peer syncs to the required work but not to our tip), or result
5152  // in disconnect (if we advance to the timeout and pindexBestKnownBlock
5153  // has not sufficiently progressed)
5154  state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
5155  }
5156  }
5157  }
5158 }
5159 
5160 void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now)
5161 {
5162  // If we have any extra block-relay-only peers, disconnect the youngest unless
5163  // it's given us a block -- in which case, compare with the second-youngest, and
5164  // out of those two, disconnect the peer who least recently gave us a block.
5165  // The youngest block-relay-only peer would be the extra peer we connected
5166  // to temporarily in order to sync our tip; see net.cpp.
5167  // Note that we use higher nodeid as a measure for most recent connection.
5168  if (m_connman.GetExtraBlockRelayCount() > 0) {
5169  std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0}, next_youngest_peer{-1, 0};
5170 
5171  m_connman.ForEachNode([&](CNode* pnode) {
5172  if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) return;
5173  if (pnode->GetId() > youngest_peer.first) {
5174  next_youngest_peer = youngest_peer;
5175  youngest_peer.first = pnode->GetId();
5176  youngest_peer.second = pnode->m_last_block_time;
5177  }
5178  });
5179  NodeId to_disconnect = youngest_peer.first;
5180  if (youngest_peer.second > next_youngest_peer.second) {
5181  // Our newest block-relay-only peer gave us a block more recently;
5182  // disconnect our second youngest.
5183  to_disconnect = next_youngest_peer.first;
5184  }
5185  m_connman.ForNode(to_disconnect, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
5187  // Make sure we're not getting a block right now, and that
5188  // we've been connected long enough for this eviction to happen
5189  // at all.
5190  // Note that we only request blocks from a peer if we learn of a
5191  // valid headers chain with at least as much work as our tip.
5192  CNodeState *node_state = State(pnode->GetId());
5193  if (node_state == nullptr ||
5194  (now - pnode->m_connected >= MINIMUM_CONNECT_TIME && node_state->vBlocksInFlight.empty())) {
5195  pnode->fDisconnect = true;
5196  LogPrint(BCLog::NET, "disconnecting extra block-relay-only peer=%d (last block received at time %d)\n",
5197  pnode->GetId(), count_seconds(pnode->m_last_block_time));
5198  return true;
5199  } else {
5200  LogPrint(BCLog::NET, "keeping block-relay-only peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
5201  pnode->GetId(), count_seconds(pnode->m_connected), node_state->vBlocksInFlight.size());
5202  }
5203  return false;
5204  });
5205  }
5206 
5207  // Check whether we have too many outbound-full-relay peers
5208  if (m_connman.GetExtraFullOutboundCount() > 0) {
5209  // If we have more outbound-full-relay peers than we target, disconnect one.
5210  // Pick the outbound-full-relay peer that least recently announced
5211  // us a new block, with ties broken by choosing the more recent
5212  // connection (higher node id)
5213  // Protect peers from eviction if we don't have another connection
5214  // to their network, counting both outbound-full-relay and manual peers.
5215  NodeId worst_peer = -1;
5216  int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
5217 
5218  m_connman.ForEachNode([&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_connman.GetNodesMutex()) {
5220 
5221  // Only consider outbound-full-relay peers that are not already
5222  // marked for disconnection
5223  if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) return;
5224  CNodeState *state = State(pnode->GetId());
5225  if (state == nullptr) return; // shouldn't be possible, but just in case
5226  // Don't evict our protected peers
5227  if (state->m_chain_sync.m_protect) return;
5228  // If this is the only connection on a particular network that is
5229  // OUTBOUND_FULL_RELAY or MANUAL, protect it.
5230  if (!m_connman.MultipleManualOrFullOutboundConns(pnode->addr.GetNetwork())) return;
5231  if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
5232  worst_peer = pnode->GetId();
5233  oldest_block_announcement = state->m_last_block_announcement;
5234  }
5235  });
5236  if (worst_peer != -1) {
5237  bool disconnected = m_connman.ForNode(worst_peer, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
5239 
5240  // Only disconnect a peer that has been connected to us for
5241  // some reasonable fraction of our check-frequency, to give
5242  // it time for new information to have arrived.
5243  // Also don't disconnect any peer we're trying to download a
5244  // block from.
5245  CNodeState &state = *State(pnode->GetId());
5246  if (now - pnode->m_connected > MINIMUM_CONNECT_TIME && state.vBlocksInFlight.empty()) {
5247  LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
5248  pnode->fDisconnect = true;
5249  return true;
5250  } else {
5251  LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
5252  pnode->GetId(), count_seconds(pnode->m_connected), state.vBlocksInFlight.size());
5253  return false;
5254  }
5255  });
5256  if (disconnected) {
5257  // If we disconnected an extra peer, that means we successfully
5258  // connected to at least one peer after the last time we
5259  // detected a stale tip. Don't try any more extra peers until
5260  // we next detect a stale tip, to limit the load we put on the
5261  // network from these extra connections.
5262  m_connman.SetTryNewOutboundPeer(false);
5263  }
5264  }
5265  }
5266 }
5267 
5268 void PeerManagerImpl::CheckForStaleTipAndEvictPeers()
5269 {
5270  LOCK(cs_main);
5271 
5272  auto now{GetTime<std::chrono::seconds>()};
5273 
5274  EvictExtraOutboundPeers(now);
5275 
5276  if (now > m_stale_tip_check_time) {
5277  // Check whether our tip is stale, and if so, allow using an extra
5278  // outbound peer
5279  if (!m_chainman.m_blockman.LoadingBlocks() && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) {
5280  LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n",
5281  count_seconds(now - m_last_tip_update.load()));
5282  m_connman.SetTryNewOutboundPeer(true);
5283  } else if (m_connman.GetTryNewOutboundPeer()) {
5284  m_connman.SetTryNewOutboundPeer(false);
5285  }
5286  m_stale_tip_check_time = now + STALE_CHECK_INTERVAL;
5287  }
5288 
5289  if (!m_initial_sync_finished && CanDirectFetch()) {
5290  m_connman.StartExtraBlockRelayPeers();
5291  m_initial_sync_finished = true;
5292  }
5293 }
5294 
5295 void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now)
5296 {
5297  if (m_connman.ShouldRunInactivityChecks(node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
5298  peer.m_ping_nonce_sent &&
5299  now > peer.m_ping_start.load() + TIMEOUT_INTERVAL)
5300  {
5301  // The ping timeout is using mocktime. To disable the check during
5302  // testing, increase -peertimeout.
5303  LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), peer.m_id);
5304  node_to.fDisconnect = true;
5305  return;
5306  }
5307 
5308  const CNetMsgMaker msgMaker(node_to.GetCommonVersion());
5309  bool pingSend = false;
5310 
5311  if (peer.m_ping_queued) {
5312  // RPC ping request by user
5313  pingSend = true;
5314  }
5315 
5316  if (peer.m_ping_nonce_sent == 0 && now > peer.m_ping_start.load() + PING_INTERVAL) {
5317  // Ping automatically sent as a latency probe & keepalive.
5318  pingSend = true;
5319  }
5320 
5321  if (pingSend) {
5322  uint64_t nonce;
5323  do {
5324  nonce = GetRand<uint64_t>();
5325  } while (nonce == 0);
5326  peer.m_ping_queued = false;
5327  peer.m_ping_start = now;
5328  if (node_to.GetCommonVersion() > BIP0031_VERSION) {
5329  peer.m_ping_nonce_sent = nonce;
5330  m_connman.PushMessage(&node_to, msgMaker.Make(NetMsgType::PING, nonce));
5331  } else {
5332  // Peer is too old to support ping command with nonce, pong will never arrive.
5333  peer.m_ping_nonce_sent = 0;
5334  m_connman.PushMessage(&node_to, msgMaker.Make(NetMsgType::PING));
5335  }
5336  }
5337 }
5338 
// Flush this peer's pending addr relay: periodically self-announce our local
// address, and on the (randomized) addr-send schedule, filter the queued
// addresses against the peer's known-addr bloom filter and push the rest as
// one ADDR/ADDRV2 message.
void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time)
{
    // Nothing to do for non-address-relay peers
    if (!peer.m_addr_relay_enabled) return;

    LOCK(peer.m_addr_send_times_mutex);
    // Periodically advertise our local address to the peer.
    if (fListen && !m_chainman.IsInitialBlockDownload() &&
        peer.m_next_local_addr_send < current_time) {
        // If we've sent before, clear the bloom filter for the peer, so that our
        // self-announcement will actually go out.
        // This might be unnecessary if the bloom filter has already rolled
        // over since our last self-announcement, but there is only a small
        // bandwidth cost that we can incur by doing this (which happens
        // once a day on average).
        if (peer.m_next_local_addr_send != 0us) {
            peer.m_addr_known->reset();
        }
        if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) {
            CAddress local_addr{*local_service, peer.m_our_services, Now<NodeSeconds>()};
            PushAddress(peer, local_addr);
        }
        // Randomize the next self-announcement time to avoid fingerprinting.
        peer.m_next_local_addr_send = GetExponentialRand(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
    }

    // We sent an `addr` message to this peer recently. Nothing more to do.
    if (current_time <= peer.m_next_addr_send) return;

    peer.m_next_addr_send = GetExponentialRand(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);

    if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) {
        // Should be impossible since we always check size before adding to
        // m_addrs_to_send. Recover by trimming the vector.
        peer.m_addrs_to_send.resize(MAX_ADDR_TO_SEND);
    }

    // Remove addr records that the peer already knows about, and add new
    // addrs to the m_addr_known filter on the same pass.
    auto addr_already_known = [&peer](const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) {
        bool ret = peer.m_addr_known->contains(addr.GetKey());
        if (!ret) peer.m_addr_known->insert(addr.GetKey());
        return ret;
    };
    peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(), peer.m_addrs_to_send.end(), addr_already_known),
                               peer.m_addrs_to_send.end());

    // No addr messages to send
    if (peer.m_addrs_to_send.empty()) return;

    // Pick the message type and serialization encoding the peer negotiated.
    const char* msg_type;
    CNetAddr::Encoding ser_enc;
    if (peer.m_wants_addrv2) {
        msg_type = NetMsgType::ADDRV2;
        ser_enc = CNetAddr::Encoding::V2;
    } else {
        msg_type = NetMsgType::ADDR;
        ser_enc = CNetAddr::Encoding::V1;
    }
    m_connman.PushMessage(&node, CNetMsgMaker(node.GetCommonVersion()).Make(msg_type, WithParams(CAddress::SerParams{{ser_enc}, CAddress::Format::Network}, peer.m_addrs_to_send)));
    peer.m_addrs_to_send.clear();

    // we only send the big addr message once
    // (release the capacity that the one-off large response needed; clear()
    // above keeps capacity, so shrink explicitly when it was oversized)
    if (peer.m_addrs_to_send.capacity() > 40) {
        peer.m_addrs_to_send.shrink_to_fit();
    }
}
5405 
5406 void PeerManagerImpl::MaybeSendSendHeaders(CNode& node, Peer& peer)
5407 {
5408  // Delay sending SENDHEADERS (BIP 130) until we're done with an
5409  // initial-headers-sync with this peer. Receiving headers announcements for
5410  // new blocks while trying to sync their headers chain is problematic,
5411  // because of the state tracking done.
5412  if (!peer.m_sent_sendheaders && node.GetCommonVersion() >= SENDHEADERS_VERSION) {
5413  LOCK(cs_main);
5414  CNodeState &state = *State(node.GetId());
5415  if (state.pindexBestKnownBlock != nullptr &&
5416  state.pindexBestKnownBlock->nChainWork > m_chainman.MinimumChainWork()) {
5417  // Tell our peer we prefer to receive headers rather than inv's
5418  // We send this to non-NODE NETWORK peers as well, because even
5419  // non-NODE NETWORK peers can announce blocks (such as pruning
5420  // nodes)
5421  m_connman.PushMessage(&node, CNetMsgMaker(node.GetCommonVersion()).Make(NetMsgType::SENDHEADERS));
5422  peer.m_sent_sendheaders = true;
5423  }
5424  }
5425 }
5426 
5427 void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::microseconds current_time)
5428 {
5429  if (m_opts.ignore_incoming_txs) return;
5430  if (pto.GetCommonVersion() < FEEFILTER_VERSION) return;
5431  // peers with the forcerelay permission should not filter txs to us
5433  // Don't send feefilter messages to outbound block-relay-only peers since they should never announce
5434  // transactions to us, regardless of feefilter state.
5435  if (pto.IsBlockOnlyConn()) return;
5436 
5437  CAmount currentFilter = m_mempool.GetMinFee().GetFeePerK();
5438 
5439  if (m_chainman.IsInitialBlockDownload()) {
5440  // Received tx-inv messages are discarded when the active
5441  // chainstate is in IBD, so tell the peer to not send them.
5442  currentFilter = MAX_MONEY;
5443  } else {
5444  static const CAmount MAX_FILTER{m_fee_filter_rounder.round(MAX_MONEY)};
5445  if (peer.m_fee_filter_sent == MAX_FILTER) {
5446  // Send the current filter if we sent MAX_FILTER previously
5447  // and made it out of IBD.
5448  peer.m_next_send_feefilter = 0us;
5449  }
5450  }
5451  if (current_time > peer.m_next_send_feefilter) {
5452  CAmount filterToSend = m_fee_filter_rounder.round(currentFilter);
5453  // We always have a fee filter of at least the min relay fee
5454  filterToSend = std::max(filterToSend, m_mempool.m_min_relay_feerate.GetFeePerK());
5455  if (filterToSend != peer.m_fee_filter_sent) {
5456  m_connman.PushMessage(&pto, CNetMsgMaker(pto.GetCommonVersion()).Make(NetMsgType::FEEFILTER, filterToSend));
5457  peer.m_fee_filter_sent = filterToSend;
5458  }
5459  peer.m_next_send_feefilter = GetExponentialRand(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL);
5460  }
5461  // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
5462  // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
5463  else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < peer.m_next_send_feefilter &&
5464  (currentFilter < 3 * peer.m_fee_filter_sent / 4 || currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
5465  peer.m_next_send_feefilter = current_time + GetRandomDuration<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY);
5466  }
5467 }
5468 
5469 namespace {
5470 class CompareInvMempoolOrder
5471 {
5472  CTxMemPool* mp;
5473  bool m_wtxid_relay;
5474 public:
5475  explicit CompareInvMempoolOrder(CTxMemPool *_mempool, bool use_wtxid)
5476  {
5477  mp = _mempool;
5478  m_wtxid_relay = use_wtxid;
5479  }
5480 
5481  bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
5482  {
5483  /* As std::make_heap produces a max-heap, we want the entries with the
5484  * fewest ancestors/highest fee to sort later. */
5485  return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay);
5486  }
5487 };
5488 } // namespace
5489 
5490 bool PeerManagerImpl::RejectIncomingTxs(const CNode& peer) const
5491 {
5492  // block-relay-only peers may never send txs to us
5493  if (peer.IsBlockOnlyConn()) return true;
5494  if (peer.IsFeelerConn()) return true;
5495  // In -blocksonly mode, peers need the 'relay' permission to send txs to us
5496  if (m_opts.ignore_incoming_txs && !peer.HasPermission(NetPermissionFlags::Relay)) return true;
5497  return false;
5498 }
5499 
5500 bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer)
5501 {
5502  // We don't participate in addr relay with outbound block-relay-only
5503  // connections to prevent providing adversaries with the additional
5504  // information of addr traffic to infer the link.
5505  if (node.IsBlockOnlyConn()) return false;
5506 
5507  if (!peer.m_addr_relay_enabled.exchange(true)) {
5508  // During version message processing (non-block-relay-only outbound peers)
5509  // or on first addr-related message we have received (inbound peers), initialize
5510  // m_addr_known.
5511  peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
5512  }
5513 
5514  return true;
5515 }
5516 
5517 bool PeerManagerImpl::SendMessages(CNode* pto)
5518 {
5519  AssertLockHeld(g_msgproc_mutex);
5520 
5521  PeerRef peer = GetPeerRef(pto->GetId());
5522  if (!peer) return false;
5523  const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
5524 
5525  // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
5526  // disconnect misbehaving peers even before the version handshake is complete.
5527  if (MaybeDiscourageAndDisconnect(*pto, *peer)) return true;
5528 
5529  // Don't send anything until the version handshake is complete
5530  if (!pto->fSuccessfullyConnected || pto->fDisconnect)
5531  return true;
5532 
5533  // If we get here, the outgoing message serialization version is set and can't change.
5534  const CNetMsgMaker msgMaker(pto->GetCommonVersion());
5535 
5536  const auto current_time{GetTime<std::chrono::microseconds>()};
5537 
5538  if (pto->IsAddrFetchConn() && current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
5539  LogPrint(BCLog::NET, "addrfetch connection timeout; disconnecting peer=%d\n", pto->GetId());
5540  pto->fDisconnect = true;
5541  return true;
5542  }
5543 
5544  MaybeSendPing(*pto, *peer, current_time);
5545 
5546  // MaybeSendPing may have marked peer for disconnection
5547  if (pto->fDisconnect) return true;
5548 
5549  MaybeSendAddr(*pto, *peer, current_time);
5550 
5551  MaybeSendSendHeaders(*pto, *peer);
5552 
5553  {
5554  LOCK(cs_main);
5555 
5556  CNodeState &state = *State(pto->GetId());
5557 
5558  // Start block sync
5559  if (m_chainman.m_best_header == nullptr) {
5560  m_chainman.m_best_header = m_chainman.ActiveChain().Tip();
5561  }
5562 
5563  // Determine whether we might try initial headers sync or parallel
5564  // block download from this peer -- this mostly affects behavior while
5565  // in IBD (once out of IBD, we sync from all peers).
5566  bool sync_blocks_and_headers_from_peer = false;
5567  if (state.fPreferredDownload) {
5568  sync_blocks_and_headers_from_peer = true;
5569  } else if (CanServeBlocks(*peer) && !pto->IsAddrFetchConn()) {
5570  // Typically this is an inbound peer. If we don't have any outbound
5571  // peers, or if we aren't downloading any blocks from such peers,
5572  // then allow block downloads from this peer, too.
5573  // We prefer downloading blocks from outbound peers to avoid
5574  // putting undue load on (say) some home user who is just making
5575  // outbound connections to the network, but if our only source of
5576  // the latest blocks is from an inbound peer, we have to be sure to
5577  // eventually download it (and not just wait indefinitely for an
5578  // outbound peer to have it).
5579  if (m_num_preferred_download_peers == 0 || mapBlocksInFlight.empty()) {
5580  sync_blocks_and_headers_from_peer = true;
5581  }
5582  }
5583 
5584  if (!state.fSyncStarted && CanServeBlocks(*peer) && !m_chainman.m_blockman.LoadingBlocks()) {
5585  // Only actively request headers from a single peer, unless we're close to today.
5586  if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > GetAdjustedTime() - 24h) {
5587  const CBlockIndex* pindexStart = m_chainman.m_best_header;
5588  /* If possible, start at the block preceding the currently
5589  best known header. This ensures that we always get a
5590  non-empty list of headers back as long as the peer
5591  is up-to-date. With a non-empty response, we can initialise
5592  the peer's known best block. This wouldn't be possible
5593  if we requested starting at m_chainman.m_best_header and
5594  got back an empty response. */
5595  if (pindexStart->pprev)
5596  pindexStart = pindexStart->pprev;
5597  if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) {
5598  LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height);
5599 
5600  state.fSyncStarted = true;
5601  peer->m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
5602  (
5603  // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
5604  // to maintain precision
5605  std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
5606  Ticks<std::chrono::seconds>(GetAdjustedTime() - m_chainman.m_best_header->Time()) / consensusParams.nPowTargetSpacing
5607  );
5608  nSyncStarted++;
5609  }
5610  }
5611  }
5612 
5613  //
5614  // Try sending block announcements via headers
5615  //
5616  {
5617  // If we have no more than MAX_BLOCKS_TO_ANNOUNCE in our
5618  // list of block hashes we're relaying, and our peer wants
5619  // headers announcements, then find the first header
5620  // not yet known to our peer but would connect, and send.
5621  // If no header would connect, or if we have too many
5622  // blocks, or if the peer doesn't want headers, just
5623  // add all to the inv queue.
5624  LOCK(peer->m_block_inv_mutex);
5625  std::vector<CBlock> vHeaders;
5626  bool fRevertToInv = ((!peer->m_prefers_headers &&
5627  (!state.m_requested_hb_cmpctblocks || peer->m_blocks_for_headers_relay.size() > 1)) ||
5628  peer->m_blocks_for_headers_relay.size() > MAX_BLOCKS_TO_ANNOUNCE);
5629  const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
5630  ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date
5631 
5632  if (!fRevertToInv) {
5633  bool fFoundStartingHeader = false;
5634  // Try to find first header that our peer doesn't have, and
5635  // then send all headers past that one. If we come across any
5636  // headers that aren't on m_chainman.ActiveChain(), give up.
5637  for (const uint256& hash : peer->m_blocks_for_headers_relay) {
5638  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
5639  assert(pindex);
5640  if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
5641  // Bail out if we reorged away from this block
5642  fRevertToInv = true;
5643  break;
5644  }
5645  if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
5646  // This means that the list of blocks to announce don't
5647  // connect to each other.
5648  // This shouldn't really be possible to hit during
5649  // regular operation (because reorgs should take us to
5650  // a chain that has some block not on the prior chain,
5651  // which should be caught by the prior check), but one
5652  // way this could happen is by using invalidateblock /
5653  // reconsiderblock repeatedly on the tip, causing it to
5654  // be added multiple times to m_blocks_for_headers_relay.
5655  // Robustly deal with this rare situation by reverting
5656  // to an inv.
5657  fRevertToInv = true;
5658  break;
5659  }
5660  pBestIndex = pindex;
5661  if (fFoundStartingHeader) {
5662  // add this to the headers message
5663  vHeaders.emplace_back(pindex->GetBlockHeader());
5664  } else if (PeerHasHeader(&state, pindex)) {
5665  continue; // keep looking for the first new block
5666  } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) {
5667  // Peer doesn't have this header but they do have the prior one.
5668  // Start sending headers.
5669  fFoundStartingHeader = true;
5670  vHeaders.emplace_back(pindex->GetBlockHeader());
5671  } else {
5672  // Peer doesn't have this header or the prior one -- nothing will
5673  // connect, so bail out.
5674  fRevertToInv = true;
5675  break;
5676  }
5677  }
5678  }
5679  if (!fRevertToInv && !vHeaders.empty()) {
5680  if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
5681  // We only send up to 1 block as header-and-ids, as otherwise
5682  // probably means we're doing an initial-ish-sync or they're slow
5683  LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
5684  vHeaders.front().GetHash().ToString(), pto->GetId());
5685 
5686  std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
5687  {
5688  LOCK(m_most_recent_block_mutex);
5689  if (m_most_recent_block_hash == pBestIndex->GetBlockHash()) {
5690  cached_cmpctblock_msg = msgMaker.Make(NetMsgType::CMPCTBLOCK, *m_most_recent_compact_block);
5691  }
5692  }
5693  if (cached_cmpctblock_msg.has_value()) {
5694  m_connman.PushMessage(pto, std::move(cached_cmpctblock_msg.value()));
5695  } else {
5696  CBlock block;
5697  const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(block, *pBestIndex)};
5698  assert(ret);
5699  CBlockHeaderAndShortTxIDs cmpctblock{block};
5700  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock));
5701  }
5702  state.pindexBestHeaderSent = pBestIndex;
5703  } else if (peer->m_prefers_headers) {
5704  if (vHeaders.size() > 1) {
5705  LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
5706  vHeaders.size(),
5707  vHeaders.front().GetHash().ToString(),
5708  vHeaders.back().GetHash().ToString(), pto->GetId());
5709  } else {
5710  LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
5711  vHeaders.front().GetHash().ToString(), pto->GetId());
5712  }
5713  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
5714  state.pindexBestHeaderSent = pBestIndex;
5715  } else
5716  fRevertToInv = true;
5717  }
5718  if (fRevertToInv) {
5719  // If falling back to using an inv, just try to inv the tip.
5720  // The last entry in m_blocks_for_headers_relay was our tip at some point
5721  // in the past.
5722  if (!peer->m_blocks_for_headers_relay.empty()) {
5723  const uint256& hashToAnnounce = peer->m_blocks_for_headers_relay.back();
5724  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce);
5725  assert(pindex);
5726 
5727  // Warn if we're announcing a block that is not on the main chain.
5728  // This should be very rare and could be optimized out.
5729  // Just log for now.
5730  if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
5731  LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
5732  hashToAnnounce.ToString(), m_chainman.ActiveChain().Tip()->GetBlockHash().ToString());
5733  }
5734 
5735  // If the peer's chain has this block, don't inv it back.
5736  if (!PeerHasHeader(&state, pindex)) {
5737  peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
5738  LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
5739  pto->GetId(), hashToAnnounce.ToString());
5740  }
5741  }
5742  }
5743  peer->m_blocks_for_headers_relay.clear();
5744  }
5745 
5746  //
5747  // Message: inventory
5748  //
5749  std::vector<CInv> vInv;
5750  {
5751  LOCK(peer->m_block_inv_mutex);
5752  vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(), INVENTORY_BROADCAST_TARGET));
5753 
5754  // Add blocks
5755  for (const uint256& hash : peer->m_blocks_for_inv_relay) {
5756  vInv.emplace_back(MSG_BLOCK, hash);
5757  if (vInv.size() == MAX_INV_SZ) {
5758  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
5759  vInv.clear();
5760  }
5761  }
5762  peer->m_blocks_for_inv_relay.clear();
5763  }
5764 
5765  if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
5766  LOCK(tx_relay->m_tx_inventory_mutex);
5767  // Check whether periodic sends should happen
5768  bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan);
5769  if (tx_relay->m_next_inv_send_time < current_time) {
5770  fSendTrickle = true;
5771  if (pto->IsInboundConn()) {
5772  tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
5773  } else {
5774  tx_relay->m_next_inv_send_time = GetExponentialRand(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
5775  }
5776  }
5777 
5778  // Time to send but the peer has requested we not relay transactions.
5779  if (fSendTrickle) {
5780  LOCK(tx_relay->m_bloom_filter_mutex);
5781  if (!tx_relay->m_relay_txs) tx_relay->m_tx_inventory_to_send.clear();
5782  }
5783 
5784  // Respond to BIP35 mempool requests
5785  if (fSendTrickle && tx_relay->m_send_mempool) {
5786  auto vtxinfo = m_mempool.infoAll();
5787  tx_relay->m_send_mempool = false;
5788  const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()};
5789 
5790  LOCK(tx_relay->m_bloom_filter_mutex);
5791 
5792  for (const auto& txinfo : vtxinfo) {
5793  const uint256& hash = peer->m_wtxid_relay ? txinfo.tx->GetWitnessHash() : txinfo.tx->GetHash();
5794  CInv inv(peer->m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
5795  tx_relay->m_tx_inventory_to_send.erase(hash);
5796  // Don't send transactions that peers will not put into their mempool
5797  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
5798  continue;
5799  }
5800  if (tx_relay->m_bloom_filter) {
5801  if (!tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
5802  }
5803  tx_relay->m_tx_inventory_known_filter.insert(hash);
5804  vInv.push_back(inv);
5805  if (vInv.size() == MAX_INV_SZ) {
5806  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
5807  vInv.clear();
5808  }
5809  }
5810  }
5811 
5812  // Determine transactions to relay
5813  if (fSendTrickle) {
5814  // Produce a vector with all candidates for sending
5815  std::vector<std::set<uint256>::iterator> vInvTx;
5816  vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
5817  for (std::set<uint256>::iterator it = tx_relay->m_tx_inventory_to_send.begin(); it != tx_relay->m_tx_inventory_to_send.end(); it++) {
5818  vInvTx.push_back(it);
5819  }
5820  const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()};
5821  // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
5822  // A heap is used so that not all items need sorting if only a few are being sent.
5823  CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, peer->m_wtxid_relay);
5824  std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
5825  // No reason to drain out at many times the network's capacity,
5826  // especially since we have many peers and some will draw much shorter delays.
5827  unsigned int nRelayedTransactions = 0;
5828  LOCK(tx_relay->m_bloom_filter_mutex);
5829  size_t broadcast_max{INVENTORY_BROADCAST_TARGET + (tx_relay->m_tx_inventory_to_send.size()/1000)*5};
5830  broadcast_max = std::min<size_t>(INVENTORY_BROADCAST_MAX, broadcast_max);
5831  while (!vInvTx.empty() && nRelayedTransactions < broadcast_max) {
5832  // Fetch the top element from the heap
5833  std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
5834  std::set<uint256>::iterator it = vInvTx.back();
5835  vInvTx.pop_back();
5836  uint256 hash = *it;
5837  CInv inv(peer->m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
5838  // Remove it from the to-be-sent set
5839  tx_relay->m_tx_inventory_to_send.erase(it);
5840  // Check if not in the filter already
5841  if (tx_relay->m_tx_inventory_known_filter.contains(hash)) {
5842  continue;
5843  }
5844  // Not in the mempool anymore? don't bother sending it.
5845  auto txinfo = m_mempool.info(ToGenTxid(inv));
5846  if (!txinfo.tx) {
5847  continue;
5848  }
5849  // Peer told you to not send transactions at that feerate? Don't bother sending it.
5850  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
5851  continue;
5852  }
5853  if (tx_relay->m_bloom_filter && !tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
5854  // Send
5855  vInv.push_back(inv);
5856  nRelayedTransactions++;
5857  if (vInv.size() == MAX_INV_SZ) {
5858  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
5859  vInv.clear();
5860  }
5861  tx_relay->m_tx_inventory_known_filter.insert(hash);
5862  }
5863 
5864  // Ensure we'll respond to GETDATA requests for anything we've just announced
5865  LOCK(m_mempool.cs);
5866  tx_relay->m_last_inv_sequence = m_mempool.GetSequence();
5867  }
5868  }
5869  if (!vInv.empty())
5870  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
5871 
5872  // Detect whether we're stalling
5873  auto stalling_timeout = m_block_stalling_timeout.load();
5874  if (state.m_stalling_since.count() && state.m_stalling_since < current_time - stalling_timeout) {
5875  // Stalling only triggers when the block download window cannot move. During normal steady state,
5876  // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
5877  // should only happen during initial block download.
5878  LogPrintf("Peer=%d%s is stalling block download, disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
5879  pto->fDisconnect = true;
5880  // Increase timeout for the next peer so that we don't disconnect multiple peers if our own
5881  // bandwidth is insufficient.
5882  const auto new_timeout = std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX);
5883  if (stalling_timeout != new_timeout && m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
5884  LogPrint(BCLog::NET, "Increased stalling timeout temporarily to %d seconds\n", count_seconds(new_timeout));
5885  }
5886  return true;
5887  }
5888  // In case there is a block that has been in flight from this peer for block_interval * (1 + 0.5 * N)
5889  // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
5890  // We compensate for other peers to prevent killing off peers due to our own downstream link
5891  // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
5892  // to unreasonably increase our timeout.
5893  if (state.vBlocksInFlight.size() > 0) {
5894  QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
5895  int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1;
5896  if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
5897  LogPrintf("Timeout downloading block %s from peer=%d%s, disconnecting\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
5898  pto->fDisconnect = true;
5899  return true;
5900  }
5901  }
5902  // Check for headers sync timeouts
5903  if (state.fSyncStarted && peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
5904  // Detect whether this is a stalling initial-headers-sync peer
5905  if (m_chainman.m_best_header->Time() <= GetAdjustedTime() - 24h) {
5906  if (current_time > peer->m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) {
5907  // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer,
5908  // and we have others we could be using instead.
5909  // Note: If all our peers are inbound, then we won't
5910  // disconnect our sync peer for stalling; we have bigger
5911  // problems if we can't get any outbound peers.
5913  LogPrintf("Timeout downloading headers from peer=%d%s, disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
5914  pto->fDisconnect = true;
5915  return true;
5916  } else {
5917  LogPrintf("Timeout downloading headers from noban peer=%d%s, not disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
5918  // Reset the headers sync state so that we have a
5919  // chance to try downloading from a different peer.
5920  // Note: this will also result in at least one more
5921  // getheaders message to be sent to
5922  // this peer (eventually).
5923  state.fSyncStarted = false;
5924  nSyncStarted--;
5925  peer->m_headers_sync_timeout = 0us;
5926  }
5927  }
5928  } else {
5929  // After we've caught up once, reset the timeout so we can't trigger
5930  // disconnect later.
5931  peer->m_headers_sync_timeout = std::chrono::microseconds::max();
5932  }
5933  }
5934 
5935  // Check that outbound peers have reasonable chains
5936  // GetTime() is used by this anti-DoS logic so we can test this using mocktime
5937  ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
5938 
5939  //
5940  // Message: getdata (blocks)
5941  //
5942  std::vector<CInv> vGetData;
5943  if (CanServeBlocks(*peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.IsInitialBlockDownload()) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
5944  std::vector<const CBlockIndex*> vToDownload;
5945  NodeId staller = -1;
5946  auto get_inflight_budget = [&state]() {
5947  return std::max(0, MAX_BLOCKS_IN_TRANSIT_PER_PEER - static_cast<int>(state.vBlocksInFlight.size()));
5948  };
5949 
5950  // If a snapshot chainstate is in use, we want to find its next blocks
5951  // before the background chainstate to prioritize getting to network tip.
5952  FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload, staller);
5953  if (m_chainman.BackgroundSyncInProgress() && !IsLimitedPeer(*peer)) {
5954  TryDownloadingHistoricalBlocks(
5955  *peer,
5956  get_inflight_budget(),
5957  vToDownload, m_chainman.GetBackgroundSyncTip(),
5958  Assert(m_chainman.GetSnapshotBaseBlock()));
5959  }
5960  for (const CBlockIndex *pindex : vToDownload) {
5961  uint32_t nFetchFlags = GetFetchFlags(*peer);
5962  vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash());
5963  BlockRequested(pto->GetId(), *pindex);
5964  LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
5965  pindex->nHeight, pto->GetId());
5966  }
5967  if (state.vBlocksInFlight.empty() && staller != -1) {
5968  if (State(staller)->m_stalling_since == 0us) {
5969  State(staller)->m_stalling_since = current_time;
5970  LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
5971  }
5972  }
5973  }
5974 
5975  //
5976  // Message: getdata (transactions)
5977  //
5978  std::vector<std::pair<NodeId, GenTxid>> expired;
5979  auto requestable = m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
5980  for (const auto& entry : expired) {
5981  LogPrint(BCLog::NET, "timeout of inflight %s %s from peer=%d\n", entry.second.IsWtxid() ? "wtx" : "tx",
5982  entry.second.GetHash().ToString(), entry.first);
5983  }
5984  for (const GenTxid& gtxid : requestable) {
5985  if (!AlreadyHaveTx(gtxid)) {
5986  LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx",
5987  gtxid.GetHash().ToString(), pto->GetId());
5988  vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*peer)), gtxid.GetHash());
5989  if (vGetData.size() >= MAX_GETDATA_SZ) {
5990  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
5991  vGetData.clear();
5992  }
5993  m_txrequest.RequestedTx(pto->GetId(), gtxid.GetHash(), current_time + GETDATA_TX_INTERVAL);
5994  } else {
5995  // We have already seen this transaction, no need to download. This is just a belt-and-suspenders, as
5996  // this should already be called whenever a transaction becomes AlreadyHaveTx().
5997  m_txrequest.ForgetTxHash(gtxid.GetHash());
5998  }
5999  }
6000 
6001 
6002  if (!vGetData.empty())
6003  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
6004  } // release cs_main
6005  MaybeSendFeefilter(*pto, *peer, current_time);
6006  return true;
6007 }
std::shared_ptr< const CTransaction > CTransactionRef
Definition: transaction.h:421
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND
Maximum rate of inventory items to send per second.
const char * GETCFILTERS
getcfilters requests compact filters for a range of blocks.
Definition: protocol.cpp:42
static Mutex g_msgproc_mutex
Mutex for anything that is only accessed via the msg processing thread.
Definition: net.h:1007
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
Definition: chain.h:169
static std::unique_ptr< PeerManager > make(CConnman &connman, AddrMan &addrman, BanMan *banman, ChainstateManager &chainman, CTxMemPool &pool, Options opts)
bool IsMsgWtx() const
Definition: protocol.h:504
enum ReadStatus_t ReadStatus
const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected...
Definition: protocol.cpp:30
static constexpr auto TXID_RELAY_DELAY
How long to delay requesting transactions via txids, if we have wtxid-relaying peers.
BlockFilterIndex is used to store and retrieve block filters, hashes, and headers for a range of bloc...
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for inbound peers.
int ret
const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
Definition: protocol.cpp:33
const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
Definition: protocol.cpp:22
std::atomic_bool fPauseSend
Definition: net.h:749
static const int SERIALIZE_TRANSACTION_NO_WITNESS
A flag that is ORed into the protocol version to designate that a transaction should be (un)serialize...
Definition: transaction.h:32
invalid by consensus rules
bool HaveNumChainTxs() const
Check whether this block's and all previous blocks' transactions have been downloaded (and stored to ...
Definition: chain.h:270
std::chrono::time_point< NodeClock > time_point
Definition: time.h:17
const char * BLOCKTXN
Contains a BlockTransactions.
Definition: protocol.cpp:41
AssertLockHeld(pool.cs)
static GenTxid Wtxid(const uint256 &hash)
Definition: transaction.h:433
bool IsMsgTx() const
Definition: protocol.h:502
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
static constexpr unsigned int INVENTORY_BROADCAST_MAX
Maximum number of inventory items to send per transmission.
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
const char * SENDADDRV2
The sendaddrv2 message signals support for receiving ADDRV2 messages (BIP155).
Definition: protocol.cpp:19
Definition: banman.h:58
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing)
std::vector< Byte > ParseHex(std::string_view hex_str)
Like TryParseHex, but returns an empty vector on invalid input.
Definition: strencodings.h:65
ServiceFlags
nServices flags
Definition: protocol.h:274
static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET
The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND based inc...
bool IsLocal() const
Definition: netaddress.cpp:399
std::optional< std::pair< CNetMessage, bool > > PollMessage() EXCLUSIVE_LOCKS_REQUIRED(!m_msg_process_queue_mutex)
Poll the next message from the processing queue of this connection.
Definition: net.cpp:3719
#define LogPrint(category,...)
Definition: logging.h:246
int64_t GetBlockTime() const
Definition: chain.h:277
assert(!tx.IsCoinBase())
Describes a place in the block chain to another node such that if the other node doesn't have the sam...
Definition: block.h:123
virtual void BlockDisconnected(const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being disconnected.
CBlockIndex * pprev
pointer to the index of the predecessor of this block
Definition: chain.h:151
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data It is treated as if this was the little-endian interpretation of ...
Definition: siphash.cpp:28
virtual void StartScheduledTasks(CScheduler &scheduler)=0
Begin running background tasks, should only be called once.
uint64_t m_addr_rate_limited
unsigned int nonce
Definition: miner_tests.cpp:72
virtual void UnitTestMisbehaving(NodeId peer_id, int howmuch)=0
std::string ToString() const
Definition: protocol.cpp:173
Definition: block.h:68
ReconciliationRegisterResult
We don't have the previous block the checked one is built on.
Data structure to keep track of, and schedule, transaction downloads from peers.
Definition: txrequest.h:96
const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
Definition: protocol.cpp:28
uint64_t ReadCompactSize(Stream &is, bool range_check=true)
Decode a CompactSize-encoded variable-length integer.
Definition: serialize.h:361
std::atomic_bool m_has_all_wanted_services
Whether this peer provides all services that we want.
Definition: net.h:863
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
Definition: validation.h:827
std::vector< uint16_t > indexes
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
Definition: tinyformat.h:1162
bool IsOutboundOrBlockRelayConn() const
Definition: net.h:772
bool IsMsgFilteredBlk() const
Definition: protocol.h:505
An in-memory indexed chain of blocks.
Definition: chain.h:441
const std::chrono::seconds m_connected
Unix epoch time at peer connection.
Definition: net.h:717
virtual void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)=0
This function is used for testing the stale tip eviction logic, see denialofservice_tests.cpp.
const std::optional< std::list< CTransactionRef > > m_replaced_transactions
Mempool transactions replaced by the tx.
Definition: validation.h:135
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL
Average delay between feefilter broadcasts in seconds.
reverse_range< T > reverse_iterate(T &x)
inv message data
Definition: protocol.h:488
invalid proof of work or time too old
const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
Definition: protocol.cpp:38
A class to track orphan transactions (failed on TX_MISSING_INPUTS) Since we cannot distinguish orphan...
Definition: txorphanage.h:21
BlockFilterIndex * GetBlockFilterIndex(BlockFilterType filter_type)
Get a block filter index by type.
static constexpr int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of ActiveChain().Tip() will not be pr...
Definition: validation.h:73
constexpr auto GetRandMillis
Definition: random.h:97
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
Definition: chain.cpp:165
transaction was missing some of its inputs
bool IsMsgCmpctBlk() const
Definition: protocol.h:506
virtual void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr< const CBlock > &block)
Notifies listeners that a block which builds directly on our current tip has been received and connec...
bool IsFeelerConn() const
Definition: net.h:815
int in_avail() const
Definition: streams.h:262
All parent headers found, difficulty matches, timestamp >= median previous, checkpoint.
Definition: chain.h:92
bool MoneyRange(const CAmount &nValue)
Definition: amount.h:27
CBlockHeader GetBlockHeader() const
Definition: chain.h:240
static constexpr auto HEADERS_RESPONSE_TIME
How long to wait for a peer to respond to a getheaders request.
static constexpr unsigned int INVENTORY_BROADCAST_TARGET
Target number of tx inventory items to send per transmission.
BloomFilter is a probabilistic filter which SPV clients provide so that we can filter the transaction...
Definition: bloom.h:44
bool DeploymentActiveAfter(const CBlockIndex *pindexPrev, const Consensus::Params &params, Consensus::BuriedDeployment dep, [[maybe_unused]] VersionBitsCache &versionbitscache)
Determine if a deployment is active for the next block.
static constexpr SerParams V2_NETWORK
Definition: protocol.h:405
const uint256 & GetHash() const
Definition: transaction.h:435
static constexpr unsigned int DEFAULT_MIN_RELAY_TX_FEE
Default for -minrelaytxfee, minimum relay fee for transactions.
Definition: policy.h:57
const char * CFHEADERS
cfheaders is a response to a getcfheaders request containing a filter header and a vector of filter h...
Definition: protocol.cpp:45
int EraseTx(const uint256 &txid) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase an orphan by txid.
Definition: txorphanage.cpp:56
Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid...
Definition: chain.h:99
void SetCommonVersion(int greatest_common_version)
Definition: net.h:931
We're done syncing with this peer and can discard any remaining state.
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.
Definition: version.h:21
Defined in BIP152.
Definition: protocol.h:479
Interface for message handling.
Definition: net.h:1003
void SetServiceFlagsIBDCache(bool state)
Set the current IBD status in order to figure out the desirable service flags.
Definition: protocol.cpp:137
RollingBloomFilter is a probabilistic "keep track of most recently inserted" set. ...
Definition: bloom.h:108
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL
Average delay between peer address broadcasts.
const TxValidationState m_state
Contains information about why the transaction failed.
Definition: validation.h:131
bool Contains(Network net) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Definition: netbase.h:94
static const int SENDHEADERS_VERSION
"sendheaders" command and announcing blocks with headers starts with this version ...
Definition: version.h:24
static constexpr size_t MAX_PCT_ADDR_TO_SEND
the maximum percentage of addresses from our addrman to return in response to a getaddr message...
CChainParams defines various tweakable parameters of a given instance of the Bitcoin system...
Definition: chainparams.h:80
violated mempool's fee/size/descendant/RBF/etc limits
static constexpr auto NONPREF_PEER_TX_DELAY
How long to delay requesting transactions from non-preferred peers.
the block header may be on a too-little-work chain
Mutex m_subver_mutex
Definition: net.h:729
bool IsNull() const
Definition: block.h:152
inputs (covered by txid) failed policy rules
void ignore(size_t num_ignore)
Definition: streams.h:282
const uint32_t MSG_WITNESS_FLAG
getdata message type flags
Definition: protocol.h:465
const ResultType m_result_type
Result type.
Definition: validation.h:128
uint64_t GetLocalNonce() const
Definition: net.h:910
bool SeenLocal(const CService &addr)
vote for a local address
Definition: net.cpp:314
transaction spends a coinbase too early, or violates locktime/sequence locks
const char * CFILTER
cfilter is a response to a getcfilters request containing a single compact filter.
Definition: protocol.cpp:43
bool DeploymentActiveAt(const CBlockIndex &index, const Consensus::Params &params, Consensus::BuriedDeployment dep, [[maybe_unused]] VersionBitsCache &versionbitscache)
Determine if a deployment is active for this block.
static constexpr auto STALE_CHECK_INTERVAL
How frequently to check for stale tips.
const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
Definition: protocol.cpp:31
State
The various states a (txhash,peer) pair can be in.
Definition: txrequest.cpp:42
static constexpr std::chrono::minutes TIMEOUT_INTERVAL
Time after which to disconnect, after waiting for a ping response (or inactivity).
Definition: net.h:61
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
Additional block download timeout per parallel downloading peer (i.e.
initial value. Tx has not yet been rejected
virtual void BlockChecked(const CBlock &, const BlockValidationState &)
Notifies listeners of a block validation result.
const char * WTXIDRELAY
Indicates that a node prefers to relay transactions via wtxid, rather than txid.
Definition: protocol.cpp:48
static constexpr uint32_t MAX_GETCFILTERS_SIZE
Maximum number of compact filters that may be requested with one getcfilters.
enum Network GetNetwork() const
Definition: netaddress.cpp:497
const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
Definition: protocol.cpp:26
bool empty() const
Definition: streams.h:228
const char * GETCFCHECKPT
getcfcheckpt requests evenly spaced compact filter headers, enabling parallelized download and valida...
Definition: protocol.cpp:46
std::string ToStringAddrPort() const
Definition: netaddress.cpp:889
PRESYNC means the peer has not yet demonstrated their chain has sufficient work and we're only buildi...
bool IsRelayable() const
Whether this address should be relayed to other peers even if we can't reach it ourselves.
Definition: netaddress.h:222
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT
Protect at least this many outbound peers from disconnection due to slow/ behind headers chain...
const std::vector< CTxIn > vin
Definition: transaction.h:305
bool HaveTxToReconsider(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Does this peer have any work to do?
const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmi...
Definition: protocol.cpp:20
Stochastic address manager.
Definition: addrman.h:87
NodeClock::time_point GetAdjustedTime()
Definition: timedata.cpp:36
Transaction validation functions.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE
Block download timeout base, expressed in multiples of the block interval (i.e.
bool IsValid() const
Definition: netaddress.cpp:425
std::function< void(const CAddress &addr, const std::string &msg_type, Span< const unsigned char > data, bool is_incoming)> CaptureMessage
Defaults to CaptureMessageToFile(), but can be overridden by unit tests.
Definition: net.cpp:3873
int64_t CAmount
Amount in satoshis (Can be negative)
Definition: amount.h:12
std::chrono::time_point< NodeClock, std::chrono::seconds > NodeSeconds
Definition: time.h:23
std::string TransportTypeAsString(TransportProtocolType transport_type)
Convert TransportProtocolType enum to a string value.
uint256 GetBlockHash() const
Definition: chain.h:253
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX
Maximum timeout for stalling block download.
std::string SanitizeString(std::string_view str, int rule)
Remove unsafe chars.
bool IsValid() const
Definition: validation.h:121
static constexpr auto PING_INTERVAL
Time between pings automatically sent out for latency probing and keepalive.
BlockFilterType
Definition: blockfilter.h:92
std::string ToString(const T &t)
Locale-independent version of std::to_string.
Definition: string.h:109
bool IsBlockMutated(const CBlock &block, bool check_witness_root)
Check if a block has been mutated (with respect to its merkle root and witness commitments).
GenTxid ToGenTxid(const CInv &inv)
Convert a TX/WITNESS_TX/WTX CInv to a GenTxid.
Definition: protocol.cpp:222
initial value. Block has not yet been rejected
bool IsGenBlkMsg() const
Definition: protocol.h:514
Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends...
Definition: chain.h:103
void SetAddrLocal(const CService &addrLocalIn) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_local_mutex)
May not be called more than once.
Definition: net.cpp:569
bool HasValidProofOfWork(const std::vector< CBlockHeader > &headers, const Consensus::Params &consensusParams)
Check that the proof of work on each block header matches the value in nBits.
static constexpr auto EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect.
Used to relay blocks as header + vector<merkle branch> to filtered nodes.
Definition: merkleblock.h:124
const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
Definition: protocol.cpp:24
CAmount m_fee_filter_received
const char * SENDTXRCNCL
Contains a 4-byte version number and an 8-byte salt.
Definition: protocol.cpp:49
virtual void InitializeNode(CNode &node, ServiceFlags our_services)=0
Initialize a peer (setup state, queue any initial messages)
std::optional< CService > GetLocalAddrForPeer(CNode &node)
Returns a local address that we should advertise to this peer.
Definition: net.cpp:237
uint64_t m_addr_processed
std::atomic_bool m_bloom_filter_loaded
Whether this peer has loaded a bloom filter.
Definition: net.h:871
Invalid by a change to consensus rules more recent than SegWit.
Scripts & signatures ok. Implies all parents are either at least VALID_SCRIPTS, or are ASSUMED_VALID...
Definition: chain.h:106
Transaction might have a witness prior to SegWit activation, or witness may have been malleated (whic...
const std::unique_ptr< Transport > m_transport
Transport serializer/deserializer.
Definition: net.h:688
ChainstateRole
This enum describes the various roles a specific Chainstate instance can take.
Definition: chain.h:25
std::vector< CTransactionRef > txn
std::chrono::microseconds GetExponentialRand(std::chrono::microseconds now, std::chrono::seconds average_interval)
Return a timestamp in the future sampled from an exponential distribution (https://en.wikipedia.org/wiki/Exponential_distribution).
Definition: random.cpp:687
this block was cached as being invalid and we didn't store the reason why
std::atomic_bool m_relays_txs
Whether we should relay transactions to this peer.
Definition: net.h:867
An input of a transaction.
Definition: transaction.h:74
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY
Maximum feefilter broadcast delay after significant change.
static bool HasAllDesirableServiceFlags(ServiceFlags services)
A shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services)...
Definition: protocol.h:347
const uint256 & GetWitnessHash() const
Definition: transaction.h:338
static const unsigned int MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK
Maximum number of outstanding CMPCTBLOCK requests for the same block.
#define LOCK(cs)
Definition: sync.h:258
const char * name
Definition: rest.cpp:45
const char * ADDRV2
The addrv2 message relays connection information for peers on the network just like the addr message...
Definition: protocol.cpp:18
const uint256 & GetHash() const
Definition: transaction.h:337
std::string ToString() const
Definition: validation.h:127
the block failed to meet one of our checkpoints
bool LookupFilterHeader(const CBlockIndex *block_index, uint256 &header_out) EXCLUSIVE_LOCKS_REQUIRED(!m_cs_headers_cache)
Get a single filter header by block.
static const int INIT_PROTO_VERSION
initial proto version, to be increased after version/verack negotiation
Definition: version.h:15
bool Contains(const CBlockIndex *pindex) const
Efficiently check whether a block is present in this chain.
Definition: chain.h:472
A combination of a network address (CNetAddr) and a (TCP) port.
Definition: netaddress.h:534
Fast randomness source.
Definition: random.h:143
Transport protocol agnostic message container.
Definition: net.h:234
int64_t nPowTargetSpacing
Definition: params.h:112
void EraseForPeer(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase all orphans announced by a peer (eg, after that peer disconnects)
Definition: txorphanage.cpp:96
const char * SENDHEADERS
Indicates that a node prefers to receive new block announcements via a "headers" message rather than ...
Definition: protocol.cpp:36
const char * MEMPOOL
The mempool message requests the TXIDs of transactions that the receiving node has verified as valid ...
Definition: protocol.cpp:29
constexpr int64_t count_microseconds(std::chrono::microseconds t)
Definition: time.h:56
static const unsigned int MAX_HEADERS_RESULTS
Number of headers sent in one getheaders result.
static const int SHORT_IDS_BLOCKS_VERSION
short-id-based block download starts with this version
Definition: version.h:30
static constexpr SerParams V1
Definition: netaddress.h:235
bool IsProxy(const CNetAddr &addr)
Definition: netbase.cpp:616
bool IsGenTxMsg() const
Definition: protocol.h:510
virtual void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
Notifies listeners when the block chain tip advances.
bool IsManualConn() const
Definition: net.h:791
virtual void FinalizeNode(const CNode &node)=0
Handle removal of a peer (clear state)
A CService with information about it as peer.
Definition: protocol.h:362
virtual bool IgnoresIncomingTxs()=0
Whether this node ignores txs received over p2p.
static constexpr size_t MAX_ADDR_TO_SEND
The maximum number of address records permitted in an ADDR message.
static constexpr uint64_t CMPCTBLOCKS_VERSION
The compactblocks version we support.
std::vector< unsigned char > GetKey() const
Definition: netaddress.cpp:881
static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for outbound peers.
uint256 hash
Definition: protocol.h:520
#define LogPrintLevel(category, level,...)
Definition: logging.h:254
Result GetResult() const
Definition: validation.h:124
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY
SHA256("main address relay")[0:8].
static constexpr uint32_t MAX_GETCFHEADERS_SIZE
Maximum number of cf hashes that may be requested with one getcfheaders.
bool LookupFilterHashRange(int start_height, const CBlockIndex *stop_index, std::vector< uint256 > &hashes_out) const
Get a range of filter hashes between two heights on a chain.
#define LogPrintfCategory(category,...)
Definition: logging.h:240
const char * ADDR
The addr (IP address) message relays connection information for peers on the network.
Definition: protocol.cpp:17
bool ExpectServicesFromConn() const
Definition: net.h:827
int64_t presync_height
int64_t NodeId
Definition: net.h:99
Definition: net.h:1041
Defined in BIP144.
Definition: protocol.h:480
static auto WithParams(const Params &params, T &&t)
Return a wrapper around t that (de)serializes it with specified parameter params. ...
Definition: serialize.h:1191
static const int DISCOURAGEMENT_THRESHOLD
Threshold for marking a node to be discouraged, e.g.
uint64_t Finalize() const
Compute the 64-bit SipHash-2-4 of the data written so far.
Definition: siphash.cpp:77
const char * FILTERCLEAR
The filterclear message tells the receiving peer to remove a previously-set bloom filter...
Definition: protocol.cpp:35
std::string ToString() const
Definition: uint256.cpp:55
std::atomic< bool > m_bip152_highbandwidth_to
Definition: net.h:858
std::vector< uint256 > vHave
Definition: block.h:134
virtual std::optional< std::string > FetchBlock(NodeId peer_id, const CBlockIndex &block_index)=0
Attempt to manually fetch block from a given peer.
NodeId GetId() const
Definition: net.h:906
const char * NOTFOUND
The notfound message is a reply to a getdata message which requested an object the receiving node doe...
Definition: protocol.cpp:32
NodeSeconds nTime
Always included in serialization. The behavior is unspecified if the value is not representable as ui...
Definition: protocol.h:452
const bool m_inbound_onion
Whether this peer is an inbound onion, i.e. connected via our Tor onion service.
Definition: net.h:727
Parameters that influence chain consensus.
Definition: params.h:74
static const int MAX_CMPCTBLOCK_DEPTH
Maximum depth of blocks we're willing to serve as compact blocks to peers when requested.
void PongReceived(std::chrono::microseconds ping_time)
A ping-pong round trip has completed successfully.
Definition: net.h:963
const char * BLOCK
The block message transmits a single serialized block.
Definition: protocol.cpp:27
std::atomic_bool fDisconnect
Definition: net.h:743
std::string strSubVersion
Subversion as sent to the P2P network in version messages.
Definition: net.cpp:118
const char * FEEFILTER
The feefilter message tells the receiving peer not to inv us any txs which do not meet the specified ...
Definition: protocol.cpp:37
const char * GETCFHEADERS
getcfheaders requests a compact filter header and the filter hashes for a range of blocks...
Definition: protocol.cpp:44
std::atomic< std::chrono::seconds > m_last_tx_time
UNIX epoch time of the last transaction received from this peer that we had not yet seen (e...
Definition: net.h:884
constexpr bool IsNull() const
Definition: uint256.h:42
bool IsMsgWitnessBlk() const
Definition: protocol.h:507
size_t Size() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Return how many entries exist in the orphange.
Definition: txorphanage.h:55
Validation result for a single transaction mempool acceptance.
Definition: validation.h:119
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
Definition: sync.h:302
bool IsRoutable() const
Definition: netaddress.cpp:463
#define Assume(val)
Assume is the identity function.
Definition: check.h:85
256-bit unsigned big integer.
void AddChildrenToWorkSet(const CTransaction &tx) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Add any orphans that list a particular tx as a parent into the from peer's work set.
bool IsWtxid() const
Definition: transaction.h:434
virtual void ProcessMessage(CNode &pfrom, const std::string &msg_type, CDataStream &vRecv, const std::chrono::microseconds time_received, const std::atomic< bool > &interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Process a single message from a peer.
static bool MayHaveUsefulAddressDB(ServiceFlags services)
Checks if a peer with the given service flags may be capable of having a robust address-storage DB...
Definition: protocol.h:356
constexpr int64_t count_seconds(std::chrono::seconds t)
Definition: time.h:54
std::chrono::microseconds m_ping_wait
CTransactionRef GetTxToReconsider(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Extract a transaction from a peer&#39;s work set Returns nullptr if there are no transactions to work on...
Definition: init.h:25
const CAddress addr
Definition: net.h:720
const char * GETBLOCKS
The getblocks message requests an inv message that provides block header hashes starting from a parti...
Definition: protocol.cpp:23
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS
Maximum number of transactions to consider for requesting, per peer.
static constexpr int CFCHECKPT_INTERVAL
Interval between compact filter checkpoints.
bool IsBlockOnlyConn() const
Definition: net.h:811
Transaction is missing a witness.
size_type size() const
Definition: streams.h:227
if(!SetupNetworking())
bool IsValid(enum BlockStatus nUpTo=BLOCK_VALID_TRANSACTIONS) const EXCLUSIVE_LOCKS_REQUIRED(
Check whether this block index entry is valid up to the passed validity level.
Definition: chain.h:306
const char * VERACK
The verack message acknowledges a previously-received version message, informing the connecting node ...
Definition: protocol.cpp:16
bool IsMsgBlk() const
Definition: protocol.h:503
uint256 GetHash() const
Definition: block.cpp:11
256-bit opaque blob.
Definition: uint256.h:106
void scheduleFromNow(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Call f once after the delta has passed.
Definition: scheduler.h:52
ServiceFlags their_services
invalid by consensus rules (excluding any below reasons)
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL
Delay between rotating the peers we relay a particular address to.
static time_point now() noexcept
Return current system time or mocked time, if set.
Definition: time.cpp:70
bool HasWitness() const
Definition: transaction.h:367
void Shuffle(I first, I last, R &&rng)
More efficient than using std::shuffle on a FastRandomContext.
Definition: random.h:264
static const unsigned int MAX_GETDATA_SZ
Limit to avoid sending big packets.
ServiceFlags nServices
Serialized as uint64_t in V1, and as CompactSize in V2.
Definition: protocol.h:454
#define EXCLUSIVE_LOCKS_REQUIRED(...)
Definition: threadsafety.h:49
std::vector< CTransactionRef > vtx
Definition: block.h:72
const char * CMPCTBLOCK
Contains a CBlockHeaderAndShortTxIDs object - providing a header and list of "short txids"...
Definition: protocol.cpp:39
virtual void CheckForStaleTipAndEvictPeers()=0
Evict extra outbound peers.
the block's data didn't match the data committed to by the PoW
virtual bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) const =0
Get statistics from node state.
CTxMemPool stores valid-according-to-the-current-best-chain transactions that may be included in the ...
Definition: txmempool.h:301
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT
Default time during which a peer must stall block download progress before being disconnected.
#define LOCKS_EXCLUDED(...)
Definition: threadsafety.h:48
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout.
virtual void SendPings()=0
Send ping message to all peers.
static const int FEEFILTER_VERSION
"feefilter" tells peers to filter invs to you by fee starts with this version
Definition: version.h:27
const char * VERSION
The version message provides information about the transmitting node to the receiving node at the beg...
Definition: protocol.cpp:15
std::vector< std::pair< unsigned int, uint256 > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
Definition: merkleblock.h:137
The block chain is a tree shaped structure starting with the genesis block at the root...
Definition: chain.h:144
const CChainParams & Params()
Return the currently selected parameters.
bool AddTx(const CTransactionRef &tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Add a new orphan transaction.
Definition: txorphanage.cpp:19
void EraseForBlock(const CBlock &block) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase all orphans included in or invalidated by a new block.
static const unsigned int BLOCK_DOWNLOAD_WINDOW
Size of the "block download window": how far ahead of our current height do we fetch? Larger windows tolerate larger download speed differences between peer, but increase the potential degree of disordering of blocks on disk (which make reindexing and pruning harder).
static const int MIN_PEER_PROTO_VERSION
disconnect from peers older than this proto version
Definition: version.h:18
static const unsigned int MAX_INV_SZ
The maximum number of entries in an &#39;inv&#39; protocol message.
static const int PROTOCOL_VERSION
network protocol versioning
Definition: version.h:12
virtual void RelayTransaction(const uint256 &txid, const uint256 &wtxid)=0
Relay transaction to all peers.
BIP155 encoding.
bool IsTxAvailable(size_t index) const
A block this one builds on is invalid.
bool fLogIPs
Definition: logging.cpp:41
#define TRACE6(context, event, a, b, c, d, e, f)
Definition: trace.h:47
ServiceFlags GetDesirableServiceFlags(ServiceFlags services)
Gets the set of service flags which are "desirable" for a given peer.
Definition: protocol.cpp:130
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
Definition: script.h:26
#define LIMITED_STRING(obj, n)
Definition: serialize.h:524
std::atomic< int64_t > nTimeOffset
Definition: net.h:718
const char * GETDATA
The getdata message requests one or more data objects from another node.
Definition: protocol.cpp:21
bool fListen
Definition: net.cpp:115
Fee rate in satoshis per kilovirtualbyte: CAmount / kvB.
Definition: feerate.h:32
static constexpr auto OVERLOADED_PEER_TX_DELAY
How long to delay requesting transactions from overloaded peers (see MAX_PEER_TX_REQUEST_IN_FLIGHT).
constexpr auto MakeUCharSpan(V &&v) -> decltype(UCharSpanCast(Span
Like the Span constructor, but for (const) unsigned char member types only.
Definition: span.h:281
static constexpr auto CHAIN_SYNC_TIMEOUT
Timeout for (unprotected) outbound peers to sync to our chainwork.
std::atomic_bool fSuccessfullyConnected
fSuccessfullyConnected is set to true on receiving VERACK from the peer.
Definition: net.h:740
SipHash-2-4.
Definition: siphash.h:14
static constexpr CAmount MAX_MONEY
No amount larger than this (in satoshi) is valid.
Definition: amount.h:26
#define AssertLockNotHeld(cs)
Definition: sync.h:148
bool IsInvalid() const
Definition: validation.h:122
static const unsigned int MAX_SUBVERSION_LENGTH
Maximum length of the user agent string in version message.
Definition: net.h:69
static int count
std::atomic< int > nVersion
Definition: net.h:728
Invalid by a change to consensus rules more recent than SegWit.
Transaction reconciliation is a way for nodes to efficiently announce transactions.
#define GUARDED_BY(x)
Definition: threadsafety.h:38
arith_uint256 CalculateHeadersWork(const std::vector< CBlockHeader > &headers)
Return the sum of the work on a given set of headers.
std::string ConnectionTypeAsString() const
Definition: net.h:960
static size_t RecursiveDynamicUsage(const CScript &script)
Definition: core_memusage.h:12
const char * CFCHECKPT
cfcheckpt is a response to a getcfcheckpt request containing a vector of evenly spaced filter headers...
Definition: protocol.cpp:47
ReadStatus InitData(const CBlockHeaderAndShortTxIDs &cmpctblock, const std::vector< std::pair< uint256, CTransactionRef >> &extra_txn)
virtual void BlockConnected(ChainstateRole role, const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being connected.
this node does not have a mempool so can't validate the transaction
static bool LogAcceptCategory(BCLog::LogFlags category, BCLog::Level level)
Return true if log accepts specified category, at the specified level.
Definition: logging.h:207
block timestamp was > 2 hours in the future (or our clock is bad)
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE
Maximum number of headers to announce when relaying blocks with headers message.
std::atomic< bool > m_bip152_highbandwidth_from
Definition: net.h:860
CBlockLocator GetLocator(const CBlockIndex *index)
Get a locator for a block index entry.
Definition: chain.cpp:50
bool IsAddrFetchConn() const
Definition: net.h:819
HeadersSyncState:
Definition: headerssync.h:101
arith_uint256 GetBlockProof(const CBlockIndex &block)
Definition: chain.cpp:131
A Span is an object that can refer to a contiguous sequence of objects.
Definition: solver.h:20
const char * TX
The tx message transmits a single transaction.
Definition: protocol.cpp:25
static const unsigned int MAX_LOCATOR_SZ
The maximum number of entries in a locator.
The basic transaction that is broadcasted on the network and contained in blocks. ...
Definition: transaction.h:294
int nHeight
height of the entry in the chain. The genesis block has height 0
Definition: chain.h:157
FlatFilePos GetBlockPos() const EXCLUSIVE_LOCKS_REQUIRED(
Definition: chain.h:218
Information about a peer.
Definition: net.h:683
static constexpr auto MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict...
std::vector< int > vHeightInFlight
std::atomic< std::chrono::seconds > m_last_block_time
UNIX epoch time of the last block received from this peer that we had not yet seen (e...
Definition: net.h:878
Simple class for background tasks that should be run periodically or once "after a while"...
Definition: scheduler.h:38
static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT
Maximum number of in-flight transaction requests from a peer.
CBlockIndex * GetAncestor(int height)
Efficiently find an ancestor of this block.
Definition: chain.cpp:120
full block available in blk*.dat
Definition: chain.h:112
virtual bool SendMessages(CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Send queued protocol messages to a given node.
void AddTimeData(const CNetAddr &ip, int64_t nOffsetSample)
Definition: timedata.cpp:47
static constexpr SerParams V1_NETWORK
Definition: protocol.h:404
#define LogPrintf(...)
Definition: logging.h:237
int64_t GetTime()
DEPRECATED, see GetTime.
Definition: time.cpp:97
Defined in BIP 339.
Definition: protocol.h:476
int GetCommonVersion() const
Definition: net.h:936
static const int INVALID_CB_NO_BAN_VERSION
not banning for invalid compact blocks starts with this version
Definition: version.h:33
bool HaveTx(const GenTxid &gtxid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Check if we already have an orphan transaction (by txid or wtxid)
const std::string m_addr_name
Definition: net.h:723
COutPoint prevout
Definition: transaction.h:77
bool IsAddrV1Compatible() const
Check if the current object can be serialized in pre-ADDRv2/BIP155 format.
Definition: netaddress.cpp:478
static const int WTXID_RELAY_VERSION
"wtxidrelay" command for wtxid-based relay starts with this version
Definition: version.h:36
bool HasPermission(NetPermissionFlags permission) const
Definition: net.h:736
bool IsInboundConn() const
Definition: net.h:823
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate...
Definition: cs_main.cpp:8
static constexpr int HISTORICAL_BLOCK_AGE
Age after which a block is considered historical for purposes of rate limiting block relay...
bool LookupFilterRange(int start_height, const CBlockIndex *stop_index, std::vector< BlockFilter > &filters_out) const
Get a range of filters between two heights on a chain.
static constexpr double MAX_ADDR_RATE_PER_SECOND
The maximum rate of address records we&#39;re willing to process on average.
Tx already in mempool or conflicts with a tx in the chain (if it conflicts with another tx in mempool...
otherwise didn&#39;t meet our local policy rules
void scheduleEvery(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Repeat f until the scheduler is stopped.
Definition: scheduler.cpp:108
A generic txid reference (txid or wtxid).
Definition: transaction.h:425
virtual bool ProcessMessages(CNode *pnode, std::atomic< bool > &interrupt) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Process protocol messages received from a given node.
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL
Average delay between local address broadcasts.
unsigned int nTx
Number of transactions in this block.
Definition: chain.h:176
Nodes collect new transactions into a block, hash them into a hash tree, and scan through nonce value...
Definition: block.h:21
static GenTxid Txid(const uint256 &hash)
Definition: transaction.h:432
void LimitOrphans(unsigned int max_orphans) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Limit the orphanage to the given maximum.
static constexpr auto GETDATA_TX_INTERVAL
How long to wait before downloading a transaction from an additional peer.
const char * FILTERADD
The filteradd message tells the receiving peer to add a single element to a previously-set bloom filt...
Definition: protocol.cpp:34
bool IsFullOutboundConn() const
Definition: net.h:787
#define Assert(val)
Identity function.
Definition: check.h:73
int64_t GetBlockProofEquivalentTime(const CBlockIndex &to, const CBlockIndex &from, const CBlockIndex &tip, const Consensus::Params &params)
Return the time it would take to redo the work difference between from and to, assuming the current h...
Definition: chain.cpp:146
static const int MAX_NUM_UNCONNECTING_HEADERS_MSGS
Maximum number of unconnecting headers announcements before DoS score.
ReachableNets g_reachable_nets
Definition: netbase.cpp:35
#define PT_GUARDED_BY(x)
Definition: threadsafety.h:39
const std::string & BlockFilterTypeName(BlockFilterType filter_type)
Get the human-readable name for a filter type.
const char * GETBLOCKTXN
Contains a BlockTransactionsRequest Peer should respond with "blocktxn" message.
Definition: protocol.cpp:40
static constexpr uint32_t TXRECONCILIATION_VERSION
Supported transaction reconciliation protocol version.
uint256 hash
Definition: transaction.h:38