Bitcoin Core 31.0.0
P2P Digital Currency
Loading...
Searching...
No Matches
net_processing.cpp
Go to the documentation of this file.
1// Copyright (c) 2009-2010 Satoshi Nakamoto
2// Copyright (c) 2009-present The Bitcoin Core developers
3// Distributed under the MIT software license, see the accompanying
4// file COPYING or http://www.opensource.org/licenses/mit-license.php.
5
6#include <net_processing.h>
7
8#include <addrman.h>
9#include <arith_uint256.h>
10#include <banman.h>
11#include <blockencodings.h>
12#include <blockfilter.h>
13#include <chain.h>
14#include <chainparams.h>
15#include <common/bloom.h>
16#include <consensus/amount.h>
17#include <consensus/params.h>
19#include <core_memusage.h>
20#include <crypto/siphash.h>
21#include <deploymentstatus.h>
22#include <flatfile.h>
23#include <headerssync.h>
25#include <kernel/types.h>
26#include <logging.h>
27#include <merkleblock.h>
28#include <net.h>
29#include <net_permissions.h>
30#include <netaddress.h>
31#include <netbase.h>
32#include <netmessagemaker.h>
33#include <node/blockstorage.h>
36#include <node/timeoffsets.h>
37#include <node/txdownloadman.h>
38#include <node/txorphanage.h>
40#include <node/warnings.h>
41#include <policy/feerate.h>
43#include <policy/packages.h>
44#include <policy/policy.h>
45#include <primitives/block.h>
47#include <private_broadcast.h>
48#include <protocol.h>
49#include <random.h>
50#include <scheduler.h>
51#include <script/script.h>
52#include <serialize.h>
53#include <span.h>
54#include <streams.h>
55#include <sync.h>
56#include <tinyformat.h>
57#include <txmempool.h>
58#include <uint256.h>
59#include <util/check.h>
60#include <util/strencodings.h>
61#include <util/time.h>
62#include <util/trace.h>
63#include <validation.h>
64
65#include <algorithm>
66#include <array>
67#include <atomic>
68#include <compare>
69#include <cstddef>
70#include <deque>
71#include <exception>
72#include <functional>
73#include <future>
74#include <initializer_list>
75#include <iterator>
76#include <limits>
77#include <list>
78#include <map>
79#include <memory>
80#include <optional>
81#include <queue>
82#include <ranges>
83#include <ratio>
84#include <set>
85#include <span>
86#include <typeinfo>
87#include <utility>
88
90using namespace util::hex_literals;
91
92TRACEPOINT_SEMAPHORE(net, inbound_message);
93TRACEPOINT_SEMAPHORE(net, misbehaving_connection);
94
97static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
98static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
100static constexpr auto HEADERS_RESPONSE_TIME{2min};
106static constexpr auto CHAIN_SYNC_TIMEOUT{20min};
108static constexpr auto STALE_CHECK_INTERVAL{10min};
110static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s};
112static constexpr auto MINIMUM_CONNECT_TIME{30s};
114static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
117static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
120static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
122static constexpr auto PING_INTERVAL{2min};
124static const unsigned int MAX_LOCATOR_SZ = 101;
126static const unsigned int MAX_INV_SZ = 50000;
128static const unsigned int MAX_GETDATA_SZ = 1000;
130static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
133static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s};
135static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s};
138static const int MAX_CMPCTBLOCK_DEPTH = 5;
140static const int MAX_BLOCKTXN_DEPTH = 10;
141static_assert(MAX_BLOCKTXN_DEPTH <= MIN_BLOCKS_TO_KEEP, "MAX_BLOCKTXN_DEPTH too high");
146static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
148static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
150static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
152static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
154static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
156static const unsigned int NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS = 144;
158static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h};
160static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s};
162static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h};
165static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s};
172static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND{14};
176static constexpr unsigned int INVENTORY_BROADCAST_MAX = 1000;
177static_assert(INVENTORY_BROADCAST_MAX >= INVENTORY_BROADCAST_TARGET, "INVENTORY_BROADCAST_MAX too low");
178static_assert(INVENTORY_BROADCAST_MAX <= node::MAX_PEER_TX_ANNOUNCEMENTS, "INVENTORY_BROADCAST_MAX too high");
180static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min};
182static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min};
184static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
186static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
188static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
190static constexpr size_t MAX_ADDR_TO_SEND{1000};
193static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1};
199static constexpr uint64_t CMPCTBLOCKS_VERSION{2};
201static constexpr size_t NUM_PRIVATE_BROADCAST_PER_TX{3};
204
205// Internal stuff
206namespace {
/** A block that has been requested from a peer and is in flight. */
struct QueuedBlock {
    /** Index entry of the requested block. */
    const CBlockIndex* pindex;
    /** Compact-block reconstruction state. Only allocated when the block was
     *  requested via the compact block mechanism — see BlockRequested(), which
     *  constructs a PartiallyDownloadedBlock only when `pit` is supplied. */
    std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
};
214
/**
 * Per-peer state for message processing. Unlike CNodeState, no field here is
 * guarded by cs_main: every member is either immutable after construction,
 * atomic, guarded by its own annotated mutex, or only touched under
 * NetEventsInterface::g_msgproc_mutex.
 */
struct Peer {
    /** Same id as the CNode object for this peer; never changes. */
    const NodeId m_id{0};

    /** Services we offered to this peer, fixed at construction. */
    const ServiceFlags m_our_services;
    /** Services this peer advertised to us. */
    std::atomic<ServiceFlags> m_their_services{NODE_NONE};

    /** Whether this peer connected to us (rather than us to it). */
    const bool m_is_inbound;

    /** Protects misbehavior data members. */
    Mutex m_misbehavior_mutex;
    /** Whether this peer should be disconnected and marked as discouraged. */
    bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};

    /** Protects block inventory data members. */
    Mutex m_block_inv_mutex;
    /** Block hashes to relay to this peer via inv messages. */
    std::vector<uint256> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
    /** Block hashes to announce to this peer via headers messages. */
    std::vector<uint256> m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);
    /** Block from which to continue serving a truncated getblocks response
     *  (NOTE(review): inferred from the name — confirm against getblocks handling). */
    uint256 m_continuation_block GUARDED_BY(m_block_inv_mutex) {};

    /** Whether we have already sent our version message to this outbound peer. */
    bool m_outbound_version_message_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};

    // TODO: remove in v32.0, only show reported height once in "receive version message: ..." debug log
    std::atomic<int> m_starting_height{-1};

    /** Nonce of the most recent ping we sent; 0 when no ping is outstanding. */
    std::atomic<uint64_t> m_ping_nonce_sent{0};
    /** Time the most recent ping was sent. */
    std::atomic<std::chrono::microseconds> m_ping_start{0us};
    /** Whether a ping has been requested (e.g. via SendPings) but not yet sent. */
    std::atomic<bool> m_ping_queued{false};

    /** Whether wtxid-based transaction relay is in effect with this peer. */
    std::atomic<bool> m_wtxid_relay{false};
    /** Earliest time at which the next feefilter message may be sent. */
    std::chrono::microseconds m_next_send_feefilter GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};

    /** Transaction-relay state. Only allocated (via SetTxRelay) for peers we
     *  relay transactions with. */
    struct TxRelay {
        mutable RecursiveMutex m_bloom_filter_mutex;
        /** Whether we relay transactions to this peer. */
        bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
        /** BIP37 bloom filter installed by the peer, if any. */
        std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr};

        mutable RecursiveMutex m_tx_inventory_mutex;
        /** Rolling filter of tx hashes this peer is known to have (see AddKnownTx). */
        CRollingBloomFilter m_tx_inventory_known_filter GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
        /** Wtxids queued for inventory relay to this peer. */
        std::set<Wtxid> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
        /** Whether the peer has requested our full mempool (mempool message). */
        bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
        /** Next scheduled time to send tx inventory to this peer. */
        std::chrono::microseconds m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0};
        /** Mempool sequence number observed at the last inv send
         *  (NOTE(review): inferred from the name — confirm against inv-send logic). */
        uint64_t m_last_inv_sequence GUARDED_BY(m_tx_inventory_mutex){1};

        /** Minimum fee rate this peer accepts, from its feefilter message. */
        std::atomic<CAmount> m_fee_filter_received{0};
    };

    /* Initializes a TxRelay struct for this peer. Can be called at most once for a peer. */
    TxRelay* SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex)
    {
        LOCK(m_tx_relay_mutex);
        Assume(!m_tx_relay);
        m_tx_relay = std::make_unique<Peer::TxRelay>();
        return m_tx_relay.get();
    };

    /** Returns the TxRelay struct, or nullptr if tx relay was never set up. */
    TxRelay* GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex)
    {
        return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
    };

    /** Addresses queued for relay to this peer (capped at MAX_ADDR_TO_SEND, see PushAddress). */
    std::vector<CAddress> m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
    /** Rolling filter of addresses this peer already knows about. Must be
     *  allocated before AddAddressKnown/PushAddress are called (both assert on it);
     *  presumably set up by SetupAddressRelay — confirm. */
    std::unique_ptr<CRollingBloomFilter> m_addr_known GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
    /** Whether address relay is enabled with this peer. */
    std::atomic_bool m_addr_relay_enabled{false};
    /** Whether we have sent this peer a getaddr request. */
    bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
    /** Guards the address-send timing members below. */
    mutable Mutex m_addr_send_times_mutex;
    /** Next time to relay queued addresses to this peer. */
    std::chrono::microseconds m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    /** Next time to advertise our own local address to this peer. */
    std::chrono::microseconds m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    /** Whether the peer asked for addrv2-format addresses (see IsAddrCompatible). */
    std::atomic_bool m_wants_addrv2{false};
    /** Whether this peer has already sent us a getaddr request. */
    bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
    /** Token bucket limiting how many incoming addresses we process
     *  (refilled at MAX_ADDR_RATE_PER_SECOND). */
    double m_addr_token_bucket GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1.0};
    /** Timestamp of the last token-bucket refill. */
    std::chrono::microseconds m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){GetTime<std::chrono::microseconds>()};
    /** Number of addresses from this peer dropped by the rate limiter. */
    std::atomic<uint64_t> m_addr_rate_limited{0};
    /** Number of addresses from this peer that were processed. */
    std::atomic<uint64_t> m_addr_processed{0};

    /** Whether an inv of an unknown block already triggered a getheaders before sync. */
    bool m_inv_triggered_getheaders_before_sync GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};

    /** Protects m_getdata_requests. */
    Mutex m_getdata_requests_mutex;
    /** Outstanding getdata requests from this peer, served by ProcessGetData. */
    std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);


    /** Protects m_headers_sync. */
    Mutex m_headers_sync_mutex;
    /** State of the low-work headers pre-synchronization with this peer, if any. */
    std::unique_ptr<HeadersSyncState> m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex) GUARDED_BY(m_headers_sync_mutex) {};

    /** Whether we have sent this peer a sendheaders message. */
    std::atomic<bool> m_sent_sendheaders{false};

    /** Deadline for the current headers sync with this peer. */
    std::chrono::microseconds m_headers_sync_timeout GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us};

    /** Whether the peer prefers block announcements via headers (sendheaders). */
    bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};

    /** Clock offset reported by this peer. */
    std::atomic<std::chrono::seconds> m_time_offset{0s};

    explicit Peer(NodeId id, ServiceFlags our_services, bool is_inbound)
        : m_id{id}
        , m_our_services{our_services}
        , m_is_inbound{is_inbound}
    {}

private:
    mutable Mutex m_tx_relay_mutex;

    /** Transaction relay data, accessed via SetTxRelay()/GetTxRelay(). */
    std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);
};
434
435using PeerRef = std::shared_ptr<Peer>;
436
/**
 * Validation-specific per-peer state. Entries live in m_node_states and are
 * guarded by cs_main (see the GUARDED_BY on that member).
 */
struct CNodeState {
    /** The best block this peer is known to have announced. */
    const CBlockIndex* pindexBestKnownBlock{nullptr};
    /** Hash of the last announced block that we do not yet have an index entry for. */
    uint256 hashLastUnknownBlock{};
    /** The last block on our chain also known to this peer — starting point for
     *  block download (NOTE(review): inferred from the name — confirm in FindNextBlocksToDownload). */
    const CBlockIndex* pindexLastCommonBlock{nullptr};
    /** The best header we have announced to this peer. */
    const CBlockIndex* pindexBestHeaderSent{nullptr};
    /** Whether headers sync has been started with this peer (see nSyncStarted). */
    bool fSyncStarted{false};
    /** When this peer began stalling block download; 0 when not stalling
     *  (cleared in RemoveBlockRequest). */
    std::chrono::microseconds m_stalling_since{0us};
    /** Blocks currently in flight from this peer, in request order. */
    std::list<QueuedBlock> vBlocksInFlight;
    /** When the front entry of vBlocksInFlight started downloading
     *  (maintained by BlockRequested/RemoveBlockRequest). */
    std::chrono::microseconds m_downloading_since{0us};
    /** Whether this peer is preferred for block download. */
    bool fPreferredDownload{false};
    /** Whether we requested high-bandwidth compact block announcements from this peer. */
    bool m_requested_hb_cmpctblocks{false};
    /** Whether this peer signalled compact block support
     *  (checked in MaybeSetPeerAsAnnouncingHeaderAndIDs). */
    bool m_provides_cmpctblocks{false};

    /** State used by ConsiderEviction to decide whether an outbound peer is
     *  keeping up with our chain. */
    struct ChainSyncTimeoutState {
        /** Deadline for this peer to prove sufficient chain work; 0 when unset. */
        std::chrono::seconds m_timeout{0s};
        /** Header that the peer is expected to have caught up to by the deadline. */
        const CBlockIndex* m_work_header{nullptr};
        /** Whether a getheaders has already been sent for this timeout round. */
        bool m_sent_getheaders{false};
        /** Whether this peer is protected from chain-sync disconnection. */
        bool m_protect{false};
    };

    ChainSyncTimeoutState m_chain_sync;

    /** Time of this peer's last new-block announcement, in unix seconds. */
    int64_t m_last_block_announcement{0};
};
506
507class PeerManagerImpl final : public PeerManager
508{
509public:
510 PeerManagerImpl(CConnman& connman, AddrMan& addrman,
511 BanMan* banman, ChainstateManager& chainman,
512 CTxMemPool& pool, node::Warnings& warnings, Options opts);
513
515 void ActiveTipChange(const CBlockIndex& new_tip, bool) override
516 EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex);
517 void BlockConnected(const ChainstateRole& role, const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override
518 EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex);
519 void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override
520 EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex);
521 void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override
522 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
523 void BlockChecked(const std::shared_ptr<const CBlock>& block, const BlockValidationState& state) override
524 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
525 void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override
526 EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);
527
529 void InitializeNode(const CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_tx_download_mutex);
530 void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, !m_tx_download_mutex);
531 bool HasAllDesirableServiceFlags(ServiceFlags services) const override;
532 bool ProcessMessages(CNode& node, std::atomic<bool>& interrupt) override
533 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex);
534 bool SendMessages(CNode& node) override
535 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, g_msgproc_mutex, !m_tx_download_mutex);
536
538 void StartScheduledTasks(CScheduler& scheduler) override;
539 void CheckForStaleTipAndEvictPeers() override;
540 util::Expected<void, std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override
541 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
542 bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
543 std::vector<node::TxOrphanage::OrphanInfo> GetOrphanTransactions() override EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex);
544 PeerManagerInfo GetInfo() const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
545 std::vector<PrivateBroadcast::TxBroadcastInfo> GetPrivateBroadcastInfo() const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
546 std::vector<CTransactionRef> AbortPrivateBroadcast(const uint256& id) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
547 void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
548 void InitiateTxBroadcastToAll(const Txid& txid, const Wtxid& wtxid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
549 void InitiateTxBroadcastPrivate(const CTransactionRef& tx) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
550 void SetBestBlock(int height, std::chrono::seconds time) override
551 {
552 m_best_height = height;
553 m_best_block_time = time;
554 };
555 void UnitTestMisbehaving(NodeId peer_id) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) { Misbehaving(*Assert(GetPeerRef(peer_id)), ""); };
556 void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) override;
557 ServiceFlags GetDesirableServiceFlags(ServiceFlags services) const override;
558
559private:
560 void ProcessMessage(Peer& peer, CNode& pfrom, const std::string& msg_type, DataStream& vRecv, std::chrono::microseconds time_received,
561 const std::atomic<bool>& interruptMsgProc)
562 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex);
563
565 void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex);
566
568 void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
569
571 void ReattemptInitialBroadcast(CScheduler& scheduler) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
572
574 void ReattemptPrivateBroadcast(CScheduler& scheduler);
575
578 PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
579
582 PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
583
586 void Misbehaving(Peer& peer, const std::string& message);
587
596 void MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
597 bool via_compact_block, const std::string& message = "")
598 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
599
606 bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer);
607
619 std::optional<node::PackageToValidate> ProcessInvalidTx(NodeId nodeid, const CTransactionRef& tx, const TxValidationState& result,
620 bool first_time_failure)
621 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex);
622
625 void ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions)
626 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex);
627
631 void ProcessPackageResult(const node::PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result)
632 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex);
633
645 bool ProcessOrphanTx(Peer& peer)
646 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, !m_tx_download_mutex);
647
655 void ProcessHeadersMessage(CNode& pfrom, Peer& peer,
656 std::vector<CBlockHeader>&& headers,
657 bool via_compact_block)
658 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
661 bool CheckHeadersPoW(const std::vector<CBlockHeader>& headers, Peer& peer);
663 arith_uint256 GetAntiDoSWorkThreshold();
667 void HandleUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
669 bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const;
688 bool IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom,
689 std::vector<CBlockHeader>& headers)
690 EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
702 bool TryLowWorkHeadersSync(Peer& peer, CNode& pfrom,
703 const CBlockIndex& chain_start_header,
704 std::vector<CBlockHeader>& headers)
705 EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
706
709 bool IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
710
715 bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
717 void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header);
719 void UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
720 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
721
722 void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req);
723
725 void PushMessage(CNode& node, CSerializedNetMsg&& msg) const { m_connman.PushMessage(&node, std::move(msg)); }
726 template <typename... Args>
727 void MakeAndPushMessage(CNode& node, std::string msg_type, Args&&... args) const
728 {
729 m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...));
730 }
731
733 void PushNodeVersion(CNode& pnode, const Peer& peer);
734
739 void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now);
740
742 void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
743
745 void MaybeSendSendHeaders(CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
746
754 void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);
755
757 void MaybeSendFeefilter(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
758
759 FastRandomContext m_rng GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
760
761 FeeFilterRounder m_fee_filter_rounder GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
762
763 const CChainParams& m_chainparams;
764 CConnman& m_connman;
765 AddrMan& m_addrman;
767 BanMan* const m_banman;
768 ChainstateManager& m_chainman;
769 CTxMemPool& m_mempool;
770
779 Mutex m_tx_download_mutex ACQUIRED_BEFORE(m_mempool.cs);
780 node::TxDownloadManager m_txdownloadman GUARDED_BY(m_tx_download_mutex);
781
782 std::unique_ptr<TxReconciliationTracker> m_txreconciliation;
783
785 std::atomic<int> m_best_height{-1};
787 std::atomic<std::chrono::seconds> m_best_block_time{0s};
788
790 std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s};
791
792 node::Warnings& m_warnings;
793 TimeOffsets m_outbound_time_offsets{m_warnings};
794
795 const Options m_opts;
796
797 bool RejectIncomingTxs(const CNode& peer) const;
798
801 bool m_initial_sync_finished GUARDED_BY(cs_main){false};
802
805 mutable Mutex m_peer_mutex;
812 std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);
813
815 std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main);
816
818 const CNodeState* State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main);
820 CNodeState* State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
821
822 uint32_t GetFetchFlags(const Peer& peer) const;
823
824 std::map<uint64_t, std::chrono::microseconds> m_next_inv_to_inbounds_per_network_key GUARDED_BY(g_msgproc_mutex);
825
827 int nSyncStarted GUARDED_BY(cs_main) = 0;
828
830 uint256 m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){};
831
838 std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);
839
841 std::atomic<int> m_wtxid_relay_peers{0};
842
844 int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
845
847 int m_num_preferred_download_peers GUARDED_BY(cs_main){0};
848
850 std::atomic<std::chrono::seconds> m_block_stalling_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT};
851
859 std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now,
860 std::chrono::seconds average_interval,
861 uint64_t network_key) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
862
863
864 // All of the following cache a recent block, and are protected by m_most_recent_block_mutex
865 Mutex m_most_recent_block_mutex;
866 std::shared_ptr<const CBlock> m_most_recent_block GUARDED_BY(m_most_recent_block_mutex);
867 std::shared_ptr<const CBlockHeaderAndShortTxIDs> m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex);
868 uint256 m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex);
869 std::unique_ptr<const std::map<GenTxid, CTransactionRef>> m_most_recent_block_txs GUARDED_BY(m_most_recent_block_mutex);
870
871 // Data about the low-work headers synchronization, aggregated from all peers' HeadersSyncStates.
873 Mutex m_headers_presync_mutex;
881 using HeadersPresyncStats = std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
883 std::map<NodeId, HeadersPresyncStats> m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex) {};
885 NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex) {-1};
887 std::atomic_bool m_headers_presync_should_signal{false};
888
890 int m_highest_fast_announce GUARDED_BY(::cs_main){0};
891
893 bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
894
896 bool IsBlockRequestedFromOutbound(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex);
897
905 void RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
906
907 /* Mark a block as in flight
908 * Returns false, still setting pit, if the block was already in flight from the same peer
909 * pit will only be valid as long as the same cs_main lock is being held
910 */
911 bool BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
912
913 bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
914
918 void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
919
921 void TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex* from_tip, const CBlockIndex* target_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
922
950 void FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain=nullptr, NodeId* nodeStaller=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
951
952 /* Multimap used to preserve insertion order */
953 typedef std::multimap<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator>> BlockDownloadMap;
954 BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main);
955
957 std::atomic<std::chrono::seconds> m_last_tip_update{0s};
958
960 CTransactionRef FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid)
961 EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, !tx_relay.m_tx_inventory_mutex);
962
963 void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
964 EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex, NetEventsInterface::g_msgproc_mutex)
966
968 void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked);
969
971 void ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions)
972 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex);
973
980 void PushPrivateBroadcastTx(CNode& node) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex);
981
988 void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex);
989
991 std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
992
994 int m_peers_downloading_from GUARDED_BY(cs_main) = 0;
995
996 void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
997
1001 std::vector<std::pair<Wtxid, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex);
1003 size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0;
1004
1006 void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1008 void UpdateBlockAvailability(NodeId nodeid, const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1009 bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1010
1015 int64_t ApproximateBestBlockDepth() const;
1016
1023 bool BlockRequestAllowed(const CBlockIndex& block_index) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1024 bool AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1025 void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv)
1026 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex);
1027
1043 bool PrepareBlockFilterRequest(CNode& node, Peer& peer,
1044 BlockFilterType filter_type, uint32_t start_height,
1045 const uint256& stop_hash, uint32_t max_height_diff,
1046 const CBlockIndex*& stop_index,
1047 BlockFilterIndex*& filter_index);
1048
1058 void ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv);
1059
1069 void ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv);
1070
1080 void ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv);
1081
1088 bool SetupAddressRelay(const CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1089
1090 void AddAddressKnown(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1091 void PushAddress(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1092
1093 void LogBlockHeader(const CBlockIndex& index, const CNode& peer, bool via_compact_block);
1094
1096 PrivateBroadcast m_tx_for_private_broadcast;
1097};
1098
1099const CNodeState* PeerManagerImpl::State(NodeId pnode) const
1100{
1101 std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
1102 if (it == m_node_states.end())
1103 return nullptr;
1104 return &it->second;
1105}
1106
1107CNodeState* PeerManagerImpl::State(NodeId pnode)
1108{
1109 return const_cast<CNodeState*>(std::as_const(*this).State(pnode));
1110}
1111
1117static bool IsAddrCompatible(const Peer& peer, const CAddress& addr)
1118{
1119 return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
1120}
1121
1122void PeerManagerImpl::AddAddressKnown(Peer& peer, const CAddress& addr)
1123{
1124 assert(peer.m_addr_known);
1125 peer.m_addr_known->insert(addr.GetKey());
1126}
1127
1128void PeerManagerImpl::PushAddress(Peer& peer, const CAddress& addr)
1129{
1130 // Known checking here is only to save space from duplicates.
1131 // Before sending, we'll filter it again for known addresses that were
1132 // added after addresses were pushed.
1133 assert(peer.m_addr_known);
1134 if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) && IsAddrCompatible(peer, addr)) {
1135 if (peer.m_addrs_to_send.size() >= MAX_ADDR_TO_SEND) {
1136 peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] = addr;
1137 } else {
1138 peer.m_addrs_to_send.push_back(addr);
1139 }
1140 }
1141}
1142
1143static void AddKnownTx(Peer& peer, const uint256& hash)
1144{
1145 auto tx_relay = peer.GetTxRelay();
1146 if (!tx_relay) return;
1147
1148 LOCK(tx_relay->m_tx_inventory_mutex);
1149 tx_relay->m_tx_inventory_known_filter.insert(hash);
1150}
1151
1153static bool CanServeBlocks(const Peer& peer)
1154{
1155 return peer.m_their_services & (NODE_NETWORK|NODE_NETWORK_LIMITED);
1156}
1157
1160static bool IsLimitedPeer(const Peer& peer)
1161{
1162 return (!(peer.m_their_services & NODE_NETWORK) &&
1163 (peer.m_their_services & NODE_NETWORK_LIMITED));
1164}
1165
1167static bool CanServeWitnesses(const Peer& peer)
1168{
1169 return peer.m_their_services & NODE_WITNESS;
1170}
1171
1172std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
1173 std::chrono::seconds average_interval,
1174 uint64_t network_key)
1175{
1176 auto [it, inserted] = m_next_inv_to_inbounds_per_network_key.try_emplace(network_key, 0us);
1177 auto& timer{it->second};
1178 if (timer < now) {
1179 timer = now + m_rng.rand_exp_duration(average_interval);
1180 }
1181 return timer;
1182}
1183
1184bool PeerManagerImpl::IsBlockRequested(const uint256& hash)
1185{
1186 return mapBlocksInFlight.contains(hash);
1187}
1188
1189bool PeerManagerImpl::IsBlockRequestedFromOutbound(const uint256& hash)
1190{
1191 for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
1192 auto [nodeid, block_it] = range.first->second;
1193 PeerRef peer{GetPeerRef(nodeid)};
1194 if (peer && !peer->m_is_inbound) return true;
1195 }
1196
1197 return false;
1198}
1199
/** Remove the in-flight request(s) for `hash`, updating the owning peers'
 *  CNodeState download bookkeeping. If `from_peer` is given, only that peer's
 *  request is removed; otherwise all requests for the block are. */
void PeerManagerImpl::RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer)
{
    auto range = mapBlocksInFlight.equal_range(hash);
    if (range.first == range.second) {
        // Block was not requested from any peer
        return;
    }

    // We should not have requested too many of this block
    Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);

    while (range.first != range.second) {
        const auto& [node_id, list_it]{range.first->second};

        // When restricted to one peer, skip entries belonging to other peers.
        if (from_peer && *from_peer != node_id) {
            range.first++;
            continue;
        }

        CNodeState& state = *Assert(State(node_id));

        if (state.vBlocksInFlight.begin() == list_it) {
            // First block on the queue was received, update the start download time for the next one
            state.m_downloading_since = std::max(state.m_downloading_since, GetTime<std::chrono::microseconds>());
        }
        state.vBlocksInFlight.erase(list_it);

        if (state.vBlocksInFlight.empty()) {
            // Last validated block on the queue for this peer was received.
            m_peers_downloading_from--;
        }
        // A completed request means the peer is no longer considered stalling.
        state.m_stalling_since = 0us;

        // erase() returns the next element, keeping `range.first` valid.
        range.first = mapBlocksInFlight.erase(range.first);
    }
}
1236
/** Mark `block` as in flight from `nodeid` (see the in-class declaration for
 *  the `pit` output-iterator semantics). Returns false, still setting pit, if
 *  the block was already in flight from the same peer. */
bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit)
{
    const uint256& hash{block.GetBlockHash()};

    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // There should never be more simultaneous requests for one block than the
    // compact-block in-flight cap allows.
    Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);

    // Short-circuit most stuff in case it is from the same node
    for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
        if (range.first->second.first == nodeid) {
            if (pit) {
                *pit = &range.first->second.second;
            }
            return false;
        }
    }

    // Make sure it's not being fetched already from same peer.
    RemoveBlockRequest(hash, nodeid);

    // Compact-block reconstruction state is only allocated when the caller
    // asked for an iterator (i.e. for compact block downloads).
    std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
            {&block, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)});
    if (state->vBlocksInFlight.size() == 1) {
        // We're starting a block download (batch) from this peer.
        state->m_downloading_since = GetTime<std::chrono::microseconds>();
        m_peers_downloading_from++;
    }
    auto itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it)));
    if (pit) {
        *pit = &itInFlight->second.second;
    }
    return true;
}
1272
void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
{
    // Consider selecting `nodeid` as a BIP152 high-bandwidth compact-block
    // announcer, keeping at most 3 such peers at a time (oldest demoted
    // first). NOTE(review): an assertion line appears elided from this
    // listing right here (likely a cs_main lock assertion) — confirm
    // against the upstream file.

    // When in -blocksonly mode, never request high-bandwidth mode from peers. Our
    // mempool will not contain the transactions necessary to reconstruct the
    // compact block.
    if (m_opts.ignore_incoming_txs) return;

    CNodeState* nodestate = State(nodeid);
    PeerRef peer{GetPeerRef(nodeid)};
    if (!nodestate || !nodestate->m_provides_cmpctblocks) {
        // Don't request compact blocks if the peer has not signalled support
        return;
    }

    int num_outbound_hb_peers = 0;
    for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
        if (*it == nodeid) {
            // Already a HB peer: move it to the back (most-recently-used)
            // so it is not the next one demoted.
            lNodesAnnouncingHeaderAndIDs.erase(it);
            lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
            return;
        }
        PeerRef peer_ref{GetPeerRef(*it)};
        if (peer_ref && !peer_ref->m_is_inbound) ++num_outbound_hb_peers;
    }
    if (peer && peer->m_is_inbound) {
        // If we're adding an inbound HB peer, make sure we're not removing
        // our last outbound HB peer in the process.
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1) {
            PeerRef remove_peer{GetPeerRef(lNodesAnnouncingHeaderAndIDs.front())};
            if (remove_peer && !remove_peer->m_is_inbound) {
                // Put the HB outbound peer in the second slot, so that it
                // doesn't get removed.
                std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
            }
        }
    }
    m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
            // As per BIP152, we only get 3 of our peers to announce
            // blocks using compact encodings.
            m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this](CNode* pnodeStop){
                MakeAndPushMessage(*pnodeStop, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION);
                // save BIP152 bandwidth state: we select peer to be low-bandwidth
                pnodeStop->m_bip152_highbandwidth_to = false;
                return true;
            });
            lNodesAnnouncingHeaderAndIDs.pop_front();
        }
        MakeAndPushMessage(*pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/true, /*version=*/CMPCTBLOCKS_VERSION);
        // save BIP152 bandwidth state: we select peer to be high-bandwidth
        pfrom->m_bip152_highbandwidth_to = true;
        lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
        return true;
    });
}
1331
bool PeerManagerImpl::TipMayBeStale()
{
    // True when our tip has not advanced for more than three target block
    // intervals and no block downloads are in flight — a hint that peers
    // may not be relaying new blocks to us. NOTE(review): a line appears
    // elided from this listing right here (likely a lock assertion) —
    // confirm against the upstream file.
    const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
    if (m_last_tip_update.load() == 0s) {
        // First call: start the staleness clock from now rather than the
        // epoch, so we don't immediately report a stale tip at startup.
        m_last_tip_update = GetTime<std::chrono::seconds>();
    }
    return m_last_tip_update.load() < GetTime<std::chrono::seconds>() - std::chrono::seconds{consensusParams.nPowTargetSpacing * 3} && mapBlocksInFlight.empty();
}
1341
1342int64_t PeerManagerImpl::ApproximateBestBlockDepth() const
1343{
1344 return (GetTime<std::chrono::seconds>() - m_best_block_time.load()).count() / m_chainparams.GetConsensus().nPowTargetSpacing;
1345}
1346
1347bool PeerManagerImpl::CanDirectFetch()
1348{
1349 return m_chainman.ActiveChain().Tip()->Time() > NodeClock::now() - m_chainparams.GetConsensus().PowTargetSpacing() * 20;
1350}
1351
1352static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1353{
1354 if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
1355 return true;
1356 if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
1357 return true;
1358 return false;
1359}
1360
1361void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
1362 CNodeState *state = State(nodeid);
1363 assert(state != nullptr);
1364
1365 if (!state->hashLastUnknownBlock.IsNull()) {
1366 const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock);
1367 if (pindex && pindex->nChainWork > 0) {
1368 if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
1369 state->pindexBestKnownBlock = pindex;
1370 }
1371 state->hashLastUnknownBlock.SetNull();
1372 }
1373 }
1374}
1375
1376void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
1377 CNodeState *state = State(nodeid);
1378 assert(state != nullptr);
1379
1380 ProcessBlockAvailability(nodeid);
1381
1382 const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
1383 if (pindex && pindex->nChainWork > 0) {
1384 // An actually better block was announced.
1385 if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
1386 state->pindexBestKnownBlock = pindex;
1387 }
1388 } else {
1389 // An unknown block was announced; just assume that the latest one is the best one.
1390 state->hashLastUnknownBlock = hash;
1391 }
1392}
1393
// Logic for calculating which blocks to download from a given peer, given our current tip.
void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller)
{
    // Appends up to `count` block indexes to vBlocks that we should request
    // from this peer, walking from the last block we have in common with it
    // towards its best known block. If nothing can be fetched solely
    // because another peer's request occupies the end of the download
    // window, that peer's id is reported via nodeStaller (set inside
    // FindNextBlocks).
    if (count == 0)
        return;

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(peer.m_id);
    assert(state != nullptr);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(peer.m_id);

    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < m_chainman.ActiveChain().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
        // This peer has nothing interesting.
        return;
    }

    // When syncing with AssumeUtxo and the snapshot has not yet been validated,
    // abort downloading blocks from peers that don't have the snapshot block in their best chain.
    // We can't reorg to this chain due to missing undo data until validation completes,
    // so downloading blocks from it would be futile.
    const CBlockIndex* snap_base{m_chainman.CurrentChainstate().SnapshotBase()};
    if (snap_base && m_chainman.CurrentChainstate().m_assumeutxo == Assumeutxo::UNVALIDATED &&
        state->pindexBestKnownBlock->GetAncestor(snap_base->nHeight) != snap_base) {
        LogDebug(BCLog::NET, "Not downloading blocks from peer=%d, which doesn't have the snapshot block in its best chain.\n", peer.m_id);
        return;
    }

    // Determine the forking point between the peer's chain and our chain:
    // pindexLastCommonBlock is required to be an ancestor of pindexBestKnownBlock, and will be used as a starting point.
    // It is being set to the fork point between the peer's best known block and the current tip, unless it is already set to
    // an ancestor with more work than the fork point.
    auto fork_point = LastCommonAncestor(state->pindexBestKnownBlock, m_chainman.ActiveTip());
    if (state->pindexLastCommonBlock == nullptr ||
        fork_point->nChainWork > state->pindexLastCommonBlock->nChainWork ||
        state->pindexBestKnownBlock->GetAncestor(state->pindexLastCommonBlock->nHeight) != state->pindexLastCommonBlock) {
        state->pindexLastCommonBlock = fork_point;
    }
    // Nothing left to download if we already share the peer's best block.
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
        return;

    const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
    // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
    // download that next block if the window were 1 larger.
    int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;

    FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd, &m_chainman.ActiveChain(), &nodeStaller);
}
1444
1445void PeerManagerImpl::TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex *from_tip, const CBlockIndex* target_block)
1446{
1447 Assert(from_tip);
1448 Assert(target_block);
1449
1450 if (vBlocks.size() >= count) {
1451 return;
1452 }
1453
1454 vBlocks.reserve(count);
1455 CNodeState *state = Assert(State(peer.m_id));
1456
1457 if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) != target_block) {
1458 // This peer can't provide us the complete series of blocks leading up to the
1459 // assumeutxo snapshot base.
1460 //
1461 // Presumably this peer's chain has less work than our ActiveChain()'s tip, or else we
1462 // will eventually crash when we try to reorg to it. Let other logic
1463 // deal with whether we disconnect this peer.
1464 //
1465 // TODO at some point in the future, we might choose to request what blocks
1466 // this peer does have from the historical chain, despite it not having a
1467 // complete history beneath the snapshot base.
1468 return;
1469 }
1470
1471 FindNextBlocks(vBlocks, peer, state, from_tip, count, std::min<int>(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW, target_block->nHeight));
1472}
1473
/** Shared worker for FindNextBlocksToDownload and TryDownloadingHistoricalBlocks.
 *  Walks from pindexWalk towards the peer's best known block, appending
 *  downloadable blocks to vBlocks until `count` entries are collected or the
 *  window end is reached. When activeChain is given, pindexLastCommonBlock is
 *  advanced past blocks we already have. When nodeStaller is given and
 *  nothing could be fetched solely because another peer's request occupies
 *  the window end, that peer's id is written to it. */
void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain, NodeId* nodeStaller)
{
    std::vector<const CBlockIndex*> vToFetch;
    // Never walk past the peer's best known block; allow one block past the
    // window end so stalling can be detected.
    int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    bool is_limited_peer = IsLimitedPeer(peer);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
        // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
        // as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        // Fill the batch backwards via pprev links, cheaper than repeated
        // GetAncestor calls.
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
        // are not yet downloaded and not in flight to vBlocks. In the meantime, update
        // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
        // already part of our chain (and therefore don't need it even if pruned).
        for (const CBlockIndex* pindex : vToFetch) {
            if (!pindex->IsValid(BLOCK_VALID_TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }

            if (!CanServeWitnesses(peer) && DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) {
                // We wouldn't download this block or its descendants from this peer.
                return;
            }

            if (pindex->nStatus & BLOCK_HAVE_DATA || (activeChain && activeChain->Contains(pindex))) {
                if (activeChain && pindex->HaveNumChainTxs()) {
                    state->pindexLastCommonBlock = pindex;
                }
                continue;
            }

            // Is block in-flight?
            if (IsBlockRequested(pindex->GetBlockHash())) {
                if (waitingfor == -1) {
                    // This is the first already-in-flight block.
                    waitingfor = mapBlocksInFlight.lower_bound(pindex->GetBlockHash())->second.first;
                }
                continue;
            }

            // The block is not already downloaded, and not yet in flight.
            if (pindex->nHeight > nWindowEnd) {
                // We reached the end of the window.
                if (vBlocks.size() == 0 && waitingfor != peer.m_id) {
                    // We aren't able to fetch anything, but we would be if the download window was one larger.
                    if (nodeStaller) *nodeStaller = waitingfor;
                }
                return;
            }

            // Don't request blocks that go further than what limited peers can provide
            if (is_limited_peer && (state->pindexBestKnownBlock->nHeight - pindex->nHeight >= static_cast<int>(NODE_NETWORK_LIMITED_MIN_BLOCKS) - 2 /* two blocks buffer for possible races */)) {
                continue;
            }

            vBlocks.push_back(pindex);
            if (vBlocks.size() == count) {
                return;
            }
        }
    }
}
1545
1546} // namespace
1547
void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer)
{
    // Send our VERSION handshake message to a newly connected peer.
    // Private-broadcast connections deliberately send generic/empty values
    // to avoid leaking identifying information.
    uint64_t my_services;
    int64_t my_time;
    uint64_t your_services;
    CService your_addr;
    std::string my_user_agent;
    int my_height;
    bool my_tx_relay;
    if (pnode.IsPrivateBroadcastConn()) {
        my_services = NODE_NONE;
        my_time = 0;
        your_services = NODE_NONE;
        your_addr = CService{};
        my_user_agent = "/pynode:0.0.1/"; // Use a constant other than the default (or user-configured). See https://github.com/bitcoin/bitcoin/pull/27509#discussion_r1214671917
        my_height = 0;
        my_tx_relay = false;
    } else {
        const CAddress& addr{pnode.addr};
        my_services = peer.m_our_services;
        // NOTE(review): the assignment to my_time appears elided from this
        // listing here — confirm against the upstream file.
        your_services = addr.nServices;
        // Only echo back an address form the peer can meaningfully use.
        your_addr = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ? CService{addr} : CService{};
        my_user_agent = strSubVersion;
        my_height = m_best_height;
        my_tx_relay = !RejectIncomingTxs(pnode);
    }

    // NOTE(review): the leading message-type/protocol-version arguments of
    // this call appear elided from this listing — confirm against upstream.
    MakeAndPushMessage(
        pnode,
        my_services,
        my_time,
        // your_services + CNetAddr::V1(your_addr) is the pre-version-31402 serialization of your_addr (without nTime)
        your_services, CNetAddr::V1(your_addr),
        // same, for a dummy address
        my_services, CNetAddr::V1(CService{}),
        pnode.GetLocalNonce(),
        my_user_agent,
        my_height,
        my_tx_relay);

    LogDebug(
        BCLog::NET, "send version message: version=%d, blocks=%d%s, txrelay=%d, peer=%d\n",
        PROTOCOL_VERSION, my_height,
        fLogIPs ? strprintf(", them=%s", your_addr.ToStringAddrPort()) : "",
        my_tx_relay, pnode.GetId());
}
1597
1598void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
1599{
1600 LOCK(cs_main);
1601 CNodeState *state = State(node);
1602 if (state) state->m_last_block_announcement = time_in_seconds;
1603}
1604
void PeerManagerImpl::InitializeNode(const CNode& node, ServiceFlags our_services)
{
    // Create per-peer state for a newly connected node: a CNodeState entry
    // (guarded by cs_main) and a shared Peer object (guarded by m_peer_mutex).
    NodeId nodeid = node.GetId();
    {
        LOCK(cs_main); // For m_node_states
        m_node_states.try_emplace(m_node_states.end(), nodeid);
    }
    // Sanity check: the tx download manager must have no leftover state for
    // this node id.
    WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty(nodeid));

    // NOTE(review): the condition guarding this branch appears elided from
    // this listing (it adds NODE_BLOOM to our advertised services) —
    // confirm against the upstream file.
        our_services = static_cast<ServiceFlags>(our_services | NODE_BLOOM);
    }

    PeerRef peer = std::make_shared<Peer>(nodeid, our_services, node.IsInboundConn());
    {
        LOCK(m_peer_mutex);
        m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
    }
}
1624
1625void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler)
1626{
1627 std::set<Txid> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();
1628
1629 for (const auto& txid : unbroadcast_txids) {
1630 CTransactionRef tx = m_mempool.get(txid);
1631
1632 if (tx != nullptr) {
1633 InitiateTxBroadcastToAll(txid, tx->GetWitnessHash());
1634 } else {
1635 m_mempool.RemoveUnbroadcastTx(txid, true);
1636 }
1637 }
1638
1639 // Schedule next run for 10-15 minutes in the future.
1640 // We add randomness on every cycle to avoid the possibility of P2P fingerprinting.
1641 const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min);
1642 scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
1643}
1644
void PeerManagerImpl::ReattemptPrivateBroadcast(CScheduler& scheduler)
{
    // Periodic maintenance for privately broadcast transactions: give up on
    // ones that are no longer mempool-acceptable, request fresh private
    // connections for those still worth rebroadcasting, and reschedule.
    // Remove stale transactions that are no longer relevant (e.g. already in
    // the mempool or mined) and count the remaining ones.
    size_t num_for_rebroadcast{0};
    const auto stale_txs = m_tx_for_private_broadcast.GetStale();
    if (!stale_txs.empty()) {
        LOCK(cs_main);
        for (const auto& stale_tx : stale_txs) {
            // Dry-run mempool acceptance to decide whether a rebroadcast
            // could still succeed.
            auto mempool_acceptable = m_chainman.ProcessTransaction(stale_tx, /*test_accept=*/true);
            if (mempool_acceptable.m_result_type == MempoolAcceptResult::ResultType::VALID) {
                // NOTE(review): the start of a LogDebug(...) call appears
                // elided from this listing here — confirm against upstream.
                "Reattempting broadcast of stale txid=%s wtxid=%s",
                stale_tx->GetHash().ToString(), stale_tx->GetWitnessHash().ToString());
                ++num_for_rebroadcast;
            } else {
                LogDebug(BCLog::PRIVBROADCAST, "Giving up broadcast attempts for txid=%s wtxid=%s: %s",
                         stale_tx->GetHash().ToString(), stale_tx->GetWitnessHash().ToString(),
                         mempool_acceptable.m_state.ToString());
                m_tx_for_private_broadcast.Remove(stale_tx);
            }
        }

        // This could overshoot, but that is ok - we will open some private connections in vain.
        m_connman.m_private_broadcast.NumToOpenAdd(num_for_rebroadcast);
    }

    // Reschedule with jitter (2-3 minutes) to avoid a predictable cadence.
    const auto delta{2min + FastRandomContext().randrange<std::chrono::milliseconds>(1min)};
    scheduler.scheduleFromNow([&] { ReattemptPrivateBroadcast(scheduler); }, delta);
}
1675
void PeerManagerImpl::FinalizeNode(const CNode& node)
{
    // Tear down all per-peer state when a connection closes, keeping the
    // global download/relay counters consistent with the remaining peers.
    NodeId nodeid = node.GetId();
    {
        LOCK(cs_main);
        {
            // We remove the PeerRef from g_peer_map here, but we don't always
            // destruct the Peer. Sometimes another thread is still holding a
            // PeerRef, so the refcount is >= 1. Be careful not to do any
            // processing here that assumes Peer won't be changed before it's
            // destructed.
            PeerRef peer = RemovePeer(nodeid);
            assert(peer != nullptr);
            m_wtxid_relay_peers -= peer->m_wtxid_relay;
            assert(m_wtxid_relay_peers >= 0);
        }
        CNodeState *state = State(nodeid);
        assert(state != nullptr);

        if (state->fSyncStarted)
            nSyncStarted--;

        // Erase every in-flight block entry attributed to this peer from
        // the global in-flight multimap.
        for (const QueuedBlock& entry : state->vBlocksInFlight) {
            auto range = mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash());
            while (range.first != range.second) {
                auto [node_id, list_it] = range.first->second;
                if (node_id != nodeid) {
                    range.first++;
                } else {
                    range.first = mapBlocksInFlight.erase(range.first);
                }
            }
        }
        {
            LOCK(m_tx_download_mutex);
            m_txdownloadman.DisconnectedPeer(nodeid);
        }
        if (m_txreconciliation) m_txreconciliation->ForgetPeer(nodeid);
        m_num_preferred_download_peers -= state->fPreferredDownload;
        m_peers_downloading_from -= (!state->vBlocksInFlight.empty());
        assert(m_peers_downloading_from >= 0);
        m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
        assert(m_outbound_peers_with_protect_from_disconnect >= 0);

        m_node_states.erase(nodeid);

        if (m_node_states.empty()) {
            // Do a consistency check after the last peer is removed.
            assert(mapBlocksInFlight.empty());
            assert(m_num_preferred_download_peers == 0);
            assert(m_peers_downloading_from == 0);
            assert(m_outbound_peers_with_protect_from_disconnect == 0);
            assert(m_wtxid_relay_peers == 0);
            WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty());
        }
    } // cs_main
    if (node.fSuccessfullyConnected &&
        !node.IsBlockOnlyConn() && !node.IsPrivateBroadcastConn() && !node.IsInboundConn()) {
        // Only change visible addrman state for full outbound peers. We don't
        // call Connected() for feeler connections since they don't have
        // fSuccessfullyConnected set. Also don't call Connected() for private broadcast
        // connections since they could leak information in addrman.
        m_addrman.Connected(node.addr);
    }
    {
        LOCK(m_headers_presync_mutex);
        m_headers_presync_stats.erase(nodeid);
    }
    if (node.IsPrivateBroadcastConn() &&
        !m_tx_for_private_broadcast.DidNodeConfirmReception(nodeid) &&
        m_tx_for_private_broadcast.HavePendingTransactions()) {
        // The private-broadcast target disconnected before confirming
        // reception; ask for one replacement connection.
        m_connman.m_private_broadcast.NumToOpenAdd(1);
    }
    LogDebug(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
}
1752
1753bool PeerManagerImpl::HasAllDesirableServiceFlags(ServiceFlags services) const
1754{
1755 // Shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services)
1756 return !(GetDesirableServiceFlags(services) & (~services));
1757}
1758
ServiceFlags PeerManagerImpl::GetDesirableServiceFlags(ServiceFlags services) const
{
    // Compute which service bits we want from a peer that advertises
    // `services`. NOTE(review): the return statements appear elided from
    // this listing (presumably returning a limited-service flag set near
    // the tip and a full-service flag set otherwise) — confirm against the
    // upstream file.
    if (services & NODE_NETWORK_LIMITED) {
        // Limited peers are desirable when we are close to the tip.
        if (ApproximateBestBlockDepth() < NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS) {
        }
    }
}
1769
1770PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const
1771{
1772 LOCK(m_peer_mutex);
1773 auto it = m_peer_map.find(id);
1774 return it != m_peer_map.end() ? it->second : nullptr;
1775}
1776
1777PeerRef PeerManagerImpl::RemovePeer(NodeId id)
1778{
1779 PeerRef ret;
1780 LOCK(m_peer_mutex);
1781 auto it = m_peer_map.find(id);
1782 if (it != m_peer_map.end()) {
1783 ret = std::move(it->second);
1784 m_peer_map.erase(it);
1785 }
1786 return ret;
1787}
1788
bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const
{
    // Copy a snapshot of this peer's sync/relay statistics into `stats`,
    // drawing from both the cs_main-guarded CNodeState and the Peer object.
    // Returns false if either structure no longer exists for this id.
    {
        LOCK(cs_main);
        const CNodeState* state = State(nodeid);
        if (state == nullptr)
            return false;
        // -1 signals "unknown" height to callers.
        stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
        stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
        for (const QueuedBlock& queue : state->vBlocksInFlight) {
            if (queue.pindex)
                stats.vHeightInFlight.push_back(queue.pindex->nHeight);
        }
    }

    PeerRef peer = GetPeerRef(nodeid);
    if (peer == nullptr) return false;
    stats.their_services = peer->m_their_services;
    stats.m_starting_height = peer->m_starting_height;
    // It is common for nodes with good ping times to suddenly become lagged,
    // due to a new block arriving or other large transfer.
    // Merely reporting pingtime might fool the caller into thinking the node was still responsive,
    // since pingtime does not update until the ping is complete, which might take a while.
    // So, if a ping is taking an unusually long time in flight,
    // the caller can immediately detect that this is happening.
    auto ping_wait{0us};
    if ((0 != peer->m_ping_nonce_sent) && (0 != peer->m_ping_start.load().count())) {
        ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
    }

    if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
        stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs);
        stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load();
        LOCK(tx_relay->m_tx_inventory_mutex);
        stats.m_last_inv_seq = tx_relay->m_last_inv_sequence;
        stats.m_inv_to_send = tx_relay->m_tx_inventory_to_send.size();
    } else {
        // Peer has no tx-relay structure (e.g. block-relay-only connection).
        stats.m_relay_txs = false;
        stats.m_fee_filter_received = 0;
        stats.m_inv_to_send = 0;
    }

    stats.m_ping_wait = ping_wait;
    stats.m_addr_processed = peer->m_addr_processed.load();
    stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
    stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load();
    {
        LOCK(peer->m_headers_sync_mutex);
        if (peer->m_headers_sync) {
            stats.presync_height = peer->m_headers_sync->GetPresyncHeight();
        }
    }
    stats.time_offset = peer->m_time_offset;

    return true;
}
1845
1846std::vector<node::TxOrphanage::OrphanInfo> PeerManagerImpl::GetOrphanTransactions()
1847{
1848 LOCK(m_tx_download_mutex);
1849 return m_txdownloadman.GetOrphanTransactions();
1850}
1851
1852PeerManagerInfo PeerManagerImpl::GetInfo() const
1853{
1854 return PeerManagerInfo{
1855 .median_outbound_time_offset = m_outbound_time_offsets.Median(),
1856 .ignores_incoming_txs = m_opts.ignore_incoming_txs,
1857 };
1858}
1859
1860std::vector<PrivateBroadcast::TxBroadcastInfo> PeerManagerImpl::GetPrivateBroadcastInfo() const
1861{
1862 return m_tx_for_private_broadcast.GetBroadcastInfo();
1863}
1864
1865std::vector<CTransactionRef> PeerManagerImpl::AbortPrivateBroadcast(const uint256& id)
1866{
1867 const auto snapshot{m_tx_for_private_broadcast.GetBroadcastInfo()};
1868 std::vector<CTransactionRef> removed_txs;
1869
1870 size_t connections_cancelled{0};
1871 for (const auto& [tx, _] : snapshot) {
1872 if (tx->GetHash().ToUint256() != id && tx->GetWitnessHash().ToUint256() != id) continue;
1873 if (const auto peer_acks{m_tx_for_private_broadcast.Remove(tx)}) {
1874 removed_txs.push_back(tx);
1875 if (NUM_PRIVATE_BROADCAST_PER_TX > *peer_acks) {
1876 connections_cancelled += (NUM_PRIVATE_BROADCAST_PER_TX - *peer_acks);
1877 }
1878 }
1879 }
1880 m_connman.m_private_broadcast.NumToOpenSub(connections_cancelled);
1881
1882 return removed_txs;
1883}
1884
1885void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx)
1886{
1887 if (m_opts.max_extra_txs <= 0)
1888 return;
1889 if (!vExtraTxnForCompact.size())
1890 vExtraTxnForCompact.resize(m_opts.max_extra_txs);
1891 vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetWitnessHash(), tx);
1892 vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs;
1893}
1894
1895void PeerManagerImpl::Misbehaving(Peer& peer, const std::string& message)
1896{
1897 LOCK(peer.m_misbehavior_mutex);
1898
1899 const std::string message_prefixed = message.empty() ? "" : (": " + message);
1900 peer.m_should_discourage = true;
1901 LogDebug(BCLog::NET, "Misbehaving: peer=%d%s\n", peer.m_id, message_prefixed);
1902 TRACEPOINT(net, misbehaving_connection,
1903 peer.m_id,
1904 message.c_str()
1905 );
1906}
1907
void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
        bool via_compact_block, const std::string& message)
{
    // Decide whether a block validation failure should count against the
    // peer that supplied the block, and mark it misbehaving if so.
    // NOTE(review): the switch's case labels (BlockValidationResult
    // enumerators) appear elided from this listing — confirm the mapping
    // of each branch against the upstream file.
    PeerRef peer{GetPeerRef(nodeid)};
    switch (state.GetResult()) {
        break;
        // We didn't try to process the block because the header chain may have
        // too little work.
        break;
    // The node is providing invalid data:
        if (!via_compact_block) {
            if (peer) Misbehaving(*peer, message);
            return;
        }
        break;
    {
        // Discourage outbound (but not inbound) peers if on an invalid chain.
        // Exempt HB compact block peers. Manual connections are always protected from discouragement.
        if (peer && !via_compact_block && !peer->m_is_inbound) {
            if (peer) Misbehaving(*peer, message);
            return;
        }
        break;
    }
        if (peer) Misbehaving(*peer, message);
        return;
    // Conflicting (but not necessarily invalid) data or different policy:
        if (peer) Misbehaving(*peer, message);
        return;
        break;
    }
    if (message != "") {
        LogDebug(BCLog::NET, "peer=%d: %s\n", nodeid, message);
    }
}
1952
bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex& block_index)
{
    // A peer may request this block if it is on our active chain, or if it
    // is a fully script-validated stale-chain block that is recent enough
    // (in both wall-clock and proof-equivalent time) — this limits
    // fingerprinting via requests for old side-chain blocks. NOTE(review):
    // a line appears elided from this listing right here (likely a lock
    // assertion) — confirm against the upstream file.
    if (m_chainman.ActiveChain().Contains(&block_index)) return true;
    return block_index.IsValid(BLOCK_VALID_SCRIPTS) && (m_chainman.m_best_header != nullptr) &&
           (m_chainman.m_best_header->GetBlockTime() - block_index.GetBlockTime() < STALE_RELAY_AGE_LIMIT) &&
           (GetBlockProofEquivalentTime(*m_chainman.m_best_header, block_index, *m_chainman.m_best_header, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
}
1961
1962util::Expected<void, std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index)
1963{
1964 if (m_chainman.m_blockman.LoadingBlocks()) return util::Unexpected{"Loading blocks ..."};
1965
1966 // Ensure this peer exists and hasn't been disconnected
1967 PeerRef peer = GetPeerRef(peer_id);
1968 if (peer == nullptr) return util::Unexpected{"Peer does not exist"};
1969
1970 // Ignore pre-segwit peers
1971 if (!CanServeWitnesses(*peer)) return util::Unexpected{"Pre-SegWit peer"};
1972
1973 LOCK(cs_main);
1974
1975 // Forget about all prior requests
1976 RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt);
1977
1978 // Mark block as in-flight
1979 if (!BlockRequested(peer_id, block_index)) return util::Unexpected{"Already requested from this peer"};
1980
1981 // Construct message to request the block
1982 const uint256& hash{block_index.GetBlockHash()};
1983 std::vector<CInv> invs{CInv(MSG_BLOCK | MSG_WITNESS_FLAG, hash)};
1984
1985 // Send block request message to the peer
1986 bool success = m_connman.ForNode(peer_id, [this, &invs](CNode* node) {
1987 this->MakeAndPushMessage(*node, NetMsgType::GETDATA, invs);
1988 return true;
1989 });
1990
1991 if (!success) return util::Unexpected{"Peer not fully connected"};
1992
1993 LogDebug(BCLog::NET, "Requesting block %s from peer=%d\n",
1994 hash.ToString(), peer_id);
1995 return {};
1996}
1997
1998std::unique_ptr<PeerManager> PeerManager::make(CConnman& connman, AddrMan& addrman,
1999 BanMan* banman, ChainstateManager& chainman,
2000 CTxMemPool& pool, node::Warnings& warnings, Options opts)
2001{
2002 return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman, pool, warnings, opts);
2003}
2004
// Wire up the peer manager's collaborators. Member-initializer order must
// match the member declaration order; note m_rng is initialized first and
// then shared with the fee filter rounder and the tx download manager.
PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman,
        BanMan* banman, ChainstateManager& chainman,
        CTxMemPool& pool, node::Warnings& warnings, Options opts)
    : m_rng{opts.deterministic_rng},
      m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}, m_rng},
      m_chainparams(chainman.GetParams()),
      m_connman(connman),
      m_addrman(addrman),
      m_banman(banman),
      m_chainman(chainman),
      m_mempool(pool),
      m_txdownloadman(node::TxDownloadOptions{pool, m_rng, opts.deterministic_rng}),
      m_warnings{warnings},
      m_opts{opts}
{
    // While Erlay support is incomplete, it must be enabled explicitly via -txreconciliation.
    // This argument can go away after Erlay support is complete.
    if (opts.reconcile_txs) {
        m_txreconciliation = std::make_unique<TxReconciliationTracker>(TXRECONCILIATION_VERSION);
    }
}
2026
2027void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler)
2028{
2029 // Stale tip checking and peer eviction are on two different timers, but we
2030 // don't want them to get out of sync due to drift in the scheduler, so we
2031 // combine them in one function and schedule at the quicker (peer-eviction)
2032 // timer.
2033 static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
2034 scheduler.scheduleEvery([this] { this->CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
2035
2036 // schedule next run for 10-15 minutes in the future
2037 const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min);
2038 scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
2039
2040 if (m_opts.private_broadcast) {
2041 scheduler.scheduleFromNow([&] { ReattemptPrivateBroadcast(scheduler); }, 0min);
2042 }
2043}
2044
2045void PeerManagerImpl::ActiveTipChange(const CBlockIndex& new_tip, bool is_ibd)
2046{
2047 // Ensure mempool mutex was released, otherwise deadlock may occur if another thread holding
2048 // m_tx_download_mutex waits on the mempool mutex.
2049 AssertLockNotHeld(m_mempool.cs);
2050 AssertLockNotHeld(m_tx_download_mutex);
2051
2052 if (!is_ibd) {
2053 LOCK(m_tx_download_mutex);
2054 // If the chain tip has changed, previously rejected transactions might now be valid, e.g. due
2055 // to a timelock. Reset the rejection filters to give those transactions another chance if we
2056 // see them again.
2057 m_txdownloadman.ActiveTipChange();
2058 }
2059}
2060
void PeerManagerImpl::BlockConnected(
    const ChainstateRole& role,
    const std::shared_ptr<const CBlock>& pblock,
    const CBlockIndex* pindex)
{
    // Update this for all chainstate roles so that we don't mistakenly see peers
    // helping us do background IBD as having a stale tip.
    m_last_tip_update = GetTime<std::chrono::seconds>();

    // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value
    auto stalling_timeout = m_block_stalling_timeout.load();
    Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT);
    if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) {
        // Decay by 15% per connected block, never going below the default.
        const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT);
        // compare_exchange_strong: another thread may have changed the timeout
        // concurrently; only log when our decrease actually took effect.
        if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
            LogDebug(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout));
        }
    }

    // The following task can be skipped since we don't maintain a mempool for
    // the historical chainstate, or during ibd since we don't receive incoming
    // transactions from peers into the mempool.
    if (!role.historical && !m_chainman.IsInitialBlockDownload()) {
        LOCK(m_tx_download_mutex);
        m_txdownloadman.BlockConnected(pblock);
    }
}
2094
2095void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
2096{
2097 LOCK(m_tx_download_mutex);
2098 m_txdownloadman.BlockDisconnected();
2099}
2100
void PeerManagerImpl::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock)
{
    // Pre-build the compact-block representation once; it is shared by the
    // most-recent-block cache and (lazily) by the announcement below.
    auto pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock, FastRandomContext().rand64());

    LOCK(cs_main);

    // Only fast-announce each height once, even if multiple competing blocks
    // at the same height come in.
    if (pindex->nHeight <= m_highest_fast_announce)
        return;
    m_highest_fast_announce = pindex->nHeight;

    if (!DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) return;

    uint256 hashBlock(pblock->GetHash());
    // Deferred serialization: the CMPCTBLOCK message is only built if at least
    // one peer below actually needs it.
    const std::shared_future<CSerializedNetMsg> lazy_ser{
        std::async(std::launch::deferred, [&] { return NetMsg::Make(NetMsgType::CMPCTBLOCK, *pcmpctblock); })};

    {
        // Index the block's transactions by both txid and wtxid so GETDATA
        // requests for either id can be served from the cache.
        auto most_recent_block_txs = std::make_unique<std::map<GenTxid, CTransactionRef>>();
        for (const auto& tx : pblock->vtx) {
            most_recent_block_txs->emplace(tx->GetHash(), tx);
            most_recent_block_txs->emplace(tx->GetWitnessHash(), tx);
        }

        LOCK(m_most_recent_block_mutex);
        m_most_recent_block_hash = hashBlock;
        m_most_recent_block = pblock;
        m_most_recent_compact_block = pcmpctblock;
        m_most_recent_block_txs = std::move(most_recent_block_txs);
    }

    m_connman.ForEachNode([this, pindex, &lazy_ser, &hashBlock](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
        // NOTE(review): two lines appear elided in this rendering (likely a
        // cs_main lock assertion and the guard condition for the `return`
        // below) — verify against the upstream file before relying on this
        // excerpt.

            return;
        ProcessBlockAvailability(pnode->GetId());
        CNodeState &state = *State(pnode->GetId());
        // If the peer has, or we announced to them the previous block already,
        // but we don't think they have this one, go ahead and announce it
        if (state.m_requested_hb_cmpctblocks && !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {

            LogDebug(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock",
                    hashBlock.ToString(), pnode->GetId());

            const CSerializedNetMsg& ser_cmpctblock{lazy_ser.get()};
            PushMessage(*pnode, ser_cmpctblock.Copy());
            state.pindexBestHeaderSent = pindex;
        }
    });
}
2155
void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
{
    SetBestBlock(pindexNew->nHeight, std::chrono::seconds{pindexNew->GetBlockTime()});

    // Don't relay inventory during initial block download.
    if (fInitialDownload) return;

    // Find the hashes of all blocks that weren't previously in the best chain.
    // Walks back from the new tip to the fork point, collecting tip-first.
    std::vector<uint256> vHashes;
    const CBlockIndex *pindexToAnnounce = pindexNew;
    while (pindexToAnnounce != pindexFork) {
        vHashes.push_back(pindexToAnnounce->GetBlockHash());
        pindexToAnnounce = pindexToAnnounce->pprev;
        if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
            // Limit announcements in case of a huge reorganization.
            // Rely on the peer's synchronization mechanism in that case.
            break;
        }
    }

    {
        LOCK(m_peer_mutex);
        for (auto& it : m_peer_map) {
            Peer& peer = *it.second;
            LOCK(peer.m_block_inv_mutex);
            // vHashes was collected tip-first; queue oldest-first so each peer
            // receives announcements in chain order.
            for (const uint256& hash : vHashes | std::views::reverse) {
                peer.m_blocks_for_headers_relay.push_back(hash);
            }
        }
    }

    // Wake the message handler so the queued announcements go out promptly.
    m_connman.WakeMessageHandler();
}
2193
void PeerManagerImpl::BlockChecked(const std::shared_ptr<const CBlock>& block, const BlockValidationState& state)
{
    LOCK(cs_main);

    const uint256 hash(block->GetHash());
    // mapBlockSource maps block hash -> (peer that sent it, whether it was
    // requested via a non-compact-block path); may have no entry if the block
    // did not come from a peer.
    std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);

    // If the block failed validation, we know where it came from and we're still connected
    // to that peer, maybe punish.
    if (state.IsInvalid() &&
        it != mapBlockSource.end() &&
        State(it->second.first)) {
            MaybePunishNodeForBlock(/*nodeid=*/ it->second.first, state, /*via_compact_block=*/ !it->second.second);
    }
    // Check that:
    // 1. The block is valid
    // 2. We're not in initial block download
    // 3. This is currently the best block we're aware of. We haven't updated
    //    the tip yet so we have no way to check this directly here. Instead we
    //    just check that there are currently no other blocks in flight.
    else if (state.IsValid() &&
             !m_chainman.IsInitialBlockDownload() &&
             mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
        if (it != mapBlockSource.end()) {
            // Reward the source peer by considering it for high-bandwidth
            // compact block announcements.
            MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
        }
    }
    // The source entry is one-shot: erase it regardless of outcome.
    if (it != mapBlockSource.end())
        mapBlockSource.erase(it);
}
2228
2230//
2231// Messages
2232//
2233
2234bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash)
2235{
2236 return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr;
2237}
2238
2239void PeerManagerImpl::SendPings()
2240{
2241 LOCK(m_peer_mutex);
2242 for(auto& it : m_peer_map) it.second->m_ping_queued = true;
2243}
2244
void PeerManagerImpl::InitiateTxBroadcastToAll(const Txid& txid, const Wtxid& wtxid)
{
    LOCK(m_peer_mutex);
    for(auto& it : m_peer_map) {
        Peer& peer = *it.second;
        auto tx_relay = peer.GetTxRelay();
        // Skip peers that don't do transaction relay (e.g. block-relay-only).
        if (!tx_relay) continue;

        LOCK(tx_relay->m_tx_inventory_mutex);
        // Only queue transactions for announcement once the version handshake
        // is completed. The time of arrival for these transactions is
        // otherwise at risk of leaking to a spy, if the spy is able to
        // distinguish transactions received during the handshake from the rest
        // in the announcement.
        if (tx_relay->m_next_inv_send_time == 0s) continue;

        // Check the known-filter under whichever id the peer uses (wtxid if it
        // negotiated wtxid relay, txid otherwise); the to-send set itself is
        // always keyed by wtxid.
        const uint256& hash{peer.m_wtxid_relay ? wtxid.ToUint256() : txid.ToUint256()};
        if (!tx_relay->m_tx_inventory_known_filter.contains(hash)) {
            tx_relay->m_tx_inventory_to_send.insert(wtxid);
        }
    }
}
2267
void PeerManagerImpl::InitiateTxBroadcastPrivate(const CTransactionRef& tx)
{
    const auto txstr{strprintf("txid=%s, wtxid=%s", tx->GetHash().ToString(), tx->GetWitnessHash().ToString())};
    // Add() returns false if this transaction is already scheduled for
    // private broadcast, in which case the request is a no-op.
    if (m_tx_for_private_broadcast.Add(tx)) {
        LogDebug(BCLog::PRIVBROADCAST, "Requesting %d new connections due to %s", NUM_PRIVATE_BROADCAST_PER_TX, txstr);
        // NOTE(review): a line appears elided here in this rendering
        // (presumably the call that actually requests the new connections) —
        // verify against the upstream file.
    } else {
        LogDebug(BCLog::PRIVBROADCAST, "Ignoring unnecessary request to schedule an already scheduled transaction: %s", txstr);
    }
}
2278
void PeerManagerImpl::RelayAddress(NodeId originator,
                                   const CAddress& addr,
                                   bool fReachable)
{
    // We choose the same nodes within a given 24h window (if the list of connected
    // nodes does not change) and we don't relay to nodes that already know an
    // address. So within 24h we will likely relay a given address once. This is to
    // prevent a peer from unjustly giving their address better propagation by sending
    // it to us repeatedly.

    if (!fReachable && !addr.IsRelayable()) return;

    // Relay to a limited number of other nodes
    // Use deterministic randomness to send to the same nodes for 24 hours
    // at a time so the m_addr_knowns of the chosen nodes prevent repeats
    const uint64_t hash_addr{CServiceHash(0, 0)(addr)};
    const auto current_time{GetTime<std::chrono::seconds>()};
    // Adding address hash makes exact rotation time different per address, while preserving periodicity.
    const uint64_t time_addr{(static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) / count_seconds(ROTATE_ADDR_RELAY_DEST_INTERVAL)};
    const CSipHasher hasher{m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY)
                                .Write(hash_addr)
                                .Write(time_addr)};

    // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers.
    unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;

    // Top-nRelayNodes selection by per-peer hash, kept sorted descending.
    std::array<std::pair<uint64_t, Peer*>, 2> best{{{0, nullptr}, {0, nullptr}}};
    assert(nRelayNodes <= best.size());

    LOCK(m_peer_mutex);

    for (auto& [id, peer] : m_peer_map) {
        if (peer->m_addr_relay_enabled && id != originator && IsAddrCompatible(*peer, addr)) {
            uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize();
            for (unsigned int i = 0; i < nRelayNodes; i++) {
                 if (hashKey > best[i].first) {
                     // Shift lower-ranked entries down one slot before inserting.
                     // The copied range is at most one element (best.size() == 2),
                     // so source and destination do not overlap.
                     std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
                     best[i] = std::make_pair(hashKey, peer.get());
                     break;
                 }
            }
        }
    };

    // A zero hash key marks an unused slot; stop at the first one.
    for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
        PushAddress(*best[i].second, addr);
    }
}
2327
void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv)
{
    // Snapshot the most-recent-block cache under its own mutex so the common
    // "newest block" case can be served without cs_main or disk I/O.
    std::shared_ptr<const CBlock> a_recent_block;
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
    {
        LOCK(m_most_recent_block_mutex);
        a_recent_block = m_most_recent_block;
        a_recent_compact_block = m_most_recent_compact_block;
    }

    bool need_activate_chain = false;
    {
        LOCK(cs_main);
        const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
        if (pindex) {
            if (pindex->HaveNumChainTxs() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) &&
                    pindex->IsValid(BLOCK_VALID_TREE)) {
                // If we have the block and all of its parents, but have not yet validated it,
                // we might be in the middle of connecting it (ie in the unlock of cs_main
                // before ActivateBestChain but after AcceptBlock).
                // In this case, we need to run ActivateBestChain prior to checking the relay
                // conditions below.
                need_activate_chain = true;
            }
        }
    } // release cs_main before calling ActivateBestChain
    if (need_activate_chain) {
        BlockValidationState state;
        if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
            LogDebug(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
        }
    }

    const CBlockIndex* pindex{nullptr};
    const CBlockIndex* tip{nullptr};
    bool can_direct_fetch{false};
    FlatFilePos block_pos{};
    {
        LOCK(cs_main);
        // Re-lookup: the index may have changed while cs_main was released above.
        pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
        if (!pindex) {
            return;
        }
        if (!BlockRequestAllowed(*pindex)) {
            LogDebug(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId());
            return;
        }
        // disconnect node in case we have reached the outbound limit for serving historical blocks
        if (m_connman.OutboundTargetReached(true) &&
            (((m_chainman.m_best_header != nullptr) && (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) &&
            !pfrom.HasPermission(NetPermissionFlags::Download) // nodes with the download permission may exceed target
        ) {
            LogDebug(BCLog::NET, "historical block serving limit reached, %s\n", pfrom.DisconnectMsg(fLogIPs));
            pfrom.fDisconnect = true;
            return;
        }
        tip = m_chainman.ActiveChain().Tip();
        // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
        // NOTE(review): the opening `if (...)` line of this condition appears
        // elided in this rendering — verify against the upstream file.
            (((peer.m_our_services & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) && (tip->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
           )) {
            LogDebug(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold, %s\n", pfrom.DisconnectMsg(fLogIPs));
            //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
            pfrom.fDisconnect = true;
            return;
        }
        // Pruned nodes may have deleted the block, so check whether
        // it's available before trying to send.
        if (!(pindex->nStatus & BLOCK_HAVE_DATA)) {
            return;
        }
        can_direct_fetch = CanDirectFetch();
        block_pos = pindex->GetBlockPos();
    }

    std::shared_ptr<const CBlock> pblock;
    if (a_recent_block && a_recent_block->GetHash() == inv.hash) {
        pblock = a_recent_block;
    } else if (inv.IsMsgWitnessBlk()) {
        // Fast-path: in this case it is possible to serve the block directly from disk,
        // as the network format matches the format on disk
        if (const auto block_data{m_chainman.m_blockman.ReadRawBlock(block_pos)}) {
            MakeAndPushMessage(pfrom, NetMsgType::BLOCK, std::span{*block_data});
        } else {
            if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) {
                LogDebug(BCLog::NET, "Block was pruned before it could be read, %s\n", pfrom.DisconnectMsg(fLogIPs));
            } else {
                LogError("Cannot load block from disk, %s\n", pfrom.DisconnectMsg(fLogIPs));
            }
            pfrom.fDisconnect = true;
            return;
        }
        // Don't set pblock as we've sent the block
    } else {
        // Send block from disk
        std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
        if (!m_chainman.m_blockman.ReadBlock(*pblockRead, block_pos, inv.hash)) {
            if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) {
                LogDebug(BCLog::NET, "Block was pruned before it could be read, %s\n", pfrom.DisconnectMsg(fLogIPs));
            } else {
                LogError("Cannot load block from disk, %s\n", pfrom.DisconnectMsg(fLogIPs));
            }
            pfrom.fDisconnect = true;
            return;
        }
        pblock = pblockRead;
    }
    if (pblock) {
        // Dispatch on the requested inv type; witness data is stripped only
        // for the legacy MSG_BLOCK request.
        if (inv.IsMsgBlk()) {
            MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_NO_WITNESS(*pblock));
        } else if (inv.IsMsgWitnessBlk()) {
            MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock));
        } else if (inv.IsMsgFilteredBlk()) {
            bool sendMerkleBlock = false;
            CMerkleBlock merkleBlock;
            if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) {
                LOCK(tx_relay->m_bloom_filter_mutex);
                if (tx_relay->m_bloom_filter) {
                    sendMerkleBlock = true;
                    merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter);
                }
            }
            if (sendMerkleBlock) {
                MakeAndPushMessage(pfrom, NetMsgType::MERKLEBLOCK, merkleBlock);
                // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                // This avoids hurting performance by pointlessly requiring a round-trip
                // Note that there is currently no way for a node to request any single transactions we didn't send here -
                // they must either disconnect and retry or request the full block.
                // Thus, the protocol spec specified allows for us to provide duplicate txn here,
                // however we MUST always provide at least what the remote peer needs
                for (const auto& [tx_idx, _] : merkleBlock.vMatchedTxn)
                    MakeAndPushMessage(pfrom, NetMsgType::TX, TX_NO_WITNESS(*pblock->vtx[tx_idx]));
            }
            // else
            // no response
        } else if (inv.IsMsgCmpctBlk()) {
            // If a peer is asking for old blocks, we're almost guaranteed
            // they won't have a useful mempool to match against a compact block,
            // and we don't feel like constructing the object for them, so
            // instead we respond with the full, non-compact block.
            if (can_direct_fetch && pindex->nHeight >= tip->nHeight - MAX_CMPCTBLOCK_DEPTH) {
                if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == inv.hash) {
                    MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, *a_recent_compact_block);
                } else {
                    CBlockHeaderAndShortTxIDs cmpctblock{*pblock, m_rng.rand64()};
                    MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, cmpctblock);
                }
            } else {
                MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock));
            }
        }
    }

    {
        LOCK(peer.m_block_inv_mutex);
        // Trigger the peer node to send a getblocks request for the next batch of inventory
        if (inv.hash == peer.m_continuation_block) {
            // Send immediately. This must send even if redundant,
            // and we want it right after the last block so they don't
            // wait for other stuff first.
            std::vector<CInv> vInv;
            vInv.emplace_back(MSG_BLOCK, tip->GetBlockHash());
            MakeAndPushMessage(pfrom, NetMsgType::INV, vInv);
            peer.m_continuation_block.SetNull();
        }
    }
}
2495
CTransactionRef PeerManagerImpl::FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid)
{
    // If a tx was in the mempool prior to the last INV for this peer, permit the request.
    // (info_for_relay gates on the mempool sequence number captured at the
    // peer's last INV, so a peer can only fetch what it could have seen
    // announced — this avoids leaking arrival times of newer transactions.)
    auto txinfo{std::visit(
        [&](const auto& id) {
            return m_mempool.info_for_relay(id, WITH_LOCK(tx_relay.m_tx_inventory_mutex, return tx_relay.m_last_inv_sequence));
        },
        gtxid)};
    if (txinfo.tx) {
        return std::move(txinfo.tx);
    }

    // Or it might be from the most recent block
    {
        LOCK(m_most_recent_block_mutex);
        if (m_most_recent_block_txs != nullptr) {
            auto it = m_most_recent_block_txs->find(gtxid);
            if (it != m_most_recent_block_txs->end()) return it->second;
        }
    }

    // Not found: caller will respond with NOTFOUND.
    return {};
}
2519
void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
{
    // NOTE(review): a line appears elided here in this rendering (likely a
    // lock assertion) — verify against the upstream file.

    auto tx_relay = peer.GetTxRelay();

    std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
    std::vector<CInv> vNotFound;

    // Process as many TX items from the front of the getdata queue as
    // possible, since they're common and it's efficient to batch process
    // them.
    while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) {
        if (interruptMsgProc) return;
        // The send buffer provides backpressure. If there's no space in
        // the buffer, pause processing until the next call.
        if (pfrom.fPauseSend) break;

        const CInv &inv = *it++;

        if (tx_relay == nullptr) {
            // Ignore GETDATA requests for transactions from block-relay-only
            // peers and peers that asked us not to announce transactions.
            continue;
        }

        if (auto tx{FindTxForGetData(*tx_relay, ToGenTxid(inv))}) {
            // WTX and WITNESS_TX imply we serialize with witness
            const auto maybe_with_witness = (inv.IsMsgTx() ? TX_NO_WITNESS : TX_WITH_WITNESS);
            MakeAndPushMessage(pfrom, NetMsgType::TX, maybe_with_witness(*tx));
            // A peer fetched it, so no need to keep rebroadcasting it ourselves.
            m_mempool.RemoveUnbroadcastTx(tx->GetHash());
        } else {
            vNotFound.push_back(inv);
        }
    }

    // Only process one BLOCK item per call, since they're uncommon and can be
    // expensive to process.
    if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
        const CInv &inv = *it++;
        if (inv.IsGenBlkMsg()) {
            ProcessGetBlockData(pfrom, peer, inv);
        }
        // else: If the first item on the queue is an unknown type, we erase it
        // and continue processing the queue on the next call.
        // NOTE: previously we wouldn't do so and the peer sending us a malformed GETDATA could
        // result in never making progress and this thread using 100% allocated CPU. See
        // https://bitcoincore.org/en/2024/07/03/disclose-getdata-cpu.
    }

    // Drop everything we consumed (including any unknown-type item skipped above).
    peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);

    if (!vNotFound.empty()) {
        // Let the peer know that we didn't find what it asked for, so it doesn't
        // have to wait around forever.
        // SPV clients care about this message: it's needed when they are
        // recursively walking the dependencies of relevant unconfirmed
        // transactions. SPV clients want to do that because they want to know
        // about (and store and rebroadcast and risk analyze) the dependencies
        // of transactions relevant to them, without having to download the
        // entire memory pool.
        // Also, other nodes can use these messages to automatically request a
        // transaction from some other peer that announced it, and stop
        // waiting for us to respond.
        // In normal operation, we often send NOTFOUND messages for parents of
        // transactions that we relay; if a peer is missing a parent, they may
        // assume we have them and request the parents from us.
        MakeAndPushMessage(pfrom, NetMsgType::NOTFOUND, vNotFound);
    }
}
2590
2591uint32_t PeerManagerImpl::GetFetchFlags(const Peer& peer) const
2592{
2593 uint32_t nFetchFlags = 0;
2594 if (CanServeWitnesses(peer)) {
2595 nFetchFlags |= MSG_WITNESS_FLAG;
2596 }
2597 return nFetchFlags;
2598}
2599
2600void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req)
2601{
2602 BlockTransactions resp(req);
2603 for (size_t i = 0; i < req.indexes.size(); i++) {
2604 if (req.indexes[i] >= block.vtx.size()) {
2605 Misbehaving(peer, "getblocktxn with out-of-bounds tx indices");
2606 return;
2607 }
2608 resp.txn[i] = block.vtx[req.indexes[i]];
2609 }
2610
2611 if (LogAcceptCategory(BCLog::CMPCTBLOCK, BCLog::Level::Debug)) {
2612 uint32_t tx_requested_size{0};
2613 for (const auto& tx : resp.txn) tx_requested_size += tx->ComputeTotalSize();
2614 LogDebug(BCLog::CMPCTBLOCK, "Peer %d sent us a GETBLOCKTXN for block %s, sending a BLOCKTXN with %u txns. (%u bytes)\n", pfrom.GetId(), block.GetHash().ToString(), resp.txn.size(), tx_requested_size);
2615 }
2616 MakeAndPushMessage(pfrom, NetMsgType::BLOCKTXN, resp);
2617}
2618
bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader>& headers, Peer& peer)
{
    // Do these headers have proof-of-work matching what's claimed?
    // Punishable: a peer sending headers without valid PoW is wasting our time.
    if (!HasValidProofOfWork(headers, m_chainparams.GetConsensus())) {
        Misbehaving(peer, "header with invalid proof of work");
        return false;
    }

    // Are these headers connected to each other?
    // Punishable: headers in one message must form a single chain.
    if (!CheckHeadersAreContinuous(headers)) {
        Misbehaving(peer, "non-continuous headers sequence");
        return false;
    }
    return true;
}
2634
2635arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold()
2636{
2637 arith_uint256 near_chaintip_work = 0;
2638 LOCK(cs_main);
2639 if (m_chainman.ActiveChain().Tip() != nullptr) {
2640 const CBlockIndex *tip = m_chainman.ActiveChain().Tip();
2641 // Use a 144 block buffer, so that we'll accept headers that fork from
2642 // near our tip.
2643 near_chaintip_work = tip->nChainWork - std::min<arith_uint256>(144*GetBlockProof(*tip), tip->nChainWork);
2644 }
2645 return std::max(near_chaintip_work, m_chainman.MinimumChainWork());
2646}
2647
void PeerManagerImpl::HandleUnconnectingHeaders(CNode& pfrom, Peer& peer,
        const std::vector<CBlockHeader>& headers)
{
    // Try to fill in the missing headers. MaybeSendGetHeaders may decline
    // (and return false) if a getheaders request is already in flight.
    const CBlockIndex* best_header{WITH_LOCK(cs_main, return m_chainman.m_best_header)};
    if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
        LogDebug(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d)\n",
            headers[0].GetHash().ToString(),
            headers[0].hashPrevBlock.ToString(),
            best_header->nHeight,
            pfrom.GetId());
    }

    // Set hashLastUnknownBlock for this peer, so that if we
    // eventually get the headers - even from a different peer -
    // we can use this peer to download.
    WITH_LOCK(cs_main, UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()));
}
2672
2673bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const
2674{
2675 uint256 hashLastBlock;
2676 for (const CBlockHeader& header : headers) {
2677 if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
2678 return false;
2679 }
2680 hashLastBlock = header.GetHash();
2681 }
2682 return true;
2683}
2684
bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, std::vector<CBlockHeader>& headers)
{
    if (peer.m_headers_sync) {
        // A full-size message signals that the peer may have more headers after
        // this batch.
        auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == m_opts.max_headers_result);
        // If it is a valid continuation, we should treat the existing getheaders request as responded to.
        if (result.success) peer.m_last_getheaders_timestamp = {};
        if (result.request_more) {
            auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
            // If we were instructed to ask for a locator, it should not be empty.
            Assume(!locator.vHave.empty());
            // We can only be instructed to request more if processing was successful.
            Assume(result.success);
            if (!locator.vHave.empty()) {
                // It should be impossible for the getheaders request to fail,
                // because we just cleared the last getheaders timestamp.
                bool sent_getheaders = MaybeSendGetHeaders(pfrom, locator, peer);
                Assume(sent_getheaders);
                LogDebug(BCLog::NET, "more getheaders (from %s) to peer=%d\n",
                    locator.vHave.front().ToString(), pfrom.GetId());
            }
        }

        if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) {
            peer.m_headers_sync.reset(nullptr);

            // Delete this peer's entry in m_headers_presync_stats.
            // If this is m_headers_presync_bestpeer, it will be replaced later
            // by the next peer that triggers the else{} branch below.
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats.erase(pfrom.GetId());
        } else {
            // Build statistics for this peer's sync.
            HeadersPresyncStats stats;
            stats.first = peer.m_headers_sync->GetPresyncWork();
            if (peer.m_headers_sync->GetState() == HeadersSyncState::State::PRESYNC) {
                stats.second = {peer.m_headers_sync->GetPresyncHeight(),
                                peer.m_headers_sync->GetPresyncTime()};
            }

            // Update statistics in stats.
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats[pfrom.GetId()] = stats;
            auto best_it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
            bool best_updated = false;
            if (best_it == m_headers_presync_stats.end()) {
                // If the cached best peer is outdated, iterate over all remaining ones (including
                // newly updated one) to find the best one.
                NodeId peer_best{-1};
                const HeadersPresyncStats* stat_best{nullptr};
                for (const auto& [peer, stat] : m_headers_presync_stats) {
                    if (!stat_best || stat > *stat_best) {
                        peer_best = peer;
                        stat_best = &stat;
                    }
                }
                m_headers_presync_bestpeer = peer_best;
                best_updated = (peer_best == pfrom.GetId());
            } else if (best_it->first == pfrom.GetId() || stats > best_it->second) {
                // pfrom was and remains the best peer, or pfrom just became best.
                m_headers_presync_bestpeer = pfrom.GetId();
                best_updated = true;
            }
            if (best_updated && stats.second.has_value()) {
                // If the best peer updated, and it is in its first phase, signal.
                m_headers_presync_should_signal = true;
            }
        }

        if (result.success) {
            // We only overwrite the headers passed in if processing was
            // successful.
            headers.swap(result.pow_validated_headers);
        }

        return result.success;
    }
    // Either we didn't have a sync in progress, or something went wrong
    // processing these headers, or we are returning headers to the caller to
    // process.
    return false;
}
2766
bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex& chain_start_header, std::vector<CBlockHeader>& headers)
{
    // Calculate the claimed total work on this chain.
    arith_uint256 total_work = chain_start_header.nChainWork + CalculateClaimedHeadersWork(headers);

    // Our dynamic anti-DoS threshold (minimum work required on a headers chain
    // before we'll store it)
    arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();

    // Avoid DoS via low-difficulty-headers by only processing if the headers
    // are part of a chain with sufficient work.
    if (total_work < minimum_chain_work) {
        // Only try to sync with this peer if their headers message was full;
        // otherwise they don't have more headers after this so no point in
        // trying to sync their too-little-work chain.
        if (headers.size() == m_opts.max_headers_result) {
            // Note: we could advance to the last header in this set that is
            // known to us, rather than starting at the first header (which we
            // may already have); however this is unlikely to matter much since
            // ProcessHeadersMessage() already handles the case where all
            // headers in a received message are already known and are
            // ancestors of m_best_header or chainActive.Tip(), by skipping
            // this logic in that case. So even if the first header in this set
            // of headers is known, some header in this set must be new, so
            // advancing to the first unknown header would be a small effect.
            LOCK(peer.m_headers_sync_mutex);
            peer.m_headers_sync.reset(new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(),
                m_chainparams.HeadersSync(), chain_start_header, minimum_chain_work));

            // Now a HeadersSyncState object for tracking this synchronization
            // is created, process the headers using it as normal. Failures are
            // handled inside of IsContinuationOfLowWorkHeadersSync.
            (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
        } else {
            LogDebug(BCLog::NET, "Ignoring low-work chain (height=%u) from peer=%d\n", chain_start_header.nHeight + headers.size(), pfrom.GetId());
        }

        // The peer has not yet given us a chain that meets our work threshold,
        // so we want to prevent further processing of the headers in any case.
        headers = {};
        return true;
    }

    // Sufficient work: let the caller process the headers normally.
    return false;
}
2812
2813bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex* header)
2814{
2815 if (header == nullptr) {
2816 return false;
2817 } else if (m_chainman.m_best_header != nullptr && header == m_chainman.m_best_header->GetAncestor(header->nHeight)) {
2818 return true;
2819 } else if (m_chainman.ActiveChain().Contains(header)) {
2820 return true;
2821 }
2822 return false;
2823}
2824
2825bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer)
2826{
2827 const auto current_time = NodeClock::now();
2828
2829 // Only allow a new getheaders message to go out if we don't have a recent
2830 // one already in-flight
2831 if (current_time - peer.m_last_getheaders_timestamp > HEADERS_RESPONSE_TIME) {
2832 MakeAndPushMessage(pfrom, NetMsgType::GETHEADERS, locator, uint256());
2833 peer.m_last_getheaders_timestamp = current_time;
2834 return true;
2835 }
2836 return false;
2837}
2838
/**
 * Given a new headers tip ending in last_header, potentially request blocks towards that tip.
 * We require that the given tip have at least as much work as our tip, and for
 * our current tip to be "close to synced" (see CanDirectFetch()).
 */
void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header)
{
    LOCK(cs_main);
    CNodeState *nodestate = State(pfrom.GetId());

    // Only consider direct fetch when our tip is close to synced and the
    // announced chain has at least as much work as our active chain.
    if (CanDirectFetch() && last_header.IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= last_header.nChainWork) {
        std::vector<const CBlockIndex*> vToFetch;
        const CBlockIndex* pindexWalk{&last_header};
        // Calculate all the blocks we'd need to switch to last_header, up to a limit.
        while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
            // Only queue blocks we don't have, haven't already requested, and
            // (if segwit is active at that block) the peer can serve with witnesses.
            if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
                    !IsBlockRequested(pindexWalk->GetBlockHash()) &&
                    (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || CanServeWitnesses(peer))) {
                // We don't have this block, and it's not yet in flight.
                vToFetch.push_back(pindexWalk);
            }
            pindexWalk = pindexWalk->pprev;
        }
        // If pindexWalk still isn't on our main chain, we're looking at a
        // very large reorg at a time we think we're close to caught up to
        // the main chain -- this shouldn't really happen. Bail out on the
        // direct fetch and rely on parallel download instead.
        if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
            LogDebug(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
                     last_header.GetBlockHash().ToString(),
                     last_header.nHeight);
        } else {
            std::vector<CInv> vGetData;
            // Download as much as possible, from earliest to latest.
            // (vToFetch was filled tip-first, so iterate it in reverse.)
            for (const CBlockIndex* pindex : vToFetch | std::views::reverse) {
                if (nodestate->vBlocksInFlight.size() >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                    // Can't download any more from this peer
                    break;
                }
                uint32_t nFetchFlags = GetFetchFlags(peer);
                vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash());
                BlockRequested(pfrom.GetId(), *pindex);
                LogDebug(BCLog::NET, "Requesting block %s from peer=%d\n",
                         pindex->GetBlockHash().ToString(), pfrom.GetId());
            }
            if (vGetData.size() > 1) {
                LogDebug(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
                         last_header.GetBlockHash().ToString(),
                         last_header.nHeight);
            }
            if (vGetData.size() > 0) {
                // If this is the only block in flight anywhere, the peer
                // supports compact blocks, and the parent is validated,
                // upgrade the single request to a compact block request.
                if (!m_opts.ignore_incoming_txs &&
                        nodestate->m_provides_cmpctblocks &&
                        vGetData.size() == 1 &&
                        mapBlocksInFlight.size() == 1 &&
                        last_header.pprev->IsValid(BLOCK_VALID_CHAIN)) {
                    // In any case, we want to download using a compact block, not a regular one
                    vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
                }
                MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vGetData);
            }
        }
    }
}
2903
/**
 * Per-peer bookkeeping after a batch of headers was accepted: update the
 * peer's best known block, disconnect low-work outbound peers during IBD,
 * and possibly protect a well-synced outbound peer from the bad/lagging
 * chain eviction logic.
 *
 * @param[in] last_header            Last header of the received batch (must be in our index).
 * @param[in] received_new_header    True if any header in the batch was previously unknown.
 * @param[in] may_have_more_headers  True if the batch was full-sized, i.e. the
 *                                   peer may have further headers for us.
 */
void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer,
                                        const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
{
    LOCK(cs_main);
    CNodeState *nodestate = State(pfrom.GetId());

    UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash());

    // From here, pindexBestKnownBlock should be guaranteed to be non-null,
    // because it is set in UpdateBlockAvailability. Some nullptr checks
    // are still present, however, as belt-and-suspenders.

    // Track when this peer last announced a header better than our tip, for
    // use by the stale-chain eviction logic.
    if (received_new_header && last_header.nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
        nodestate->m_last_block_announcement = GetTime();
    }

    // If we're in IBD, we want outbound peers that will serve us a useful
    // chain. Disconnect peers that are on chains with insufficient work.
    if (m_chainman.IsInitialBlockDownload() && !may_have_more_headers) {
        // If the peer has no more headers to give us, then we know we have
        // their tip.
        if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
            // This peer has too little work on their headers chain to help
            // us sync -- disconnect if it is an outbound disconnection
            // candidate.
            // Note: We compare their tip to the minimum chain work (rather than
            // m_chainman.ActiveChain().Tip()) because we won't start block download
            // until we have a headers chain that has at least
            // the minimum chain work, even if a peer has a chain past our tip,
            // as an anti-DoS measure.
            if (pfrom.IsOutboundOrBlockRelayConn()) {
                LogInfo("outbound peer headers chain has insufficient work, %s\n", pfrom.DisconnectMsg(fLogIPs));
                pfrom.fDisconnect = true;
            }
        }
    }

    // If this is an outbound full-relay peer, check to see if we should protect
    // it from the bad/lagging chain logic.
    // Note that outbound block-relay peers are excluded from this protection, and
    // thus always subject to eviction under the bad/lagging chain logic.
    // See ChainSyncTimeoutState.
    if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
        if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
            LogDebug(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
            nodestate->m_chain_sync.m_protect = true;
            ++m_outbound_peers_with_protect_from_disconnect;
        }
    }
}
2959
/**
 * Process a batch of headers from a peer (headers message or compact block
 * announcement path): continue any in-progress low-work headers sync, apply
 * anti-DoS checks, hand the headers to validation, then update per-peer state
 * and possibly direct-fetch the announced blocks.
 */
void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
                                            std::vector<CBlockHeader>&& headers,
                                            bool via_compact_block)
{
    size_t nCount = headers.size();

    if (nCount == 0) {
        // Nothing interesting. Stop asking this peer for more headers.
        // If we were in the middle of headers sync, receiving an empty headers
        // message suggests that the peer suddenly has nothing to give us
        // (perhaps it reorged to our chain). Clear download state for this peer.
        LOCK(peer.m_headers_sync_mutex);
        if (peer.m_headers_sync) {
            peer.m_headers_sync.reset(nullptr);
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats.erase(pfrom.GetId());
        }
        // A headers message with no headers cannot be an announcement, so assume
        // it is a response to our last getheaders request, if there is one.
        peer.m_last_getheaders_timestamp = {};
        return;
    }

    // Before we do any processing, make sure these pass basic sanity checks.
    // We'll rely on headers having valid proof-of-work further down, as an
    // anti-DoS criteria (note: this check is required before passing any
    // headers into HeadersSyncState).
    if (!CheckHeadersPoW(headers, peer)) {
        // Misbehaving() calls are handled within CheckHeadersPoW(), so we can
        // just return. (Note that even if a header is announced via compact
        // block, the header itself should be valid, so this type of error can
        // always be punished.)
        return;
    }

    const CBlockIndex *pindexLast = nullptr;

    // We'll set already_validated_work to true if these headers are
    // successfully processed as part of a low-work headers sync in progress
    // (either in PRESYNC or REDOWNLOAD phase).
    // If true, this will mean that any headers returned to us (ie during
    // REDOWNLOAD) can be validated without further anti-DoS checks.
    bool already_validated_work = false;

    // If we're in the middle of headers sync, let it do its magic.
    bool have_headers_sync = false;
    {
        LOCK(peer.m_headers_sync_mutex);

        already_validated_work = IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);

        // The headers we passed in may have been:
        // - untouched, perhaps if no headers-sync was in progress, or some
        //   failure occurred
        // - erased, such as if the headers were successfully processed and no
        //   additional headers processing needs to take place (such as if we
        //   are still in PRESYNC)
        // - replaced with headers that are now ready for validation, such as
        //   during the REDOWNLOAD phase of a low-work headers sync.
        // So just check whether we still have headers that we need to process,
        // or not.
        if (headers.empty()) {
            return;
        }

        have_headers_sync = !!peer.m_headers_sync;
    }

    // Do these headers connect to something in our block index?
    const CBlockIndex *chain_start_header{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock))};
    bool headers_connect_blockindex{chain_start_header != nullptr};

    if (!headers_connect_blockindex) {
        // This could be a BIP 130 block announcement, use
        // special logic for handling headers that don't connect, as this
        // could be benign.
        HandleUnconnectingHeaders(pfrom, peer, headers);
        return;
    }

    // If headers connect, assume that this is in response to any outstanding getheaders
    // request we may have sent, and clear out the time of our last request. Non-connecting
    // headers cannot be a response to a getheaders request.
    peer.m_last_getheaders_timestamp = {};

    // If the headers we received are already in memory and an ancestor of
    // m_best_header or our tip, skip anti-DoS checks. These headers will not
    // use any more memory (and we are not leaking information that could be
    // used to fingerprint us).
    const CBlockIndex *last_received_header{nullptr};
    {
        LOCK(cs_main);
        last_received_header = m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash());
        already_validated_work = already_validated_work || IsAncestorOfBestHeaderOrTip(last_received_header);
    }

    // If our peer has NetPermissionFlags::NoBan privileges, then bypass our
    // anti-DoS logic (this saves bandwidth when we connect to a trusted peer
    // on startup).
    // NOTE(review): the condition opening this block (upstream checks
    // pfrom.HasPermission(NetPermissionFlags::NoBan)) is elided from this
    // excerpt -- verify against the original file.
        already_validated_work = true;
    }

    // At this point, the headers connect to something in our block index.
    // Do anti-DoS checks to determine if we should process or store for later
    // processing.
    if (!already_validated_work && TryLowWorkHeadersSync(peer, pfrom,
                *chain_start_header, headers)) {
        // If we successfully started a low-work headers sync, then there
        // should be no headers to process any further.
        Assume(headers.empty());
        return;
    }

    // At this point, we have a set of headers with sufficient work on them
    // which can be processed.

    // If we don't have the last header, then this peer will have given us
    // something new (if these headers are valid).
    bool received_new_header{last_received_header == nullptr};

    // Now process all the headers.
    BlockValidationState state;
    const bool processed{m_chainman.ProcessNewBlockHeaders(headers,
                                                           /*min_pow_checked=*/true,
                                                           state, &pindexLast)};
    if (!processed) {
        if (state.IsInvalid()) {
            // NOTE(review): a nested condition opening a block here is elided
            // from this excerpt -- verify against the original file.
            // Warn user if outgoing peers send us headers of blocks that we previously marked as invalid.
            LogWarning("%s (received from peer=%i). "
                       "If this happens with all peers, consider database corruption (that -reindex may fix) "
                       "or a potential consensus incompatibility.",
                       state.GetDebugMessage(), pfrom.GetId());
            }
            MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received");
            return;
        }
    }
    assert(pindexLast);

    // Log only genuinely-new headers to avoid log spam.
    if (processed && received_new_header) {
        LogBlockHeader(*pindexLast, pfrom, /*via_compact_block=*/false);
    }

    // Consider fetching more headers if we are not using our headers-sync mechanism.
    if (nCount == m_opts.max_headers_result && !have_headers_sync) {
        // Headers message had its maximum size; the peer may have more headers.
        if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
            LogDebug(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
                     pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
        }
    }

    UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == m_opts.max_headers_result);

    // Consider immediately downloading blocks.
    HeadersDirectFetchBlocks(pfrom, peer, *pindexLast);

    return;
}
3121
/**
 * Handle a transaction rejected from the mempool: log the rejection, let the
 * tx download manager record the failure, optionally keep the tx for compact
 * block reconstruction, and add its parents to the sender's known-tx set.
 *
 * @param[in] nodeid             Peer that gave us the transaction.
 * @param[in] ptx                The rejected transaction.
 * @param[in] state              Validation state describing the rejection.
 * @param[in] first_time_failure Whether this is the first failure for this tx.
 * @return  A package produced by the tx download manager that should be
 *          validated as a result of this rejection, if any.
 */
std::optional<node::PackageToValidate> PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx, const TxValidationState& state,
                                                                         bool first_time_failure)
{
    AssertLockNotHeld(m_peer_mutex);
    AssertLockHeld(g_msgproc_mutex);
    AssertLockHeld(m_tx_download_mutex);

    PeerRef peer{GetPeerRef(nodeid)};

    LogDebug(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n",
        ptx->GetHash().ToString(),
        ptx->GetWitnessHash().ToString(),
        nodeid,
        state.ToString());

    // Let the download manager decide the consequences of the rejection.
    const auto& [add_extra_compact_tx, unique_parents, package_to_validate] = m_txdownloadman.MempoolRejectedTx(ptx, state, nodeid, first_time_failure);

    // Keep small rejected transactions around for compact block reconstruction.
    if (add_extra_compact_tx && RecursiveDynamicUsage(*ptx) < 100000) {
        AddToCompactExtraTransactions(ptx);
    }
    // Add the tx's parents to the sending peer's set of known transactions
    // (peer may have been destructed in the meantime, hence the null check).
    for (const Txid& parent_txid : unique_parents) {
        if (peer) AddKnownTx(*peer, parent_txid.ToUint256());
    }

    return package_to_validate;
}
3148
/**
 * Handle a transaction accepted into the mempool: notify the tx download
 * manager, log the acceptance, initiate relay to all peers, and retain any
 * replaced transactions for compact block reconstruction.
 */
void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions)
{
    AssertLockNotHeld(m_peer_mutex);
    AssertLockHeld(g_msgproc_mutex);
    AssertLockHeld(m_tx_download_mutex);

    m_txdownloadman.MempoolAcceptedTx(tx);

    LogDebug(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n",
             nodeid,
             tx->GetHash().ToString(),
             tx->GetWitnessHash().ToString(),
             m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000);

    // Announce the newly-accepted transaction to all peers.
    InitiateTxBroadcastToAll(tx->GetHash(), tx->GetWitnessHash());

    // Transactions replaced by this acceptance may still be referenced by
    // compact blocks; keep them available for reconstruction.
    for (const CTransactionRef& removedTx : replaced_transactions) {
        AddToCompactExtraTransactions(removedTx);
    }
}
3169
/**
 * Record the outcome of validating a (1-parent-1-child) package: update the
 * tx download manager and handle each package member's individual result.
 */
void PeerManagerImpl::ProcessPackageResult(const node::PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result)
{
    AssertLockNotHeld(m_peer_mutex);
    AssertLockHeld(g_msgproc_mutex);
    AssertLockHeld(m_tx_download_mutex);

    const auto& package = package_to_validate.m_txns;
    const auto& senders = package_to_validate.m_senders;

    if (package_result.m_state.IsInvalid()) {
        m_txdownloadman.MempoolRejectedPackage(package);
    }
    // We currently only expect to process 1-parent-1-child packages. Remove if this changes.
    if (!Assume(package.size() == 2)) return;

    // Iterate backwards to erase in-package descendants from the orphanage before they become
    // relevant in AddChildrenToWorkSet.
    auto package_iter = package.rbegin();
    auto senders_iter = senders.rbegin();
    while (package_iter != package.rend()) {
        const auto& tx = *package_iter;
        const NodeId nodeid = *senders_iter;
        const auto it_result{package_result.m_tx_results.find(tx->GetWitnessHash())};

        // It is not guaranteed that a result exists for every transaction.
        if (it_result != package_result.m_tx_results.end()) {
            const auto& tx_result = it_result->second;
            switch (tx_result.m_result_type) {
            // NOTE(review): the case labels of this switch (values of
            // MempoolAcceptResult::ResultType) are elided from this excerpt --
            // verify against the original file.
            {
                ProcessValidTx(nodeid, tx, tx_result.m_replaced_transactions);
                break;
            }
            {
                // Don't add to vExtraTxnForCompact, as these transactions should have already been
                // added there when added to the orphanage or rejected for TX_RECONSIDERABLE.
                // This should be updated if package submission is ever used for transactions
                // that haven't already been validated before.
                ProcessInvalidTx(nodeid, tx, tx_result.m_state, /*first_time_failure=*/false);
                break;
            }
            {
                // AlreadyHaveTx() should be catching transactions that are already in mempool.
                Assume(false);
                break;
            }
            }
        }
        package_iter++;
        senders_iter++;
    }
}
3225
// NOTE: the orphan processing used to be uninterruptible and quadratic, which could allow a peer to stall the node for
// hours with specially crafted transactions. See https://bitcoincore.org/en/2024/07/03/disclose-orphan-dos.
/**
 * Reconsider at most one orphan transaction from this peer's work set.
 * @return true if an orphan was processed (accepted or definitively rejected),
 *         in which case the caller may call again; false if no work was done.
 */
bool PeerManagerImpl::ProcessOrphanTx(Peer& peer)
{
    AssertLockHeld(g_msgproc_mutex);
    LOCK2(::cs_main, m_tx_download_mutex);

    // NOTE(review): this outer declaration is shadowed by the loop variable
    // below and is never read -- candidate for removal upstream.
    CTransactionRef porphanTx = nullptr;

    while (CTransactionRef porphanTx = m_txdownloadman.GetTxToReconsider(peer.m_id)) {
        const MempoolAcceptResult result = m_chainman.ProcessTransaction(porphanTx);
        const TxValidationState& state = result.m_state;
        const Txid& orphanHash = porphanTx->GetHash();
        const Wtxid& orphan_wtxid = porphanTx->GetWitnessHash();

        // NOTE(review): the condition opening this block (upstream checks
        // result.m_result_type == MempoolAcceptResult::ResultType::VALID) is
        // elided from this excerpt -- verify against the original file.
            LogDebug(BCLog::TXPACKAGES, "   accepted orphan tx %s (wtxid=%s)\n", orphanHash.ToString(), orphan_wtxid.ToString());
            ProcessValidTx(peer.m_id, porphanTx, result.m_replaced_transactions);
            return true;
        } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
            LogDebug(BCLog::TXPACKAGES, "   invalid orphan tx %s (wtxid=%s) from peer=%d. %s\n",
                orphanHash.ToString(),
                orphan_wtxid.ToString(),
                peer.m_id,
                state.ToString());

            if (Assume(state.IsInvalid() &&
            // NOTE(review): the remainder of this condition (additional
            // GetResult() exclusions) is elided from this excerpt -- verify
            // against the original file.
                ProcessInvalidTx(peer.m_id, porphanTx, state, /*first_time_failure=*/false);
            }
            return true;
        }
    }

    return false;
}
3264
3265bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer,
3266 BlockFilterType filter_type, uint32_t start_height,
3267 const uint256& stop_hash, uint32_t max_height_diff,
3268 const CBlockIndex*& stop_index,
3269 BlockFilterIndex*& filter_index)
3270{
3271 const bool supported_filter_type =
3272 (filter_type == BlockFilterType::BASIC &&
3273 (peer.m_our_services & NODE_COMPACT_FILTERS));
3274 if (!supported_filter_type) {
3275 LogDebug(BCLog::NET, "peer requested unsupported block filter type: %d, %s\n",
3276 static_cast<uint8_t>(filter_type), node.DisconnectMsg(fLogIPs));
3277 node.fDisconnect = true;
3278 return false;
3279 }
3280
3281 {
3282 LOCK(cs_main);
3283 stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash);
3284
3285 // Check that the stop block exists and the peer would be allowed to fetch it.
3286 if (!stop_index || !BlockRequestAllowed(*stop_index)) {
3287 LogDebug(BCLog::NET, "peer requested invalid block hash: %s, %s\n",
3288 stop_hash.ToString(), node.DisconnectMsg(fLogIPs));
3289 node.fDisconnect = true;
3290 return false;
3291 }
3292 }
3293
3294 uint32_t stop_height = stop_index->nHeight;
3295 if (start_height > stop_height) {
3296 LogDebug(BCLog::NET, "peer sent invalid getcfilters/getcfheaders with "
3297 "start height %d and stop height %d, %s\n",
3298 start_height, stop_height, node.DisconnectMsg(fLogIPs));
3299 node.fDisconnect = true;
3300 return false;
3301 }
3302 if (stop_height - start_height >= max_height_diff) {
3303 LogDebug(BCLog::NET, "peer requested too many cfilters/cfheaders: %d / %d, %s\n",
3304 stop_height - start_height + 1, max_height_diff, node.DisconnectMsg(fLogIPs));
3305 node.fDisconnect = true;
3306 return false;
3307 }
3308
3309 filter_index = GetBlockFilterIndex(filter_type);
3310 if (!filter_index) {
3311 LogDebug(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type));
3312 return false;
3313 }
3314
3315 return true;
3316}
3317
3318void PeerManagerImpl::ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv)
3319{
3320 uint8_t filter_type_ser;
3321 uint32_t start_height;
3322 uint256 stop_hash;
3323
3324 vRecv >> filter_type_ser >> start_height >> stop_hash;
3325
3326 const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3327
3328 const CBlockIndex* stop_index;
3329 BlockFilterIndex* filter_index;
3330 if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash,
3331 MAX_GETCFILTERS_SIZE, stop_index, filter_index)) {
3332 return;
3333 }
3334
3335 std::vector<BlockFilter> filters;
3336 if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
3337 LogDebug(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
3338 BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
3339 return;
3340 }
3341
3342 for (const auto& filter : filters) {
3343 MakeAndPushMessage(node, NetMsgType::CFILTER, filter);
3344 }
3345}
3346
3347void PeerManagerImpl::ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv)
3348{
3349 uint8_t filter_type_ser;
3350 uint32_t start_height;
3351 uint256 stop_hash;
3352
3353 vRecv >> filter_type_ser >> start_height >> stop_hash;
3354
3355 const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3356
3357 const CBlockIndex* stop_index;
3358 BlockFilterIndex* filter_index;
3359 if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash,
3360 MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
3361 return;
3362 }
3363
3364 uint256 prev_header;
3365 if (start_height > 0) {
3366 const CBlockIndex* const prev_block =
3367 stop_index->GetAncestor(static_cast<int>(start_height - 1));
3368 if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
3369 LogDebug(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
3370 BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString());
3371 return;
3372 }
3373 }
3374
3375 std::vector<uint256> filter_hashes;
3376 if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) {
3377 LogDebug(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
3378 BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
3379 return;
3380 }
3381
3382 MakeAndPushMessage(node, NetMsgType::CFHEADERS,
3383 filter_type_ser,
3384 stop_index->GetBlockHash(),
3385 prev_header,
3386 filter_hashes);
3387}
3388
3389void PeerManagerImpl::ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv)
3390{
3391 uint8_t filter_type_ser;
3392 uint256 stop_hash;
3393
3394 vRecv >> filter_type_ser >> stop_hash;
3395
3396 const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3397
3398 const CBlockIndex* stop_index;
3399 BlockFilterIndex* filter_index;
3400 if (!PrepareBlockFilterRequest(node, peer, filter_type, /*start_height=*/0, stop_hash,
3401 /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
3402 stop_index, filter_index)) {
3403 return;
3404 }
3405
3406 std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
3407
3408 // Populate headers.
3409 const CBlockIndex* block_index = stop_index;
3410 for (int i = headers.size() - 1; i >= 0; i--) {
3411 int height = (i + 1) * CFCHECKPT_INTERVAL;
3412 block_index = block_index->GetAncestor(height);
3413
3414 if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
3415 LogDebug(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
3416 BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString());
3417 return;
3418 }
3419 }
3420
3421 MakeAndPushMessage(node, NetMsgType::CFCHECKPT,
3422 filter_type_ser,
3423 stop_index->GetBlockHash(),
3424 headers);
3425}
3426
/**
 * Hand a full block to validation and update the in-flight request tracking
 * depending on whether the block was new.
 */
void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked)
{
    bool new_block{false};
    m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked, &new_block);
    if (new_block) {
        // NOTE(review): a line is elided here in this excerpt (upstream
        // records the receipt time on the node) -- verify against the
        // original file.
        // In case this block came from a different peer than we requested
        // from, we can erase the block request now anyway (as we just stored
        // this block to disk).
        LOCK(cs_main);
        RemoveBlockRequest(block->GetHash(), std::nullopt);
    } else {
        // Block was not new: just forget where we got it from.
        LOCK(cs_main);
        mapBlockSource.erase(block->GetHash());
    }
}
3443
/**
 * Handle a BLOCKTXN message: fill in a partially-downloaded compact block
 * with the supplied transactions and, if reconstruction succeeds, hand the
 * completed block to validation.
 */
void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions)
{
    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
    bool fBlockRead{false};
    {
        LOCK(cs_main);

        auto range_flight = mapBlocksInFlight.equal_range(block_transactions.blockhash);
        size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
        bool requested_block_from_this_peer{false};

        // Multimap ensures ordering of outstanding requests. It's either empty or first in line.
        bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());

        // Find our outstanding request to this specific peer (if any) that has
        // a partial block attached.
        while (range_flight.first != range_flight.second) {
            auto [node_id, block_it] = range_flight.first->second;
            if (node_id == pfrom.GetId() && block_it->partialBlock) {
                requested_block_from_this_peer = true;
                break;
            }
            range_flight.first++;
        }

        if (!requested_block_from_this_peer) {
            LogDebug(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId());
            return;
        }

        PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock;

        if (partialBlock.header.IsNull()) {
            // It is possible for the header to be empty if a previous call to FillBlock wiped the header, but left
            // the PartiallyDownloadedBlock pointer around (i.e. did not call RemoveBlockRequest). In this case, we
            // should not call LookupBlockIndex below.
            RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId());
            Misbehaving(peer, "previous compact block reconstruction attempt failed");
            LogDebug(BCLog::NET, "Peer %d sent compact block transactions multiple times", pfrom.GetId());
            return;
        }

        // We should not have gotten this far in compact block processing unless it's attached to a known header
        const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(partialBlock.header.hashPrevBlock))};
        ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn,
                                                   /*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT));
        if (status == READ_STATUS_INVALID) {
            RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
            Misbehaving(peer, "invalid compact block/non-matching block transactions");
            return;
        } else if (status == READ_STATUS_FAILED) {
            if (first_in_flight) {
                // Might have collided, fall back to getdata now :(
                // We keep the failed partialBlock to disallow processing another compact block announcement from the same
                // peer for the same block. We let the full block download below continue under the same m_downloading_since
                // timer.
                std::vector<CInv> invs;
                invs.emplace_back(MSG_BLOCK | GetFetchFlags(peer), block_transactions.blockhash);
                MakeAndPushMessage(pfrom, NetMsgType::GETDATA, invs);
            } else {
                RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId());
                LogDebug(BCLog::NET, "Peer %d sent us a compact block but it failed to reconstruct, waiting on first download to complete\n", pfrom.GetId());
                return;
            }
        } else {
            // Block is okay for further processing
            RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // it is now an empty pointer
            fBlockRead = true;
            // mapBlockSource is used for potentially punishing peers and
            // updating which peers send us compact blocks, so the race
            // between here and cs_main in ProcessNewBlock is fine.
            // BIP 152 permits peers to relay compact blocks after validating
            // the header only; we should not punish peers if the block turns
            // out to be invalid.
            mapBlockSource.emplace(block_transactions.blockhash, std::make_pair(pfrom.GetId(), false));
        }
    } // Don't hold cs_main when we call into ProcessNewBlock
    if (fBlockRead) {
        // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
        // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
        // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
        // disk-space attacks), but this should be safe due to the
        // protections in the compact block handler -- see related comment
        // in compact block optimistic reconstruction handling.
        ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true);
    }
    return;
}
3530
3531void PeerManagerImpl::LogBlockHeader(const CBlockIndex& index, const CNode& peer, bool via_compact_block) {
3532 // To prevent log spam, this function should only be called after it was determined that a
3533 // header is both new and valid.
3534 //
3535 // These messages are valuable for detecting potential selfish mining behavior;
3536 // if multiple displacing headers are seen near simultaneously across many
3537 // nodes in the network, this might be an indication of selfish mining.
3538 // In addition it can be used to identify peers which send us a header, but
3539 // don't followup with a complete and valid (compact) block.
3540 // Having this log by default when not in IBD ensures broad availability of
3541 // this data in case investigation is merited.
3542 const auto msg = strprintf(
3543 "Saw new %sheader hash=%s height=%d peer=%d%s",
3544 via_compact_block ? "cmpctblock " : "",
3545 index.GetBlockHash().ToString(),
3546 index.nHeight,
3547 peer.GetId(),
3548 peer.LogIP(fLogIPs)
3549 );
3550 if (m_chainman.IsInitialBlockDownload()) {
3551 LogDebug(BCLog::VALIDATION, "%s", msg);
3552 } else {
3553 LogInfo("%s", msg);
3554 }
3555}
3556
/**
 * On a short-lived private-broadcast connection whose handshake just
 * completed: pick a transaction to broadcast and announce it via INV, or
 * disconnect if there is nothing left to send.
 */
void PeerManagerImpl::PushPrivateBroadcastTx(CNode& node)
{
    // NOTE(review): a line is elided here in this excerpt (likely a lock
    // assertion) -- verify against the original file.

    const auto opt_tx{m_tx_for_private_broadcast.PickTxForSend(node.GetId(), CService{node.addr})};
    if (!opt_tx) {
        // Nothing to send: this connection was made in vain, tear it down.
        LogDebug(BCLog::PRIVBROADCAST, "Disconnecting: no more transactions for private broadcast (connected in vain), peer=%d%s", node.GetId(), node.LogIP(fLogIPs));
        node.fDisconnect = true;
        return;
    }
    const CTransactionRef& tx{*opt_tx};

    LogDebug(BCLog::PRIVBROADCAST, "P2P handshake completed, sending INV for txid=%s%s, peer=%d%s",
             tx->GetHash().ToString(), tx->HasWitness() ? strprintf(", wtxid=%s", tx->GetWitnessHash().ToString()) : "",
             node.GetId(), node.LogIP(fLogIPs));

    // Announce by txid only; the peer will request the tx via getdata.
    MakeAndPushMessage(node, NetMsgType::INV, std::vector<CInv>{{CInv{MSG_TX, tx->GetHash().ToUint256()}}});
}
3575
3576void PeerManagerImpl::ProcessMessage(Peer& peer, CNode& pfrom, const std::string& msg_type, DataStream& vRecv,
3577 const std::chrono::microseconds time_received,
3578 const std::atomic<bool>& interruptMsgProc)
3579{
3580 AssertLockHeld(g_msgproc_mutex);
3581
3582 LogDebug(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
3583
3584
3585 if (msg_type == NetMsgType::VERSION) {
3586 if (pfrom.nVersion != 0) {
3587 LogDebug(BCLog::NET, "redundant version message from peer=%d\n", pfrom.GetId());
3588 return;
3589 }
3590
3591 int64_t nTime;
3592 CService addrMe;
3593 uint64_t nNonce = 1;
3594 ServiceFlags nServices;
3595 int nVersion;
3596 std::string cleanSubVer;
3597 int starting_height = -1;
3598 bool fRelay = true;
3599
3600 vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
3601 if (nTime < 0) {
3602 nTime = 0;
3603 }
3604 vRecv.ignore(8); // Ignore the addrMe service bits sent by the peer
3605 vRecv >> CNetAddr::V1(addrMe);
3606 if (!pfrom.IsInboundConn())
3607 {
3608 // Overwrites potentially existing services. In contrast to this,
3609 // unvalidated services received via gossip relay in ADDR/ADDRV2
3610 // messages are only ever added but cannot replace existing ones.
3611 m_addrman.SetServices(pfrom.addr, nServices);
3612 }
3613 if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices))
3614 {
3615 LogDebug(BCLog::NET, "peer does not offer the expected services (%08x offered, %08x expected), %s\n",
3616 nServices,
3617 GetDesirableServiceFlags(nServices),
3618 pfrom.DisconnectMsg(fLogIPs));
3619 pfrom.fDisconnect = true;
3620 return;
3621 }
3622
3623 if (nVersion < MIN_PEER_PROTO_VERSION) {
3624 // disconnect from peers older than this proto version
3625 LogDebug(BCLog::NET, "peer using obsolete version %i, %s\n", nVersion, pfrom.DisconnectMsg(fLogIPs));
3626 pfrom.fDisconnect = true;
3627 return;
3628 }
3629
3630 if (!vRecv.empty()) {
3631 // The version message includes information about the sending node which we don't use:
3632 // - 8 bytes (service bits)
3633 // - 16 bytes (ipv6 address)
3634 // - 2 bytes (port)
3635 vRecv.ignore(26);
3636 vRecv >> nNonce;
3637 }
3638 if (!vRecv.empty()) {
3639 std::string strSubVer;
3640 vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
3641 cleanSubVer = SanitizeString(strSubVer);
3642 }
3643 if (!vRecv.empty()) {
3644 vRecv >> starting_height;
3645 }
3646 if (!vRecv.empty())
3647 vRecv >> fRelay;
3648 // Disconnect if we connected to ourself
3649 if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce))
3650 {
3651 LogInfo("connected to self at %s, disconnecting\n", pfrom.addr.ToStringAddrPort());
3652 pfrom.fDisconnect = true;
3653 return;
3654 }
3655
3656 if (pfrom.IsInboundConn() && addrMe.IsRoutable())
3657 {
3658 SeenLocal(addrMe);
3659 }
3660
3661 // Inbound peers send us their version message when they connect.
3662 // We send our version message in response.
3663 if (pfrom.IsInboundConn()) {
3664 PushNodeVersion(pfrom, peer);
3665 }
3666
3667 // Change version
3668 const int greatest_common_version = std::min(nVersion, PROTOCOL_VERSION);
3669 pfrom.SetCommonVersion(greatest_common_version);
3670 pfrom.nVersion = nVersion;
3671
3672 pfrom.m_has_all_wanted_services = HasAllDesirableServiceFlags(nServices);
3673 peer.m_their_services = nServices;
3674 pfrom.SetAddrLocal(addrMe);
3675 {
3676 LOCK(pfrom.m_subver_mutex);
3677 pfrom.cleanSubVer = cleanSubVer;
3678 }
3679 peer.m_starting_height = starting_height;
3680
3681 // Only initialize the Peer::TxRelay m_relay_txs data structure if:
3682 // - this isn't an outbound block-relay-only connection, and
3683 // - this isn't an outbound feeler connection, and
3684 // - fRelay=true (the peer wishes to receive transaction announcements)
3685 // or we're offering NODE_BLOOM to this peer. NODE_BLOOM means that
3686 // the peer may turn on transaction relay later.
3687 if (!pfrom.IsBlockOnlyConn() &&
3688 !pfrom.IsFeelerConn() &&
3689 (fRelay || (peer.m_our_services & NODE_BLOOM))) {
3690 auto* const tx_relay = peer.SetTxRelay();
3691 {
3692 LOCK(tx_relay->m_bloom_filter_mutex);
3693 tx_relay->m_relay_txs = fRelay; // set to true after we get the first filter* message
3694 }
3695 if (fRelay) pfrom.m_relays_txs = true;
3696 }
3697
3698 const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)};
3699 LogDebug(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s%s\n",
3700 cleanSubVer, pfrom.nVersion,
3701 peer.m_starting_height, addrMe.ToStringAddrPort(), fRelay, pfrom.GetId(),
3702 pfrom.LogIP(fLogIPs), (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));
3703
3704 if (pfrom.IsPrivateBroadcastConn()) {
3705 if (fRelay) {
3706 MakeAndPushMessage(pfrom, NetMsgType::VERACK);
3707 } else {
3708 LogDebug(BCLog::PRIVBROADCAST, "Disconnecting: does not support transaction relay (connected in vain), peer=%d%s",
3709 pfrom.GetId(), pfrom.LogIP(fLogIPs));
3710 pfrom.fDisconnect = true;
3711 }
3712 return;
3713 }
3714
3715 if (greatest_common_version >= WTXID_RELAY_VERSION) {
3716 MakeAndPushMessage(pfrom, NetMsgType::WTXIDRELAY);
3717 }
3718
3719 // Signal ADDRv2 support (BIP155).
3720 if (greatest_common_version >= 70016) {
3721 // BIP155 defines addrv2 and sendaddrv2 for all protocol versions, but some
3722 // implementations reject messages they don't know. As a courtesy, don't send
3723 // it to nodes with a version before 70016, as no software is known to support
3724 // BIP155 that doesn't announce at least that protocol version number.
3725 MakeAndPushMessage(pfrom, NetMsgType::SENDADDRV2);
3726 }
3727
3728 if (greatest_common_version >= WTXID_RELAY_VERSION && m_txreconciliation) {
3729 // Per BIP-330, we announce txreconciliation support if:
3730 // - protocol version per the peer's VERSION message supports WTXID_RELAY;
3731 // - transaction relay is supported per the peer's VERSION message
3732 // - this is not a block-relay-only connection and not a feeler
3733 // - this is not an addr fetch connection;
3734 // - we are not in -blocksonly mode.
3735 const auto* tx_relay = peer.GetTxRelay();
3736 if (tx_relay && WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs) &&
3737 !pfrom.IsAddrFetchConn() && !m_opts.ignore_incoming_txs) {
3738 const uint64_t recon_salt = m_txreconciliation->PreRegisterPeer(pfrom.GetId());
3739 MakeAndPushMessage(pfrom, NetMsgType::SENDTXRCNCL,
3740 TXRECONCILIATION_VERSION, recon_salt);
3741 }
3742 }
3743
3744 MakeAndPushMessage(pfrom, NetMsgType::VERACK);
3745
3746 // Potentially mark this peer as a preferred download peer.
3747 {
3748 LOCK(cs_main);
3749 CNodeState* state = State(pfrom.GetId());
3750 state->fPreferredDownload = (!pfrom.IsInboundConn() || pfrom.HasPermission(NetPermissionFlags::NoBan)) && !pfrom.IsAddrFetchConn() && CanServeBlocks(peer);
3751 m_num_preferred_download_peers += state->fPreferredDownload;
3752 }
3753
3754 // Attempt to initialize address relay for outbound peers and use result
3755 // to decide whether to send GETADDR, so that we don't send it to
3756 // inbound or outbound block-relay-only peers.
3757 bool send_getaddr{false};
3758 if (!pfrom.IsInboundConn()) {
3759 send_getaddr = SetupAddressRelay(pfrom, peer);
3760 }
3761 if (send_getaddr) {
3762 // Do a one-time address fetch to help populate/update our addrman.
3763 // If we're starting up for the first time, our addrman may be pretty
3764 // empty, so this mechanism is important to help us connect to the network.
3765 // We skip this for block-relay-only peers. We want to avoid
3766 // potentially leaking addr information and we do not want to
3767 // indicate to the peer that we will participate in addr relay.
3768 MakeAndPushMessage(pfrom, NetMsgType::GETADDR);
3769 peer.m_getaddr_sent = true;
3770 // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND addresses in response
3771 // (bypassing the MAX_ADDR_PROCESSING_TOKEN_BUCKET limit).
3772 peer.m_addr_token_bucket += MAX_ADDR_TO_SEND;
3773 }
3774
3775 if (!pfrom.IsInboundConn()) {
3776 // For non-inbound connections, we update the addrman to record
3777 // connection success so that addrman will have an up-to-date
3778 // notion of which peers are online and available.
3779 //
3780 // While we strive to not leak information about block-relay-only
3781 // connections via the addrman, not moving an address to the tried
3782 // table is also potentially detrimental because new-table entries
3783 // are subject to eviction in the event of addrman collisions. We
3784 // mitigate the information-leak by never calling
3785 // AddrMan::Connected() on block-relay-only peers; see
3786 // FinalizeNode().
3787 //
3788 // This moves an address from New to Tried table in Addrman,
3789 // resolves tried-table collisions, etc.
3790 m_addrman.Good(pfrom.addr);
3791 }
3792
3793 peer.m_time_offset = NodeSeconds{std::chrono::seconds{nTime}} - Now<NodeSeconds>();
3794 if (!pfrom.IsInboundConn()) {
3795 // Don't use timedata samples from inbound peers to make it
3796 // harder for others to create false warnings about our clock being out of sync.
3797 m_outbound_time_offsets.Add(peer.m_time_offset);
3798 m_outbound_time_offsets.WarnIfOutOfSync();
3799 }
3800
3801 // If the peer is old enough to have the old alert system, send it the final alert.
3802 if (greatest_common_version <= 70012) {
3803 constexpr auto finalAlert{"60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"_hex};
3804 MakeAndPushMessage(pfrom, "alert", finalAlert);
3805 }
3806
3807 // Feeler connections exist only to verify if address is online.
3808 if (pfrom.IsFeelerConn()) {
3809 LogDebug(BCLog::NET, "feeler connection completed, %s\n", pfrom.DisconnectMsg(fLogIPs));
3810 pfrom.fDisconnect = true;
3811 }
3812 return;
3813 }
3814
3815 if (pfrom.nVersion == 0) {
3816 // Must have a version message before anything else
3817 LogDebug(BCLog::NET, "non-version message before version handshake. Message \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
3818 return;
3819 }
3820
3821 if (msg_type == NetMsgType::VERACK) {
3822 if (pfrom.fSuccessfullyConnected) {
3823 LogDebug(BCLog::NET, "ignoring redundant verack message from peer=%d\n", pfrom.GetId());
3824 return;
3825 }
3826
3827 auto new_peer_msg = [&]() {
3828 const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)};
3829 return strprintf("New %s peer connected: transport: %s, version: %d, blocks=%d peer=%d%s%s\n",
3830 pfrom.ConnectionTypeAsString(),
3831 TransportTypeAsString(pfrom.m_transport->GetInfo().transport_type),
3832 pfrom.nVersion.load(), peer.m_starting_height,
3833 pfrom.GetId(), pfrom.LogIP(fLogIPs),
3834 (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));
3835 };
3836
3837 // Log successful connections unconditionally for outbound, but not for inbound as those
3838 // can be triggered by an attacker at high rate.
3839 if (pfrom.IsInboundConn()) {
3840 LogDebug(BCLog::NET, "%s", new_peer_msg());
3841 } else {
3842 LogInfo("%s", new_peer_msg());
3843 }
3844
3845 if (auto tx_relay = peer.GetTxRelay()) {
3846 // `TxRelay::m_tx_inventory_to_send` must be empty before the
3847 // version handshake is completed as
3848 // `TxRelay::m_next_inv_send_time` is first initialised in
3849 // `SendMessages` after the verack is received. Any transactions
3850 // received during the version handshake would otherwise
3851 // immediately be advertised without random delay, potentially
3852 // leaking the time of arrival to a spy.
3854 tx_relay->m_tx_inventory_mutex,
3855 return tx_relay->m_tx_inventory_to_send.empty() &&
3856 tx_relay->m_next_inv_send_time == 0s));
3857 }
3858
3859 if (pfrom.IsPrivateBroadcastConn()) {
3860 pfrom.fSuccessfullyConnected = true;
3861 // The peer may intend to later send us NetMsgType::FEEFILTER limiting
3862 // cheap transactions, but we don't wait for that and thus we may send
3863 // them a transaction below their threshold. This is ok because this
3864 // relay logic is designed to work even in cases when the peer drops
3865 // the transaction (due to it being too cheap, or for other reasons).
3866 PushPrivateBroadcastTx(pfrom);
3867 return;
3868 }
3869
3871 // Tell our peer we are willing to provide version 2 cmpctblocks.
3872 // However, we do not request new block announcements using
3873 // cmpctblock messages.
3874 // We send this to non-NODE NETWORK peers as well, because
3875 // they may wish to request compact blocks from us
3876 MakeAndPushMessage(pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION);
3877 }
3878
3879 if (m_txreconciliation) {
3880 if (!peer.m_wtxid_relay || !m_txreconciliation->IsPeerRegistered(pfrom.GetId())) {
3881 // We could have optimistically pre-registered/registered the peer. In that case,
3882 // we should forget about the reconciliation state here if this wasn't followed
3883 // by WTXIDRELAY (since WTXIDRELAY can't be announced later).
3884 m_txreconciliation->ForgetPeer(pfrom.GetId());
3885 }
3886 }
3887
3888 {
3889 LOCK2(::cs_main, m_tx_download_mutex);
3890 const CNodeState* state = State(pfrom.GetId());
3891 m_txdownloadman.ConnectedPeer(pfrom.GetId(), node::TxDownloadConnectionInfo {
3892 .m_preferred = state->fPreferredDownload,
3893 .m_relay_permissions = pfrom.HasPermission(NetPermissionFlags::Relay),
3894 .m_wtxid_relay = peer.m_wtxid_relay,
3895 });
3896 }
3897
3898 pfrom.fSuccessfullyConnected = true;
3899 return;
3900 }
3901
3902 if (msg_type == NetMsgType::SENDHEADERS) {
3903 peer.m_prefers_headers = true;
3904 return;
3905 }
3906
3907 if (msg_type == NetMsgType::SENDCMPCT) {
3908 bool sendcmpct_hb{false};
3909 uint64_t sendcmpct_version{0};
3910 vRecv >> sendcmpct_hb >> sendcmpct_version;
3911
3912 // Only support compact block relay with witnesses
3913 if (sendcmpct_version != CMPCTBLOCKS_VERSION) return;
3914
3915 LOCK(cs_main);
3916 CNodeState* nodestate = State(pfrom.GetId());
3917 nodestate->m_provides_cmpctblocks = true;
3918 nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;
3919 // save whether peer selects us as BIP152 high-bandwidth peer
3920 // (receiving sendcmpct(1) signals high-bandwidth, sendcmpct(0) low-bandwidth)
3921 pfrom.m_bip152_highbandwidth_from = sendcmpct_hb;
3922 return;
3923 }
3924
3925 // BIP339 defines feature negotiation of wtxidrelay, which must happen between
3926 // VERSION and VERACK to avoid relay problems from switching after a connection is up.
3927 if (msg_type == NetMsgType::WTXIDRELAY) {
3928 if (pfrom.fSuccessfullyConnected) {
3929 // Disconnect peers that send a wtxidrelay message after VERACK.
3930 LogDebug(BCLog::NET, "wtxidrelay received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs));
3931 pfrom.fDisconnect = true;
3932 return;
3933 }
3934 if (pfrom.GetCommonVersion() >= WTXID_RELAY_VERSION) {
3935 if (!peer.m_wtxid_relay) {
3936 peer.m_wtxid_relay = true;
3937 m_wtxid_relay_peers++;
3938 } else {
3939 LogDebug(BCLog::NET, "ignoring duplicate wtxidrelay from peer=%d\n", pfrom.GetId());
3940 }
3941 } else {
3942 LogDebug(BCLog::NET, "ignoring wtxidrelay due to old common version=%d from peer=%d\n", pfrom.GetCommonVersion(), pfrom.GetId());
3943 }
3944 return;
3945 }
3946
3947 // BIP155 defines feature negotiation of addrv2 and sendaddrv2, which must happen
3948 // between VERSION and VERACK.
3949 if (msg_type == NetMsgType::SENDADDRV2) {
3950 if (pfrom.fSuccessfullyConnected) {
3951 // Disconnect peers that send a SENDADDRV2 message after VERACK.
3952 LogDebug(BCLog::NET, "sendaddrv2 received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs));
3953 pfrom.fDisconnect = true;
3954 return;
3955 }
3956 peer.m_wants_addrv2 = true;
3957 return;
3958 }
3959
3960 // Received from a peer demonstrating readiness to announce transactions via reconciliations.
3961 // This feature negotiation must happen between VERSION and VERACK to avoid relay problems
3962 // from switching announcement protocols after the connection is up.
3963 if (msg_type == NetMsgType::SENDTXRCNCL) {
3964 if (!m_txreconciliation) {
3965 LogDebug(BCLog::NET, "sendtxrcncl from peer=%d ignored, as our node does not have txreconciliation enabled\n", pfrom.GetId());
3966 return;
3967 }
3968
3969 if (pfrom.fSuccessfullyConnected) {
3970 LogDebug(BCLog::NET, "sendtxrcncl received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs));
3971 pfrom.fDisconnect = true;
3972 return;
3973 }
3974
3975 // Peer must not offer us reconciliations if we specified no tx relay support in VERSION.
3976 if (RejectIncomingTxs(pfrom)) {
3977 LogDebug(BCLog::NET, "sendtxrcncl received to which we indicated no tx relay, %s\n", pfrom.DisconnectMsg(fLogIPs));
3978 pfrom.fDisconnect = true;
3979 return;
3980 }
3981
3982 // Peer must not offer us reconciliations if they specified no tx relay support in VERSION.
3983 // This flag might also be false in other cases, but the RejectIncomingTxs check above
3984 // eliminates them, so that this flag fully represents what we are looking for.
3985 const auto* tx_relay = peer.GetTxRelay();
3986 if (!tx_relay || !WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs)) {
3987 LogDebug(BCLog::NET, "sendtxrcncl received which indicated no tx relay to us, %s\n", pfrom.DisconnectMsg(fLogIPs));
3988 pfrom.fDisconnect = true;
3989 return;
3990 }
3991
3992 uint32_t peer_txreconcl_version;
3993 uint64_t remote_salt;
3994 vRecv >> peer_txreconcl_version >> remote_salt;
3995
3996 const ReconciliationRegisterResult result = m_txreconciliation->RegisterPeer(pfrom.GetId(), pfrom.IsInboundConn(),
3997 peer_txreconcl_version, remote_salt);
3998 switch (result) {
4000 LogDebug(BCLog::NET, "Ignore unexpected txreconciliation signal from peer=%d\n", pfrom.GetId());
4001 break;
4003 break;
4005 LogDebug(BCLog::NET, "txreconciliation protocol violation (sendtxrcncl received from already registered peer), %s\n", pfrom.DisconnectMsg(fLogIPs));
4006 pfrom.fDisconnect = true;
4007 return;
4009 LogDebug(BCLog::NET, "txreconciliation protocol violation, %s\n", pfrom.DisconnectMsg(fLogIPs));
4010 pfrom.fDisconnect = true;
4011 return;
4012 }
4013 return;
4014 }
4015
4016 if (!pfrom.fSuccessfullyConnected) {
4017 LogDebug(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
4018 return;
4019 }
4020
4021 if (pfrom.IsPrivateBroadcastConn()) {
4022 if (msg_type != NetMsgType::PONG && msg_type != NetMsgType::GETDATA) {
4023 LogDebug(BCLog::PRIVBROADCAST, "Ignoring incoming message '%s', peer=%d%s", msg_type, pfrom.GetId(), pfrom.LogIP(fLogIPs));
4024 return;
4025 }
4026 }
4027
4028 if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
4029 const auto ser_params{
4030 msg_type == NetMsgType::ADDRV2 ?
4031 // Set V2 param so that the CNetAddr and CAddress
4032 // unserialize methods know that an address in v2 format is coming.
4035 };
4036
4037 std::vector<CAddress> vAddr;
4038
4039 vRecv >> ser_params(vAddr);
4040
4041 if (!SetupAddressRelay(pfrom, peer)) {
4042 LogDebug(BCLog::NET, "ignoring %s message from %s peer=%d\n", msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId());
4043 return;
4044 }
4045
4046 if (vAddr.size() > MAX_ADDR_TO_SEND)
4047 {
4048 Misbehaving(peer, strprintf("%s message size = %u", msg_type, vAddr.size()));
4049 return;
4050 }
4051
4052 // Store the new addresses
4053 std::vector<CAddress> vAddrOk;
4054 const auto current_a_time{Now<NodeSeconds>()};
4055
4056 // Update/increment addr rate limiting bucket.
4057 const auto current_time{GetTime<std::chrono::microseconds>()};
4058 if (peer.m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
4059 // Don't increment bucket if it's already full
4060 const auto time_diff = std::max(current_time - peer.m_addr_token_timestamp, 0us);
4061 const double increment = Ticks<SecondsDouble>(time_diff) * MAX_ADDR_RATE_PER_SECOND;
4062 peer.m_addr_token_bucket = std::min<double>(peer.m_addr_token_bucket + increment, MAX_ADDR_PROCESSING_TOKEN_BUCKET);
4063 }
4064 peer.m_addr_token_timestamp = current_time;
4065
4066 const bool rate_limited = !pfrom.HasPermission(NetPermissionFlags::Addr);
4067 uint64_t num_proc = 0;
4068 uint64_t num_rate_limit = 0;
4069 std::shuffle(vAddr.begin(), vAddr.end(), m_rng);
4070 for (CAddress& addr : vAddr)
4071 {
4072 if (interruptMsgProc)
4073 return;
4074
4075 // Apply rate limiting.
4076 if (peer.m_addr_token_bucket < 1.0) {
4077 if (rate_limited) {
4078 ++num_rate_limit;
4079 continue;
4080 }
4081 } else {
4082 peer.m_addr_token_bucket -= 1.0;
4083 }
4084 // We only bother storing full nodes, though this may include
4085 // things which we would not make an outbound connection to, in
4086 // part because we may make feeler connections to them.
4087 if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices))
4088 continue;
4089
4090 if (addr.nTime <= NodeSeconds{100000000s} || addr.nTime > current_a_time + 10min) {
4091 addr.nTime = current_a_time - 5 * 24h;
4092 }
4093 AddAddressKnown(peer, addr);
4094 if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
4095 // Do not process banned/discouraged addresses beyond remembering we received them
4096 continue;
4097 }
4098 ++num_proc;
4099 const bool reachable{g_reachable_nets.Contains(addr)};
4100 if (addr.nTime > current_a_time - 10min && !peer.m_getaddr_sent && vAddr.size() <= 10 && addr.IsRoutable()) {
4101 // Relay to a limited number of other nodes
4102 RelayAddress(pfrom.GetId(), addr, reachable);
4103 }
4104 // Do not store addresses outside our network
4105 if (reachable) {
4106 vAddrOk.push_back(addr);
4107 }
4108 }
4109 peer.m_addr_processed += num_proc;
4110 peer.m_addr_rate_limited += num_rate_limit;
4111 LogDebug(BCLog::NET, "Received addr: %u addresses (%u processed, %u rate-limited) from peer=%d\n",
4112 vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());
4113
4114 m_addrman.Add(vAddrOk, pfrom.addr, /*time_penalty=*/2h);
4115 if (vAddr.size() < 1000) peer.m_getaddr_sent = false;
4116
4117 // AddrFetch: Require multiple addresses to avoid disconnecting on self-announcements
4118 if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) {
4119 LogDebug(BCLog::NET, "addrfetch connection completed, %s\n", pfrom.DisconnectMsg(fLogIPs));
4120 pfrom.fDisconnect = true;
4121 }
4122 return;
4123 }
4124
4125 if (msg_type == NetMsgType::INV) {
4126 std::vector<CInv> vInv;
4127 vRecv >> vInv;
4128 if (vInv.size() > MAX_INV_SZ)
4129 {
4130 Misbehaving(peer, strprintf("inv message size = %u", vInv.size()));
4131 return;
4132 }
4133
4134 const bool reject_tx_invs{RejectIncomingTxs(pfrom)};
4135
4136 LOCK2(cs_main, m_tx_download_mutex);
4137
4138 const auto current_time{GetTime<std::chrono::microseconds>()};
4139 uint256* best_block{nullptr};
4140
4141 for (CInv& inv : vInv) {
4142 if (interruptMsgProc) return;
4143
4144 // Ignore INVs that don't match wtxidrelay setting.
4145 // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting.
4146 // This is fine as no INV messages are involved in that process.
4147 if (peer.m_wtxid_relay) {
4148 if (inv.IsMsgTx()) continue;
4149 } else {
4150 if (inv.IsMsgWtx()) continue;
4151 }
4152
4153 if (inv.IsMsgBlk()) {
4154 const bool fAlreadyHave = AlreadyHaveBlock(inv.hash);
4155 LogDebug(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
4156
4157 UpdateBlockAvailability(pfrom.GetId(), inv.hash);
4158 if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() && !IsBlockRequested(inv.hash)) {
4159 // Headers-first is the primary method of announcement on
4160 // the network. If a node fell back to sending blocks by
4161 // inv, it may be for a re-org, or because we haven't
4162 // completed initial headers sync. The final block hash
4163 // provided should be the highest, so send a getheaders and
4164 // then fetch the blocks we need to catch up.
4165 best_block = &inv.hash;
4166 }
4167 } else if (inv.IsGenTxMsg()) {
4168 if (reject_tx_invs) {
4169 LogDebug(BCLog::NET, "transaction (%s) inv sent in violation of protocol, %s\n", inv.hash.ToString(), pfrom.DisconnectMsg(fLogIPs));
4170 pfrom.fDisconnect = true;
4171 return;
4172 }
4173 const GenTxid gtxid = ToGenTxid(inv);
4174 AddKnownTx(peer, inv.hash);
4175
4176 if (!m_chainman.IsInitialBlockDownload()) {
4177 const bool fAlreadyHave{m_txdownloadman.AddTxAnnouncement(pfrom.GetId(), gtxid, current_time)};
4178 LogDebug(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
4179 }
4180 } else {
4181 LogDebug(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId());
4182 }
4183 }
4184
4185 if (best_block != nullptr) {
4186 // If we haven't started initial headers-sync with this peer, then
4187 // consider sending a getheaders now. On initial startup, there's a
4188 // reliability vs bandwidth tradeoff, where we are only trying to do
4189 // initial headers sync with one peer at a time, with a long
4190 // timeout (at which point, if the sync hasn't completed, we will
4191 // disconnect the peer and then choose another). In the meantime,
4192 // as new blocks are found, we are willing to add one new peer per
4193 // block to sync with as well, to sync quicker in the case where
4194 // our initial peer is unresponsive (but less bandwidth than we'd
4195 // use if we turned on sync with all peers).
4196 CNodeState& state{*Assert(State(pfrom.GetId()))};
4197 if (state.fSyncStarted || (!peer.m_inv_triggered_getheaders_before_sync && *best_block != m_last_block_inv_triggering_headers_sync)) {
4198 if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), peer)) {
4199 LogDebug(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
4200 m_chainman.m_best_header->nHeight, best_block->ToString(),
4201 pfrom.GetId());
4202 }
4203 if (!state.fSyncStarted) {
4204 peer.m_inv_triggered_getheaders_before_sync = true;
4205 // Update the last block hash that triggered a new headers
4206 // sync, so that we don't turn on headers sync with more
4207 // than 1 new peer every new block.
4208 m_last_block_inv_triggering_headers_sync = *best_block;
4209 }
4210 }
4211 }
4212
4213 return;
4214 }
4215
4216 if (msg_type == NetMsgType::GETDATA) {
4217 std::vector<CInv> vInv;
4218 vRecv >> vInv;
4219 if (vInv.size() > MAX_INV_SZ)
4220 {
4221 Misbehaving(peer, strprintf("getdata message size = %u", vInv.size()));
4222 return;
4223 }
4224
4225 LogDebug(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId());
4226
4227 if (vInv.size() > 0) {
4228 LogDebug(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId());
4229 }
4230
4231 if (pfrom.IsPrivateBroadcastConn()) {
4232 const auto pushed_tx_opt{m_tx_for_private_broadcast.GetTxForNode(pfrom.GetId())};
4233 if (!pushed_tx_opt) {
4234 LogDebug(BCLog::PRIVBROADCAST, "Disconnecting: got GETDATA without sending an INV, peer=%d%s",
4235 pfrom.GetId(), fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToStringAddrPort()) : "");
4236 pfrom.fDisconnect = true;
4237 return;
4238 }
4239
4240 const CTransactionRef& pushed_tx{*pushed_tx_opt};
4241
4242 // The GETDATA request must contain exactly one inv and it must be for the transaction
4243 // that we INVed to the peer earlier.
4244 if (vInv.size() == 1 && vInv[0].IsMsgTx() && vInv[0].hash == pushed_tx->GetHash().ToUint256()) {
4245
4246 MakeAndPushMessage(pfrom, NetMsgType::TX, TX_WITH_WITNESS(*pushed_tx));
4247
4248 peer.m_ping_queued = true; // Ensure a ping will be sent: mimic a request via RPC.
4249 MaybeSendPing(pfrom, peer, GetTime<std::chrono::microseconds>());
4250 } else {
4251 LogDebug(BCLog::PRIVBROADCAST, "Disconnecting: got an unexpected GETDATA message, peer=%d%s",
4252 pfrom.GetId(), fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToStringAddrPort()) : "");
4253 pfrom.fDisconnect = true;
4254 }
4255 return;
4256 }
4257
4258 {
4259 LOCK(peer.m_getdata_requests_mutex);
4260 peer.m_getdata_requests.insert(peer.m_getdata_requests.end(), vInv.begin(), vInv.end());
4261 ProcessGetData(pfrom, peer, interruptMsgProc);
4262 }
4263
4264 return;
4265 }
4266
4267 if (msg_type == NetMsgType::GETBLOCKS) {
4268 CBlockLocator locator;
4269 uint256 hashStop;
4270 vRecv >> locator >> hashStop;
4271
4272 if (locator.vHave.size() > MAX_LOCATOR_SZ) {
4273 LogDebug(BCLog::NET, "getblocks locator size %lld > %d, %s\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.DisconnectMsg(fLogIPs));
4274 pfrom.fDisconnect = true;
4275 return;
4276 }
4277
4278 // We might have announced the currently-being-connected tip using a
4279 // compact block, which resulted in the peer sending a getblocks
4280 // request, which we would otherwise respond to without the new block.
4281 // To avoid this situation we simply verify that we are on our best
4282 // known chain now. This is super overkill, but we handle it better
4283 // for getheaders requests, and there are no known nodes which support
4284 // compact blocks but still use getblocks to request blocks.
4285 {
4286 std::shared_ptr<const CBlock> a_recent_block;
4287 {
4288 LOCK(m_most_recent_block_mutex);
4289 a_recent_block = m_most_recent_block;
4290 }
4291 BlockValidationState state;
4292 if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
4293 LogDebug(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
4294 }
4295 }
4296
4297 LOCK(cs_main);
4298
4299 // Find the last block the caller has in the main chain
4300 const CBlockIndex* pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
4301
4302 // Send the rest of the chain
4303 if (pindex)
4304 pindex = m_chainman.ActiveChain().Next(pindex);
4305 int nLimit = 500;
4306 LogDebug(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId());
4307 for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
4308 {
4309 if (pindex->GetBlockHash() == hashStop)
4310 {
4311 LogDebug(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4312 break;
4313 }
4314 // If pruning, don't inv blocks unless we have on disk and are likely to still have
4315 // for some reasonable time window (1 hour) that block relay might require.
4316 const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
4317 if (m_chainman.m_blockman.IsPruneMode() && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave)) {
4318 LogDebug(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4319 break;
4320 }
4321 WITH_LOCK(peer.m_block_inv_mutex, peer.m_blocks_for_inv_relay.push_back(pindex->GetBlockHash()));
4322 if (--nLimit <= 0) {
4323 // When this block is requested, we'll send an inv that'll
4324 // trigger the peer to getblocks the next batch of inventory.
4325 LogDebug(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4326 WITH_LOCK(peer.m_block_inv_mutex, {peer.m_continuation_block = pindex->GetBlockHash();});
4327 break;
4328 }
4329 }
4330 return;
4331 }
4332
    if (msg_type == NetMsgType::GETBLOCKTXN) {
        // BIP 152 compact block relay: the peer requests the block
        // transactions it was missing after receiving our cmpctblock.
        BlockTransactionsRequest req;
        vRecv >> req;
        // Verify differential encoding invariant: indexes must be strictly increasing
        // DifferenceFormatter should guarantee this property during deserialization
        for (size_t i = 1; i < req.indexes.size(); ++i) {
            Assume(req.indexes[i] > req.indexes[i-1]);
        }

        // Fast path: serve the request from the most recently connected block
        // we keep cached, avoiding cs_main and any disk read.
        std::shared_ptr<const CBlock> recent_block;
        {
            LOCK(m_most_recent_block_mutex);
            if (m_most_recent_block_hash == req.blockhash)
                recent_block = m_most_recent_block;
            // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion
        }
        if (recent_block) {
            SendBlockTransactions(pfrom, peer, *recent_block, req);
            return;
        }

        FlatFilePos block_pos{};
        {
            LOCK(cs_main);

            const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(req.blockhash);
            if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) {
                LogDebug(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId());
                return;
            }

            // Only serve blocktxn for sufficiently recent blocks; anything
            // deeper falls through to the full-block response below.
            if (pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) {
                block_pos = pindex->GetBlockPos();
            }
        }

        if (!block_pos.IsNull()) {
            CBlock block;
            const bool ret{m_chainman.m_blockman.ReadBlock(block, block_pos, req.blockhash)};
            // If height is above MAX_BLOCKTXN_DEPTH then this block cannot get
            // pruned after we release cs_main above, so this read should never fail.
            assert(ret);

            SendBlockTransactions(pfrom, peer, block, req);
            return;
        }

        // If an older block is requested (should never happen in practice,
        // but can happen in tests) send a block response instead of a
        // blocktxn response. Sending a full block response instead of a
        // small blocktxn response is preferable in the case where a peer
        // might maliciously send lots of getblocktxn requests to trigger
        // expensive disk reads, because it will require the peer to
        // actually receive all the data read from disk over the network.
        LogDebug(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
        CInv inv{MSG_WITNESS_BLOCK, req.blockhash};
        WITH_LOCK(peer.m_getdata_requests_mutex, peer.m_getdata_requests.push_back(inv));
        // The message processing loop will go around again (without pausing) and we'll respond then
        return;
    }
4393
    if (msg_type == NetMsgType::GETHEADERS) {
        // Respond with up to m_opts.max_headers_result headers following the
        // fork point implied by the peer's locator (or the single header at
        // hashStop when the locator is null).
        CBlockLocator locator;
        uint256 hashStop;
        vRecv >> locator >> hashStop;

        // Oversized locators are a protocol violation; disconnect.
        if (locator.vHave.size() > MAX_LOCATOR_SZ) {
            LogDebug(BCLog::NET, "getheaders locator size %lld > %d, %s\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.DisconnectMsg(fLogIPs));
            pfrom.fDisconnect = true;
            return;
        }

        if (m_chainman.m_blockman.LoadingBlocks()) {
            LogDebug(BCLog::NET, "Ignoring getheaders from peer=%d while importing/reindexing\n", pfrom.GetId());
            return;
        }

        LOCK(cs_main);

        // Don't serve headers from our active chain until our chainwork is at least
        // the minimum chain work. This prevents us from starting a low-work headers
        // sync that will inevitably be aborted by our peer.
        if (m_chainman.ActiveTip() == nullptr ||
                (m_chainman.ActiveTip()->nChainWork < m_chainman.MinimumChainWork() && !pfrom.HasPermission(NetPermissionFlags::Download))) {
            LogDebug(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work; sending empty response\n", pfrom.GetId());
            // Just respond with an empty headers message, to tell the peer to
            // go away but not treat us as unresponsive.
            MakeAndPushMessage(pfrom, NetMsgType::HEADERS, std::vector<CBlockHeader>());
            return;
        }

        CNodeState *nodestate = State(pfrom.GetId());
        const CBlockIndex* pindex = nullptr;
        if (locator.IsNull())
        {
            // If locator is null, return the hashStop block
            pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop);
            if (!pindex) {
                return;
            }
            if (!BlockRequestAllowed(*pindex)) {
                LogDebug(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom.GetId());
                return;
            }
        }
        else
        {
            // Find the last block the caller has in the main chain
            pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
            if (pindex)
                pindex = m_chainman.ActiveChain().Next(pindex);
        }

        // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
        std::vector<CBlock> vHeaders;
        int nLimit = m_opts.max_headers_result;
        LogDebug(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId());
        for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
        {
            vHeaders.emplace_back(pindex->GetBlockHeader());
            if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
                break;
        }
        // pindex can be nullptr either if we sent m_chainman.ActiveChain().Tip() OR
        // if our peer has m_chainman.ActiveChain().Tip() (and thus we are sending an empty
        // headers message). In both cases it's safe to update
        // pindexBestHeaderSent to be our tip.
        //
        // It is important that we simply reset the BestHeaderSent value here,
        // and not max(BestHeaderSent, newHeaderSent). We might have announced
        // the currently-being-connected tip using a compact block, which
        // resulted in the peer sending a headers request, which we respond to
        // without the new block. By resetting the BestHeaderSent, we ensure we
        // will re-announce the new block via headers (or compact blocks again)
        // in the SendMessages logic.
        nodestate->pindexBestHeaderSent = pindex ? pindex : m_chainman.ActiveChain().Tip();
        MakeAndPushMessage(pfrom, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders));
        return;
    }
4472
4473 if (msg_type == NetMsgType::TX) {
4474 if (RejectIncomingTxs(pfrom)) {
4475 LogDebug(BCLog::NET, "transaction sent in violation of protocol, %s", pfrom.DisconnectMsg(fLogIPs));
4476 pfrom.fDisconnect = true;
4477 return;
4478 }
4479
4480 // Stop processing the transaction early if we are still in IBD since we don't
4481 // have enough information to validate it yet. Sending unsolicited transactions
4482 // is not considered a protocol violation, so don't punish the peer.
4483 if (m_chainman.IsInitialBlockDownload()) return;
4484
4485 CTransactionRef ptx;
4486 vRecv >> TX_WITH_WITNESS(ptx);
4487
4488 const Txid& txid = ptx->GetHash();
4489 const Wtxid& wtxid = ptx->GetWitnessHash();
4490
4491 const uint256& hash = peer.m_wtxid_relay ? wtxid.ToUint256() : txid.ToUint256();
4492 AddKnownTx(peer, hash);
4493
4494 if (const auto num_broadcasted{m_tx_for_private_broadcast.Remove(ptx)}) {
4495 LogDebug(BCLog::PRIVBROADCAST, "Received our privately broadcast transaction (txid=%s) from the "
4496 "network from peer=%d%s; stopping private broadcast attempts",
4497 txid.ToString(), pfrom.GetId(), pfrom.LogIP(fLogIPs));
4498 if (NUM_PRIVATE_BROADCAST_PER_TX > num_broadcasted.value()) {
4499 // Not all of the initial NUM_PRIVATE_BROADCAST_PER_TX connections were needed.
4500 // Tell CConnman it does not need to start the remaining ones.
4501 m_connman.m_private_broadcast.NumToOpenSub(NUM_PRIVATE_BROADCAST_PER_TX - num_broadcasted.value());
4502 }
4503 }
4504
4505 LOCK2(cs_main, m_tx_download_mutex);
4506
4507 const auto& [should_validate, package_to_validate] = m_txdownloadman.ReceivedTx(pfrom.GetId(), ptx);
4508 if (!should_validate) {
4510 // Always relay transactions received from peers with forcerelay
4511 // permission, even if they were already in the mempool, allowing
4512 // the node to function as a gateway for nodes hidden behind it.
4513 if (!m_mempool.exists(txid)) {
4514 LogInfo("Not relaying non-mempool transaction %s (wtxid=%s) from forcerelay peer=%d\n",
4515 txid.ToString(), wtxid.ToString(), pfrom.GetId());
4516 } else {
4517 LogInfo("Force relaying tx %s (wtxid=%s) from peer=%d\n",
4518 txid.ToString(), wtxid.ToString(), pfrom.GetId());
4519 InitiateTxBroadcastToAll(txid, wtxid);
4520 }
4521 }
4522
4523 if (package_to_validate) {
4524 const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)};
4525 LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(),
4526 package_result.m_state.IsValid() ? "package accepted" : "package rejected");
4527 ProcessPackageResult(package_to_validate.value(), package_result);
4528 }
4529 return;
4530 }
4531
4532 // ReceivedTx should not be telling us to validate the tx and a package.
4533 Assume(!package_to_validate.has_value());
4534
4535 const MempoolAcceptResult result = m_chainman.ProcessTransaction(ptx);
4536 const TxValidationState& state = result.m_state;
4537
4539 ProcessValidTx(pfrom.GetId(), ptx, result.m_replaced_transactions);
4541 }
4542 if (state.IsInvalid()) {
4543 if (auto package_to_validate{ProcessInvalidTx(pfrom.GetId(), ptx, state, /*first_time_failure=*/true)}) {
4544 const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)};
4545 LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(),
4546 package_result.m_state.IsValid() ? "package accepted" : "package rejected");
4547 ProcessPackageResult(package_to_validate.value(), package_result);
4548 }
4549 }
4550
4551 return;
4552 }
4553
    if (msg_type == NetMsgType::CMPCTBLOCK)
    {
        // BIP 152: a peer announced a block as a compact block. Validate the
        // header, then try to reconstruct the block from our mempool, falling
        // back to getblocktxn / getdata round trips as needed.
        // Ignore cmpctblock received while importing
        if (m_chainman.m_blockman.LoadingBlocks()) {
            LogDebug(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId());
            return;
        }

        CBlockHeaderAndShortTxIDs cmpctblock;
        vRecv >> cmpctblock;

        bool received_new_header = false;
        const auto blockhash = cmpctblock.header.GetHash();

        {
            LOCK(cs_main);

            const CBlockIndex* prev_block = m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock);
            if (!prev_block) {
                // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
                if (!m_chainman.IsInitialBlockDownload()) {
                    MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), peer);
                }
                return;
            } else if (prev_block->nChainWork + GetBlockProof(cmpctblock.header) < GetAntiDoSWorkThreshold()) {
                // If we get a low-work header in a compact block, we can ignore it.
                LogDebug(BCLog::NET, "Ignoring low-work compact block from peer %d\n", pfrom.GetId());
                return;
            }

            if (!m_chainman.m_blockman.LookupBlockIndex(blockhash)) {
                received_new_header = true;
            }
        }

        const CBlockIndex *pindex = nullptr;
        BlockValidationState state;
        if (!m_chainman.ProcessNewBlockHeaders({{cmpctblock.header}}, /*min_pow_checked=*/true, state, &pindex)) {
            if (state.IsInvalid()) {
                MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block=*/true, "invalid header via cmpctblock");
                return;
            }
        }

        // If AcceptBlockHeader returned true, it set pindex
        Assert(pindex);
        if (received_new_header) {
            LogBlockHeader(*pindex, pfrom, /*via_compact_block=*/true);
        }

        bool fProcessBLOCKTXN = false;

        // If we end up treating this as a plain headers message, call that as well
        // without cs_main.
        bool fRevertToHeaderProcessing = false;

        // Keep a CBlock for "optimistic" compactblock reconstructions (see
        // below)
        std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
        bool fBlockReconstructed = false;

        {
            LOCK(cs_main);
            UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());

            CNodeState *nodestate = State(pfrom.GetId());

            // If this was a new header with more work than our tip, update the
            // peer's last block announcement time
            if (received_new_header && pindex->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
                nodestate->m_last_block_announcement = GetTime();
            }

            if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
                return;

            auto range_flight = mapBlocksInFlight.equal_range(pindex->GetBlockHash());
            size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
            bool requested_block_from_this_peer{false};

            // Multimap ensures ordering of outstanding requests. It's either empty or first in line.
            bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());

            while (range_flight.first != range_flight.second) {
                if (range_flight.first->second.first == pfrom.GetId()) {
                    requested_block_from_this_peer = true;
                    break;
                }
                range_flight.first++;
            }

            if (pindex->nChainWork <= m_chainman.ActiveChain().Tip()->nChainWork || // We know something better
                    pindex->nTx != 0) { // We had this block at some point, but pruned it
                if (requested_block_from_this_peer) {
                    // We requested this block for some reason, but our mempool will probably be useless
                    // so we just grab the block via normal getdata
                    std::vector<CInv> vInv(1);
                    vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(peer), blockhash);
                    MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv);
                }
                return;
            }

            // If we're not close to tip yet, give up and let parallel block fetch work its magic
            if (!already_in_flight && !CanDirectFetch()) {
                return;
            }

            // We want to be a bit conservative just to be extra careful about DoS
            // possibilities in compact block processing...
            if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) {
                if ((already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK && nodestate->vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
                     requested_block_from_this_peer) {
                    std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
                    if (!BlockRequested(pfrom.GetId(), *pindex, &queuedBlockIt)) {
                        if (!(*queuedBlockIt)->partialBlock)
                            (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool));
                        else {
                            // The block was already in flight using compact blocks from the same peer
                            LogDebug(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
                            return;
                        }
                    }

                    PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
                    ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
                    if (status == READ_STATUS_INVALID) {
                        RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
                        Misbehaving(peer, "invalid compact block");
                        return;
                    } else if (status == READ_STATUS_FAILED) {
                        if (first_in_flight) {
                            // Duplicate txindexes, the block is now in-flight, so just request it
                            std::vector<CInv> vInv(1);
                            vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(peer), blockhash);
                            MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv);
                        } else {
                            // Give up for this peer and wait for other peer(s)
                            RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId());
                        }
                        return;
                    }

                    // Request whichever transactions short-id matching could
                    // not recover from our mempool.
                    BlockTransactionsRequest req;
                    for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
                        if (!partialBlock.IsTxAvailable(i))
                            req.indexes.push_back(i);
                    }
                    if (req.indexes.empty()) {
                        fProcessBLOCKTXN = true;
                    } else if (first_in_flight) {
                        // We will try to round-trip any compact blocks we get on failure,
                        // as long as it's first...
                        req.blockhash = pindex->GetBlockHash();
                        MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req);
                    } else if (pfrom.m_bip152_highbandwidth_to &&
                        (!pfrom.IsInboundConn() ||
                        IsBlockRequestedFromOutbound(blockhash) ||
                        already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK - 1)) {
                        // ... or it's a hb relay peer and:
                        // - peer is outbound, or
                        // - we already have an outbound attempt in flight(so we'll take what we can get), or
                        // - it's not the final parallel download slot (which we may reserve for first outbound)
                        req.blockhash = pindex->GetBlockHash();
                        MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req);
                    } else {
                        // Give up for this peer and wait for other peer(s)
                        RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId());
                    }
                } else {
                    // This block is either already in flight from a different
                    // peer, or this peer has too many blocks outstanding to
                    // download from.
                    // Optimistically try to reconstruct anyway since we might be
                    // able to without any round trips.
                    PartiallyDownloadedBlock tempBlock(&m_mempool);
                    ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
                    if (status != READ_STATUS_OK) {
                        // TODO: don't ignore failures
                        return;
                    }
                    std::vector<CTransactionRef> dummy;
                    const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock))};
                    status = tempBlock.FillBlock(*pblock, dummy,
                                                 /*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT));
                    if (status == READ_STATUS_OK) {
                        fBlockReconstructed = true;
                    }
                }
            } else {
                if (requested_block_from_this_peer) {
                    // We requested this block, but its far into the future, so our
                    // mempool will probably be useless - request the block normally
                    std::vector<CInv> vInv(1);
                    vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(peer), blockhash);
                    MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv);
                    return;
                } else {
                    // If this was an announce-cmpctblock, we want the same treatment as a header message
                    fRevertToHeaderProcessing = true;
                }
            }
        } // cs_main

        if (fProcessBLOCKTXN) {
            // Everything was recoverable locally: feed an empty blocktxn
            // through the normal reconstruction path.
            BlockTransactions txn;
            txn.blockhash = blockhash;
            return ProcessCompactBlockTxns(pfrom, peer, txn);
        }

        if (fRevertToHeaderProcessing) {
            // Headers received from HB compact block peers are permitted to be
            // relayed before full validation (see BIP 152), so we don't want to disconnect
            // the peer if the header turns out to be for an invalid block.
            // Note that if a peer tries to build on an invalid chain, that
            // will be detected and the peer will be disconnected/discouraged.
            return ProcessHeadersMessage(pfrom, peer, {cmpctblock.header}, /*via_compact_block=*/true);
        }

        if (fBlockReconstructed) {
            // If we got here, we were able to optimistically reconstruct a
            // block that is in flight from some other peer.
            {
                LOCK(cs_main);
                mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false));
            }
            // Setting force_processing to true means that we bypass some of
            // our anti-DoS protections in AcceptBlock, which filters
            // unrequested blocks that might be trying to waste our resources
            // (eg disk space). Because we only try to reconstruct blocks when
            // we're close to caught up (via the CanDirectFetch() requirement
            // above, combined with the behavior of not requesting blocks until
            // we have a chain with at least the minimum chain work), and we ignore
            // compact blocks with less work than our tip, it is safe to treat
            // reconstructed compact blocks as having been requested.
            ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true);
            LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
            if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
                // Clear download state for this block, which is in
                // process from some other peer. We do this after calling
                // ProcessNewBlock so that a malleated cmpctblock announcement
                // can't be used to interfere with block relay.
                RemoveBlockRequest(pblock->GetHash(), std::nullopt);
            }
        }
        return;
    }
4801
4802 if (msg_type == NetMsgType::BLOCKTXN)
4803 {
4804 // Ignore blocktxn received while importing
4805 if (m_chainman.m_blockman.LoadingBlocks()) {
4806 LogDebug(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId());
4807 return;
4808 }
4809
4810 BlockTransactions resp;
4811 vRecv >> resp;
4812
4813 return ProcessCompactBlockTxns(pfrom, peer, resp);
4814 }
4815
    if (msg_type == NetMsgType::HEADERS)
    {
        // A batch of block headers from the peer, either solicited via
        // getheaders or announced unsolicited.
        // Ignore headers received while importing
        if (m_chainman.m_blockman.LoadingBlocks()) {
            LogDebug(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId());
            return;
        }

        std::vector<CBlockHeader> headers;

        // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
        unsigned int nCount = ReadCompactSize(vRecv);
        if (nCount > m_opts.max_headers_result) {
            Misbehaving(peer, strprintf("headers message size = %u", nCount));
            return;
        }
        headers.resize(nCount);
        for (unsigned int n = 0; n < nCount; n++) {
            vRecv >> headers[n];
            ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
        }

        ProcessHeadersMessage(pfrom, peer, std::move(headers), /*via_compact_block=*/false);

        // Check if the headers presync progress needs to be reported to validation.
        // This needs to be done without holding the m_headers_presync_mutex lock.
        if (m_headers_presync_should_signal.exchange(false)) {
            HeadersPresyncStats stats;
            {
                LOCK(m_headers_presync_mutex);
                auto it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
                if (it != m_headers_presync_stats.end()) stats = it->second;
            }
            if (stats.second) {
                m_chainman.ReportHeadersPresync(stats.second->first, stats.second->second);
            }
        }

        return;
    }
4856
    if (msg_type == NetMsgType::BLOCK)
    {
        // A full block from the peer. Check for witness mutation, update the
        // in-flight bookkeeping, then submit the block for validation.
        // Ignore block received while importing
        if (m_chainman.m_blockman.LoadingBlocks()) {
            LogDebug(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId());
            return;
        }

        std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
        vRecv >> TX_WITH_WITNESS(*pblock);

        LogDebug(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId());

        const CBlockIndex* prev_block{WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock))};

        // Check for possible mutation if it connects to something we know so we can check for DEPLOYMENT_SEGWIT being active
        if (prev_block && IsBlockMutated(/*block=*/*pblock,
                           /*check_witness_root=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT))) {
            LogDebug(BCLog::NET, "Received mutated block from peer=%d\n", peer.m_id);
            Misbehaving(peer, "mutated block");
            WITH_LOCK(cs_main, RemoveBlockRequest(pblock->GetHash(), peer.m_id));
            return;
        }

        bool forceProcessing = false;
        const uint256 hash(pblock->GetHash());
        bool min_pow_checked = false;
        {
            LOCK(cs_main);
            // Always process the block if we requested it, since we may
            // need it even when it's not a candidate for a new best tip.
            forceProcessing = IsBlockRequested(hash);
            RemoveBlockRequest(hash, pfrom.GetId());
            // mapBlockSource is only used for punishing peers and setting
            // which peers send us compact blocks, so the race between here and
            // cs_main in ProcessNewBlock is fine.
            mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));

            // Check claimed work on this block against our anti-dos thresholds.
            if (prev_block && prev_block->nChainWork + GetBlockProof(*pblock) >= GetAntiDoSWorkThreshold()) {
                min_pow_checked = true;
            }
        }
        ProcessBlock(pfrom, pblock, forceProcessing, min_pow_checked);
        return;
    }
4903
4904 if (msg_type == NetMsgType::GETADDR) {
4905 // This asymmetric behavior for inbound and outbound connections was introduced
4906 // to prevent a fingerprinting attack: an attacker can send specific fake addresses
4907 // to users' AddrMan and later request them by sending getaddr messages.
4908 // Making nodes which are behind NAT and can only make outgoing connections ignore
4909 // the getaddr message mitigates the attack.
4910 if (!pfrom.IsInboundConn()) {
4911 LogDebug(BCLog::NET, "Ignoring \"getaddr\" from %s connection. peer=%d\n", pfrom.ConnectionTypeAsString(), pfrom.GetId());
4912 return;
4913 }
4914
4915 // Since this must be an inbound connection, SetupAddressRelay will
4916 // never fail.
4917 Assume(SetupAddressRelay(pfrom, peer));
4918
4919 // Only send one GetAddr response per connection to reduce resource waste
4920 // and discourage addr stamping of INV announcements.
4921 if (peer.m_getaddr_recvd) {
4922 LogDebug(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId());
4923 return;
4924 }
4925 peer.m_getaddr_recvd = true;
4926
4927 peer.m_addrs_to_send.clear();
4928 std::vector<CAddress> vAddr;
4930 vAddr = m_connman.GetAddressesUnsafe(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND, /*network=*/std::nullopt);
4931 } else {
4932 vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
4933 }
4934 for (const CAddress &addr : vAddr) {
4935 PushAddress(peer, addr);
4936 }
4937 return;
4938 }
4939
4940 if (msg_type == NetMsgType::MEMPOOL) {
4941 // Only process received mempool messages if we advertise NODE_BLOOM
4942 // or if the peer has mempool permissions.
4943 if (!(peer.m_our_services & NODE_BLOOM) && !pfrom.HasPermission(NetPermissionFlags::Mempool))
4944 {
4946 {
4947 LogDebug(BCLog::NET, "mempool request with bloom filters disabled, %s\n", pfrom.DisconnectMsg(fLogIPs));
4948 pfrom.fDisconnect = true;
4949 }
4950 return;
4951 }
4952
4953 if (m_connman.OutboundTargetReached(false) && !pfrom.HasPermission(NetPermissionFlags::Mempool))
4954 {
4956 {
4957 LogDebug(BCLog::NET, "mempool request with bandwidth limit reached, %s\n", pfrom.DisconnectMsg(fLogIPs));
4958 pfrom.fDisconnect = true;
4959 }
4960 return;
4961 }
4962
4963 if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) {
4964 LOCK(tx_relay->m_tx_inventory_mutex);
4965 tx_relay->m_send_mempool = true;
4966 }
4967 return;
4968 }
4969
4970 if (msg_type == NetMsgType::PING) {
4971 if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
4972 uint64_t nonce = 0;
4973 vRecv >> nonce;
4974 // Echo the message back with the nonce. This allows for two useful features:
4975 //
4976 // 1) A remote node can quickly check if the connection is operational
4977 // 2) Remote nodes can measure the latency of the network thread. If this node
4978 // is overloaded it won't respond to pings quickly and the remote node can
4979 // avoid sending us more work, like chain download requests.
4980 //
4981 // The nonce stops the remote getting confused between different pings: without
4982 // it, if the remote node sends a ping once per second and this node takes 5
4983 // seconds to respond to each, the 5th ping the remote sends would appear to
4984 // return very quickly.
4985 MakeAndPushMessage(pfrom, NetMsgType::PONG, nonce);
4986 }
4987 return;
4988 }
4989
4990 if (msg_type == NetMsgType::PONG) {
4991 const auto ping_end = time_received;
4992 uint64_t nonce = 0;
4993 size_t nAvail = vRecv.in_avail();
4994 bool bPingFinished = false;
4995 std::string sProblem;
4996
4997 if (nAvail >= sizeof(nonce)) {
4998 vRecv >> nonce;
4999
5000 // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
5001 if (peer.m_ping_nonce_sent != 0) {
5002 if (nonce == peer.m_ping_nonce_sent) {
5003 // Matching pong received, this ping is no longer outstanding
5004 bPingFinished = true;
5005 const auto ping_time = ping_end - peer.m_ping_start.load();
5006 if (ping_time.count() >= 0) {
5007 // Let connman know about this successful ping-pong
5008 pfrom.PongReceived(ping_time);
5009 if (pfrom.IsPrivateBroadcastConn()) {
5010 m_tx_for_private_broadcast.NodeConfirmedReception(pfrom.GetId());
5011 LogDebug(BCLog::PRIVBROADCAST, "Got a PONG (the transaction will probably reach the network), marking for disconnect, peer=%d%s",
5012 pfrom.GetId(), pfrom.LogIP(fLogIPs));
5013 pfrom.fDisconnect = true;
5014 }
5015 } else {
5016 // This should never happen
5017 sProblem = "Timing mishap";
5018 }
5019 } else {
5020 // Nonce mismatches are normal when pings are overlapping
5021 sProblem = "Nonce mismatch";
5022 if (nonce == 0) {
5023 // This is most likely a bug in another implementation somewhere; cancel this ping
5024 bPingFinished = true;
5025 sProblem = "Nonce zero";
5026 }
5027 }
5028 } else {
5029 sProblem = "Unsolicited pong without ping";
5030 }
5031 } else {
5032 // This is most likely a bug in another implementation somewhere; cancel this ping
5033 bPingFinished = true;
5034 sProblem = "Short payload";
5035 }
5036
5037 if (!(sProblem.empty())) {
5038 LogDebug(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
5039 pfrom.GetId(),
5040 sProblem,
5041 peer.m_ping_nonce_sent,
5042 nonce,
5043 nAvail);
5044 }
5045 if (bPingFinished) {
5046 peer.m_ping_nonce_sent = 0;
5047 }
5048 return;
5049 }
5050
5051 if (msg_type == NetMsgType::FILTERLOAD) {
5052 if (!(peer.m_our_services & NODE_BLOOM)) {
5053 LogDebug(BCLog::NET, "filterload received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs));
5054 pfrom.fDisconnect = true;
5055 return;
5056 }
5057 CBloomFilter filter;
5058 vRecv >> filter;
5059
5060 if (!filter.IsWithinSizeConstraints())
5061 {
5062 // There is no excuse for sending a too-large filter
5063 Misbehaving(peer, "too-large bloom filter");
5064 } else if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) {
5065 {
5066 LOCK(tx_relay->m_bloom_filter_mutex);
5067 tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
5068 tx_relay->m_relay_txs = true;
5069 }
5070 pfrom.m_bloom_filter_loaded = true;
5071 pfrom.m_relays_txs = true;
5072 }
5073 return;
5074 }
5075
5076 if (msg_type == NetMsgType::FILTERADD) {
5077 if (!(peer.m_our_services & NODE_BLOOM)) {
5078 LogDebug(BCLog::NET, "filteradd received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs));
5079 pfrom.fDisconnect = true;
5080 return;
5081 }
5082 std::vector<unsigned char> vData;
5083 vRecv >> vData;
5084
5085 // Nodes must NEVER send a data item > MAX_SCRIPT_ELEMENT_SIZE bytes (the max size for a script data object,
5086 // and thus, the maximum size any matched object can have) in a filteradd message
5087 bool bad = false;
5088 if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
5089 bad = true;
5090 } else if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) {
5091 LOCK(tx_relay->m_bloom_filter_mutex);
5092 if (tx_relay->m_bloom_filter) {
5093 tx_relay->m_bloom_filter->insert(vData);
5094 } else {
5095 bad = true;
5096 }
5097 }
5098 if (bad) {
5099 Misbehaving(peer, "bad filteradd message");
5100 }
5101 return;
5102 }
5103
5104 if (msg_type == NetMsgType::FILTERCLEAR) {
5105 if (!(peer.m_our_services & NODE_BLOOM)) {
5106 LogDebug(BCLog::NET, "filterclear received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs));
5107 pfrom.fDisconnect = true;
5108 return;
5109 }
5110 auto tx_relay = peer.GetTxRelay();
5111 if (!tx_relay) return;
5112
5113 {
5114 LOCK(tx_relay->m_bloom_filter_mutex);
5115 tx_relay->m_bloom_filter = nullptr;
5116 tx_relay->m_relay_txs = true;
5117 }
5118 pfrom.m_bloom_filter_loaded = false;
5119 pfrom.m_relays_txs = true;
5120 return;
5121 }
5122
5123 if (msg_type == NetMsgType::FEEFILTER) {
5124 CAmount newFeeFilter = 0;
5125 vRecv >> newFeeFilter;
5126 if (MoneyRange(newFeeFilter)) {
5127 if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) {
5128 tx_relay->m_fee_filter_received = newFeeFilter;
5129 }
5130 LogDebug(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
5131 }
5132 return;
5133 }
5134
    if (msg_type == NetMsgType::GETCFILTERS) {
        // BIP 157: request for compact block filters; delegated to a helper.
        ProcessGetCFilters(pfrom, peer, vRecv);
        return;
    }

    if (msg_type == NetMsgType::GETCFHEADERS) {
        // BIP 157: request for compact block filter headers.
        ProcessGetCFHeaders(pfrom, peer, vRecv);
        return;
    }

    if (msg_type == NetMsgType::GETCFCHECKPT) {
        // BIP 157: request for filter header checkpoints.
        ProcessGetCFCheckPt(pfrom, peer, vRecv);
        return;
    }
5149
5150 if (msg_type == NetMsgType::NOTFOUND) {
5151 std::vector<CInv> vInv;
5152 vRecv >> vInv;
5153 std::vector<GenTxid> tx_invs;
5155 for (CInv &inv : vInv) {
5156 if (inv.IsGenTxMsg()) {
5157 tx_invs.emplace_back(ToGenTxid(inv));
5158 }
5159 }
5160 }
5161 LOCK(m_tx_download_mutex);
5162 m_txdownloadman.ReceivedNotFound(pfrom.GetId(), tx_invs);
5163 return;
5164 }
5165
5166 // Ignore unknown message types for extensibility
5167 LogDebug(BCLog::NET, "Unknown message type \"%s\" from peer=%d", SanitizeString(msg_type), pfrom.GetId());
5168 return;
5169}
5170
5171bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer)
5172{
5173 {
5174 LOCK(peer.m_misbehavior_mutex);
5175
5176 // There's nothing to do if the m_should_discourage flag isn't set
5177 if (!peer.m_should_discourage) return false;
5178
5179 peer.m_should_discourage = false;
5180 } // peer.m_misbehavior_mutex
5181
5183 // We never disconnect or discourage peers for bad behavior if they have NetPermissionFlags::NoBan permission
5184 LogWarning("Not punishing noban peer %d!", peer.m_id);
5185 return false;
5186 }
5187
5188 if (pnode.IsManualConn()) {
5189 // We never disconnect or discourage manual peers for bad behavior
5190 LogWarning("Not punishing manually connected peer %d!", peer.m_id);
5191 return false;
5192 }
5193
5194 if (pnode.addr.IsLocal()) {
5195 // We disconnect local peers for bad behavior but don't discourage (since that would discourage
5196 // all peers on the same local address)
5197 LogDebug(BCLog::NET, "Warning: disconnecting but not discouraging %s peer %d!\n",
5198 pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id);
5199 pnode.fDisconnect = true;
5200 return true;
5201 }
5202
5203 // Normal case: Disconnect the peer and discourage all nodes sharing the address
5204 LogDebug(BCLog::NET, "Disconnecting and discouraging peer %d!\n", peer.m_id);
5205 if (m_banman) m_banman->Discourage(pnode.addr);
5206 m_connman.DisconnectNode(pnode.addr);
5207 return true;
5208}
5209
bool PeerManagerImpl::ProcessMessages(CNode& node, std::atomic<bool>& interruptMsgProc)
{
    // Drives one round of message processing for this peer. Returns true if
    // more work remains for this peer (caller should revisit soon), false
    // otherwise.
    AssertLockNotHeld(m_tx_download_mutex);
    AssertLockHeld(g_msgproc_mutex);

    // Bail out if the Peer object no longer exists (peer destructed).
    PeerRef maybe_peer{GetPeerRef(node.GetId())};
    if (maybe_peer == nullptr) return false;
    Peer& peer{*maybe_peer};

    // For outbound connections, ensure that the initial VERSION message
    // has been sent first before processing any incoming messages
    if (!node.IsInboundConn() && !peer.m_outbound_version_message_sent) return false;

    // Service any queued getdata requests before polling for new messages.
    {
        LOCK(peer.m_getdata_requests_mutex);
        if (!peer.m_getdata_requests.empty()) {
            ProcessGetData(node, peer, interruptMsgProc);
        }
    }

    const bool processed_orphan = ProcessOrphanTx(peer);

    if (node.fDisconnect)
        return false;

    // If an orphan was reconsidered, report more work and defer polling a
    // new message until the next call.
    if (processed_orphan) return true;

    // this maintains the order of responses
    // and prevents m_getdata_requests to grow unbounded
    {
        LOCK(peer.m_getdata_requests_mutex);
        if (!peer.m_getdata_requests.empty()) return true;
    }

    // Don't bother if send buffer is too full to respond anyway
    if (node.fPauseSend) return false;

    auto poll_result{node.PollMessage()};
    if (!poll_result) {
        // No message to process
        return false;
    }

    CNetMessage& msg{poll_result->first};
    bool fMoreWork = poll_result->second;

    TRACEPOINT(net, inbound_message,
        node.GetId(),
        node.m_addr_name.c_str(),
        node.ConnectionTypeAsString().c_str(),
        msg.m_type.c_str(),
        msg.m_recv.size(),
        msg.m_recv.data()
    );

    if (m_opts.capture_messages) {
        CaptureMessage(node.addr, msg.m_type, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true);
    }

    try {
        ProcessMessage(peer, node, msg.m_type, msg.m_recv, msg.m_time, interruptMsgProc);
        if (interruptMsgProc) return false;
        // Processing the message may have queued getdata requests; if so,
        // report more work so they are serviced promptly.
        {
            LOCK(peer.m_getdata_requests_mutex);
            if (!peer.m_getdata_requests.empty()) fMoreWork = true;
        }
        // Does this peer have an orphan ready to reconsider?
        // (Note: we may have provided a parent for an orphan provided
        // by another peer that was already processed; in that case,
        // the extra work may not be noticed, possibly resulting in an
        // unnecessary 100ms delay)
        LOCK(m_tx_download_mutex);
        if (m_txdownloadman.HaveMoreWork(peer.m_id)) fMoreWork = true;
    } catch (const std::exception& e) {
        // Exceptions from message handling are logged and swallowed: a bad
        // message must not bring down the message-processing thread.
        LogDebug(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name());
    } catch (...) {
        LogDebug(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size);
    }

    return fMoreWork;
}
5291
void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds)
{
    // Evaluate whether this outbound peer is keeping up with our chain work,
    // and schedule a getheaders probe / disconnection if it is not.
    // NOTE(review): State() access suggests this is called with cs_main held
    // (presumably annotated EXCLUSIVE_LOCKS_REQUIRED upstream) — confirm.

    CNodeState &state = *State(pto.GetId());

    if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) {
        // This is an outbound peer subject to disconnection if they don't
        // announce a block with as much work as the current tip within
        // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
        // their chain has more work than ours, we should sync to it,
        // unless it's invalid, in which case we should find that out and
        // disconnect from them elsewhere).
        if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork) {
            // The outbound peer has sent us a block with at least as much work as our current tip, so reset the timeout if it was set
            if (state.m_chain_sync.m_timeout != 0s) {
                state.m_chain_sync.m_timeout = 0s;
                state.m_chain_sync.m_work_header = nullptr;
                state.m_chain_sync.m_sent_getheaders = false;
            }
        } else if (state.m_chain_sync.m_timeout == 0s || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
            // At this point we know that the outbound peer has either never sent us a block/header or they have, but its tip is behind ours
            // AND
            // we are noticing this for the first time (m_timeout is 0)
            // OR we noticed this at some point within the last CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds and set a timeout
            // for them, they caught up to our tip at the time of setting the timer but not to our current one (we've also advanced).
            // Either way, set a new timeout based on our current tip.
            state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
            state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
            state.m_chain_sync.m_sent_getheaders = false;
        } else if (state.m_chain_sync.m_timeout > 0s && time_in_seconds > state.m_chain_sync.m_timeout) {
            // No evidence yet that our peer has synced to a chain with work equal to that
            // of our tip, when we first detected it was behind. Send a single getheaders
            // message to give the peer a chance to update us.
            if (state.m_chain_sync.m_sent_getheaders) {
                // They've run out of time to catch up!
                LogInfo("Outbound peer has old chain, best known block = %s, %s\n", state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", pto.DisconnectMsg(fLogIPs));
                pto.fDisconnect = true;
            } else {
                assert(state.m_chain_sync.m_work_header);
                // Here, we assume that the getheaders message goes out,
                // because it'll either go out or be skipped because of a
                // getheaders in-flight already, in which case the peer should
                // still respond to us with a sufficiently high work chain tip.
                MaybeSendGetHeaders(pto,
                        GetLocator(state.m_chain_sync.m_work_header->pprev),
                        peer);
                LogDebug(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
                state.m_chain_sync.m_sent_getheaders = true;
                // Bump the timeout to allow a response, which could clear the timeout
                // (if the response shows the peer has synced), reset the timeout (if
                // the peer syncs to the required work but not to our tip), or result
                // in disconnect (if we advance to the timeout and pindexBestKnownBlock
                // has not sufficiently progressed)
                state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
            }
        }
    }
}
5351
void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now)
{
    // Trim excess outbound connections: first any extra block-relay-only
    // peer, then any extra outbound-full-relay peer, choosing the least
    // useful candidate in each category.

    // If we have any extra block-relay-only peers, disconnect the youngest unless
    // it's given us a block -- in which case, compare with the second-youngest, and
    // out of those two, disconnect the peer who least recently gave us a block.
    // The youngest block-relay-only peer would be the extra peer we connected
    // to temporarily in order to sync our tip; see net.cpp.
    // Note that we use higher nodeid as a measure for most recent connection.
    if (m_connman.GetExtraBlockRelayCount() > 0) {
        // pair of (node id, last block time); node id -1 means "none found yet"
        std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0}, next_youngest_peer{-1, 0};

        m_connman.ForEachNode([&](CNode* pnode) {
            if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) return;
            if (pnode->GetId() > youngest_peer.first) {
                next_youngest_peer = youngest_peer;
                youngest_peer.first = pnode->GetId();
                youngest_peer.second = pnode->m_last_block_time;
            }
        });
        NodeId to_disconnect = youngest_peer.first;
        if (youngest_peer.second > next_youngest_peer.second) {
            // Our newest block-relay-only peer gave us a block more recently;
            // disconnect our second youngest.
            to_disconnect = next_youngest_peer.first;
        }
        m_connman.ForNode(to_disconnect, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
            // Make sure we're not getting a block right now, and that
            // we've been connected long enough for this eviction to happen
            // at all.
            // Note that we only request blocks from a peer if we learn of a
            // valid headers chain with at least as much work as our tip.
            CNodeState *node_state = State(pnode->GetId());
            if (node_state == nullptr ||
                (now - pnode->m_connected >= MINIMUM_CONNECT_TIME && node_state->vBlocksInFlight.empty())) {
                pnode->fDisconnect = true;
                LogDebug(BCLog::NET, "disconnecting extra block-relay-only peer=%d (last block received at time %d)\n",
                         pnode->GetId(), count_seconds(pnode->m_last_block_time));
                return true;
            } else {
                LogDebug(BCLog::NET, "keeping block-relay-only peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
                         pnode->GetId(), count_seconds(pnode->m_connected), node_state->vBlocksInFlight.size());
            }
            return false;
        });
    }

    // Check whether we have too many outbound-full-relay peers
    if (m_connman.GetExtraFullOutboundCount() > 0) {
        // If we have more outbound-full-relay peers than we target, disconnect one.
        // Pick the outbound-full-relay peer that least recently announced
        // us a new block, with ties broken by choosing the more recent
        // connection (higher node id)
        // Protect peers from eviction if we don't have another connection
        // to their network, counting both outbound-full-relay and manual peers.
        NodeId worst_peer = -1;
        int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();

        m_connman.ForEachNode([&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_connman.GetNodesMutex()) {
            AssertLockHeld(::cs_main);

            // Only consider outbound-full-relay peers that are not already
            // marked for disconnection
            if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) return;
            CNodeState *state = State(pnode->GetId());
            if (state == nullptr) return; // shouldn't be possible, but just in case
            // Don't evict our protected peers
            if (state->m_chain_sync.m_protect) return;
            // If this is the only connection on a particular network that is
            // OUTBOUND_FULL_RELAY or MANUAL, protect it.
            if (!m_connman.MultipleManualOrFullOutboundConns(pnode->addr.GetNetwork())) return;
            if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
                worst_peer = pnode->GetId();
                oldest_block_announcement = state->m_last_block_announcement;
            }
        });
        if (worst_peer != -1) {
            bool disconnected = m_connman.ForNode(worst_peer, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
                // Only disconnect a peer that has been connected to us for
                // some reasonable fraction of our check-frequency, to give
                // it time for new information to have arrived.
                // Also don't disconnect any peer we're trying to download a
                // block from.
                CNodeState &state = *State(pnode->GetId());
                if (now - pnode->m_connected > MINIMUM_CONNECT_TIME && state.vBlocksInFlight.empty()) {
                    LogDebug(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
                    pnode->fDisconnect = true;
                    return true;
                } else {
                    LogDebug(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
                             pnode->GetId(), count_seconds(pnode->m_connected), state.vBlocksInFlight.size());
                    return false;
                }
            });
            if (disconnected) {
                // If we disconnected an extra peer, that means we successfully
                // connected to at least one peer after the last time we
                // detected a stale tip. Don't try any more extra peers until
                // we next detect a stale tip, to limit the load we put on the
                // network from these extra connections.
                m_connman.SetTryNewOutboundPeer(false);
            }
        }
    }
}
5459
5460void PeerManagerImpl::CheckForStaleTipAndEvictPeers()
5461{
5462 LOCK(cs_main);
5463
5465
5466 EvictExtraOutboundPeers(now);
5467
5468 if (now > m_stale_tip_check_time) {
5469 // Check whether our tip is stale, and if so, allow using an extra
5470 // outbound peer
5471 if (!m_chainman.m_blockman.LoadingBlocks() && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) {
5472 LogInfo("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n",
5473 count_seconds(now - m_last_tip_update.load()));
5474 m_connman.SetTryNewOutboundPeer(true);
5475 } else if (m_connman.GetTryNewOutboundPeer()) {
5476 m_connman.SetTryNewOutboundPeer(false);
5477 }
5478 m_stale_tip_check_time = now + STALE_CHECK_INTERVAL;
5479 }
5480
5481 if (!m_initial_sync_finished && CanDirectFetch()) {
5482 m_connman.StartExtraBlockRelayPeers();
5483 m_initial_sync_finished = true;
5484 }
5485}
5486
5487void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now)
5488{
5489 if (m_connman.ShouldRunInactivityChecks(node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
5490 peer.m_ping_nonce_sent &&
5491 now > peer.m_ping_start.load() + TIMEOUT_INTERVAL)
5492 {
5493 // The ping timeout is using mocktime. To disable the check during
5494 // testing, increase -peertimeout.
5495 LogDebug(BCLog::NET, "ping timeout: %fs, %s", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), node_to.DisconnectMsg(fLogIPs));
5496 node_to.fDisconnect = true;
5497 return;
5498 }
5499
5500 bool pingSend = false;
5501
5502 if (peer.m_ping_queued) {
5503 // RPC ping request by user
5504 pingSend = true;
5505 }
5506
5507 if (peer.m_ping_nonce_sent == 0 && now > peer.m_ping_start.load() + PING_INTERVAL) {
5508 // Ping automatically sent as a latency probe & keepalive.
5509 pingSend = true;
5510 }
5511
5512 if (pingSend) {
5513 uint64_t nonce;
5514 do {
5515 nonce = FastRandomContext().rand64();
5516 } while (nonce == 0);
5517 peer.m_ping_queued = false;
5518 peer.m_ping_start = now;
5519 if (node_to.GetCommonVersion() > BIP0031_VERSION) {
5520 peer.m_ping_nonce_sent = nonce;
5521 MakeAndPushMessage(node_to, NetMsgType::PING, nonce);
5522 } else {
5523 // Peer is too old to support ping message type with nonce, pong will never arrive.
5524 peer.m_ping_nonce_sent = 0;
5525 MakeAndPushMessage(node_to, NetMsgType::PING);
5526 }
5527 }
5528}
5529
void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time)
{
    // Periodically announce our own address and flush queued addr records
    // to this peer, respecting the peer's addr-format preference (ADDRV2
    // vs legacy ADDR) and the randomized broadcast schedule.

    // Nothing to do for non-address-relay peers
    if (!peer.m_addr_relay_enabled) return;

    LOCK(peer.m_addr_send_times_mutex);
    // Periodically advertise our local address to the peer.
    if (fListen && !m_chainman.IsInitialBlockDownload() &&
        peer.m_next_local_addr_send < current_time) {
        // If we've sent before, clear the bloom filter for the peer, so that our
        // self-announcement will actually go out.
        // This might be unnecessary if the bloom filter has already rolled
        // over since our last self-announcement, but there is only a small
        // bandwidth cost that we can incur by doing this (which happens
        // once a day on average).
        if (peer.m_next_local_addr_send != 0us) {
            peer.m_addr_known->reset();
        }
        if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) {
            CAddress local_addr{*local_service, peer.m_our_services, Now<NodeSeconds>()};
            if (peer.m_next_local_addr_send == 0us) {
                // Send the initial self-announcement in its own message. This makes sure
                // rate-limiting with limited start-tokens doesn't ignore it if the first
                // message ends up containing multiple addresses.
                if (IsAddrCompatible(peer, local_addr)) {
                    std::vector<CAddress> self_announcement{local_addr};
                    if (peer.m_wants_addrv2) {
                        MakeAndPushMessage(node, NetMsgType::ADDRV2, CAddress::V2_NETWORK(self_announcement));
                    } else {
                        MakeAndPushMessage(node, NetMsgType::ADDR, CAddress::V1_NETWORK(self_announcement));
                    }
                }
            } else {
                // All later self-announcements are sent together with the other addresses.
                PushAddress(peer, local_addr);
            }
        }
        // Schedule the next self-announcement at a random exponential interval.
        peer.m_next_local_addr_send = current_time + m_rng.rand_exp_duration(AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
    }

    // We sent an `addr` message to this peer recently. Nothing more to do.
    if (current_time <= peer.m_next_addr_send) return;

    peer.m_next_addr_send = current_time + m_rng.rand_exp_duration(AVG_ADDRESS_BROADCAST_INTERVAL);

    if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) {
        // Should be impossible since we always check size before adding to
        // m_addrs_to_send. Recover by trimming the vector.
        peer.m_addrs_to_send.resize(MAX_ADDR_TO_SEND);
    }

    // Remove addr records that the peer already knows about, and add new
    // addrs to the m_addr_known filter on the same pass.
    auto addr_already_known = [&peer](const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) {
        bool ret = peer.m_addr_known->contains(addr.GetKey());
        if (!ret) peer.m_addr_known->insert(addr.GetKey());
        return ret;
    };
    peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(), peer.m_addrs_to_send.end(), addr_already_known),
                               peer.m_addrs_to_send.end());

    // No addr messages to send
    if (peer.m_addrs_to_send.empty()) return;

    if (peer.m_wants_addrv2) {
        MakeAndPushMessage(node, NetMsgType::ADDRV2, CAddress::V2_NETWORK(peer.m_addrs_to_send));
    } else {
        MakeAndPushMessage(node, NetMsgType::ADDR, CAddress::V1_NETWORK(peer.m_addrs_to_send));
    }
    peer.m_addrs_to_send.clear();

    // we only send the big addr message once
    if (peer.m_addrs_to_send.capacity() > 40) {
        peer.m_addrs_to_send.shrink_to_fit();
    }
}
5606
5607void PeerManagerImpl::MaybeSendSendHeaders(CNode& node, Peer& peer)
5608{
5609 // Delay sending SENDHEADERS (BIP 130) until we're done with an
5610 // initial-headers-sync with this peer. Receiving headers announcements for
5611 // new blocks while trying to sync their headers chain is problematic,
5612 // because of the state tracking done.
5613 if (!peer.m_sent_sendheaders && node.GetCommonVersion() >= SENDHEADERS_VERSION) {
5614 LOCK(cs_main);
5615 CNodeState &state = *State(node.GetId());
5616 if (state.pindexBestKnownBlock != nullptr &&
5617 state.pindexBestKnownBlock->nChainWork > m_chainman.MinimumChainWork()) {
5618 // Tell our peer we prefer to receive headers rather than inv's
5619 // We send this to non-NODE NETWORK peers as well, because even
5620 // non-NODE NETWORK peers can announce blocks (such as pruning
5621 // nodes)
5622 MakeAndPushMessage(node, NetMsgType::SENDHEADERS);
5623 peer.m_sent_sendheaders = true;
5624 }
5625 }
5626}
5627
5628void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::microseconds current_time)
5629{
5630 if (m_opts.ignore_incoming_txs) return;
5631 if (pto.GetCommonVersion() < FEEFILTER_VERSION) return;
5632 // peers with the forcerelay permission should not filter txs to us
5634 // Don't send feefilter messages to outbound block-relay-only peers since they should never announce
5635 // transactions to us, regardless of feefilter state.
5636 if (pto.IsBlockOnlyConn()) return;
5637
5638 CAmount currentFilter = m_mempool.GetMinFee().GetFeePerK();
5639
5640 if (m_chainman.IsInitialBlockDownload()) {
5641 // Received tx-inv messages are discarded when the active
5642 // chainstate is in IBD, so tell the peer to not send them.
5643 currentFilter = MAX_MONEY;
5644 } else {
5645 static const CAmount MAX_FILTER{m_fee_filter_rounder.round(MAX_MONEY)};
5646 if (peer.m_fee_filter_sent == MAX_FILTER) {
5647 // Send the current filter if we sent MAX_FILTER previously
5648 // and made it out of IBD.
5649 peer.m_next_send_feefilter = 0us;
5650 }
5651 }
5652 if (current_time > peer.m_next_send_feefilter) {
5653 CAmount filterToSend = m_fee_filter_rounder.round(currentFilter);
5654 // We always have a fee filter of at least the min relay fee
5655 filterToSend = std::max(filterToSend, m_mempool.m_opts.min_relay_feerate.GetFeePerK());
5656 if (filterToSend != peer.m_fee_filter_sent) {
5657 MakeAndPushMessage(pto, NetMsgType::FEEFILTER, filterToSend);
5658 peer.m_fee_filter_sent = filterToSend;
5659 }
5660 peer.m_next_send_feefilter = current_time + m_rng.rand_exp_duration(AVG_FEEFILTER_BROADCAST_INTERVAL);
5661 }
5662 // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
5663 // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
5664 else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < peer.m_next_send_feefilter &&
5665 (currentFilter < 3 * peer.m_fee_filter_sent / 4 || currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
5666 peer.m_next_send_feefilter = current_time + m_rng.randrange<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY);
5667 }
5668}
5669
5670namespace {
5671class CompareInvMempoolOrder
5672{
5673 const CTxMemPool* m_mempool;
5674public:
5675 explicit CompareInvMempoolOrder(CTxMemPool* mempool) : m_mempool{mempool} {}
5676
5677 bool operator()(std::set<Wtxid>::iterator a, std::set<Wtxid>::iterator b)
5678 {
5679 /* As std::make_heap produces a max-heap, we want the entries with the
5680 * higher mining score to sort later. */
5681 return m_mempool->CompareMiningScoreWithTopology(*b, *a);
5682 }
5683};
5684} // namespace
5685
5686bool PeerManagerImpl::RejectIncomingTxs(const CNode& peer) const
5687{
5688 // block-relay-only peers may never send txs to us
5689 if (peer.IsBlockOnlyConn()) return true;
5690 if (peer.IsFeelerConn()) return true;
5691 // In -blocksonly mode, peers need the 'relay' permission to send txs to us
5692 if (m_opts.ignore_incoming_txs && !peer.HasPermission(NetPermissionFlags::Relay)) return true;
5693 return false;
5694}
5695
5696bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer)
5697{
5698 // We don't participate in addr relay with outbound block-relay-only
5699 // connections to prevent providing adversaries with the additional
5700 // information of addr traffic to infer the link.
5701 if (node.IsBlockOnlyConn()) return false;
5702
5703 if (!peer.m_addr_relay_enabled.exchange(true)) {
5704 // During version message processing (non-block-relay-only outbound peers)
5705 // or on first addr-related message we have received (inbound peers), initialize
5706 // m_addr_known.
5707 peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
5708 }
5709
5710 return true;
5711}
5712
5713bool PeerManagerImpl::SendMessages(CNode& node)
5714{
5715 AssertLockNotHeld(m_tx_download_mutex);
5716 AssertLockHeld(g_msgproc_mutex);
5717
5718 PeerRef maybe_peer{GetPeerRef(node.GetId())};
5719 if (!maybe_peer) return false;
5720 Peer& peer{*maybe_peer};
5721 const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
5722
5723 // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
5724 // disconnect misbehaving peers even before the version handshake is complete.
5725 if (MaybeDiscourageAndDisconnect(node, peer)) return true;
5726
5727 // Initiate version handshake for outbound connections
5728 if (!node.IsInboundConn() && !peer.m_outbound_version_message_sent) {
5729 PushNodeVersion(node, peer);
5730 peer.m_outbound_version_message_sent = true;
5731 }
5732
5733 // Don't send anything until the version handshake is complete
5734 if (!node.fSuccessfullyConnected || node.fDisconnect)
5735 return true;
5736
5737 const auto current_time{GetTime<std::chrono::microseconds>()};
5738
5739 // The logic below does not apply to private broadcast peers, so skip it.
5740 // Also in CConnman::PushMessage() we make sure that unwanted messages are
5741 // not sent. This here is just an optimization.
5742 if (node.IsPrivateBroadcastConn()) {
5743 if (node.m_connected + PRIVATE_BROADCAST_MAX_CONNECTION_LIFETIME < current_time) {
5744 LogDebug(BCLog::PRIVBROADCAST, "Disconnecting: did not complete the transaction send within %d seconds, peer=%d%s",
5746 node.fDisconnect = true;
5747 }
5748 return true;
5749 }
5750
5751 if (node.IsAddrFetchConn() && current_time - node.m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
5752 LogDebug(BCLog::NET, "addrfetch connection timeout, %s\n", node.DisconnectMsg(fLogIPs));
5753 node.fDisconnect = true;
5754 return true;
5755 }
5756
5757 MaybeSendPing(node, peer, current_time);
5758
5759 // MaybeSendPing may have marked peer for disconnection
5760 if (node.fDisconnect) return true;
5761
5762 MaybeSendAddr(node, peer, current_time);
5763
5764 MaybeSendSendHeaders(node, peer);
5765
5766 {
5767 LOCK(cs_main);
5768
5769 CNodeState &state = *State(node.GetId());
5770
5771 // Start block sync
5772 if (m_chainman.m_best_header == nullptr) {
5773 m_chainman.m_best_header = m_chainman.ActiveChain().Tip();
5774 }
5775
5776 // Determine whether we might try initial headers sync or parallel
5777 // block download from this peer -- this mostly affects behavior while
5778 // in IBD (once out of IBD, we sync from all peers).
5779 bool sync_blocks_and_headers_from_peer = false;
5780 if (state.fPreferredDownload) {
5781 sync_blocks_and_headers_from_peer = true;
5782 } else if (CanServeBlocks(peer) && !node.IsAddrFetchConn()) {
5783 // Typically this is an inbound peer. If we don't have any outbound
5784 // peers, or if we aren't downloading any blocks from such peers,
5785 // then allow block downloads from this peer, too.
5786 // We prefer downloading blocks from outbound peers to avoid
5787 // putting undue load on (say) some home user who is just making
5788 // outbound connections to the network, but if our only source of
5789 // the latest blocks is from an inbound peer, we have to be sure to
5790 // eventually download it (and not just wait indefinitely for an
5791 // outbound peer to have it).
5792 if (m_num_preferred_download_peers == 0 || mapBlocksInFlight.empty()) {
5793 sync_blocks_and_headers_from_peer = true;
5794 }
5795 }
5796
5797 if (!state.fSyncStarted && CanServeBlocks(peer) && !m_chainman.m_blockman.LoadingBlocks()) {
5798 // Only actively request headers from a single peer, unless we're close to today.
5799 if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > NodeClock::now() - 24h) {
5800 const CBlockIndex* pindexStart = m_chainman.m_best_header;
5801 /* If possible, start at the block preceding the currently
5802 best known header. This ensures that we always get a
5803 non-empty list of headers back as long as the peer
5804 is up-to-date. With a non-empty response, we can initialise
5805 the peer's known best block. This wouldn't be possible
5806 if we requested starting at m_chainman.m_best_header and
5807 got back an empty response. */
5808 if (pindexStart->pprev)
5809 pindexStart = pindexStart->pprev;
5810 if (MaybeSendGetHeaders(node, GetLocator(pindexStart), peer)) {
5811 LogDebug(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, node.GetId(), peer.m_starting_height);
5812
5813 state.fSyncStarted = true;
5814 peer.m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
5815 (
5816 // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
5817 // to maintain precision
5818 std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
5819 Ticks<std::chrono::seconds>(NodeClock::now() - m_chainman.m_best_header->Time()) / consensusParams.nPowTargetSpacing
5820 );
5821 nSyncStarted++;
5822 }
5823 }
5824 }
5825
5826 //
5827 // Try sending block announcements via headers
5828 //
5829 {
5830 // If we have no more than MAX_BLOCKS_TO_ANNOUNCE in our
5831 // list of block hashes we're relaying, and our peer wants
5832 // headers announcements, then find the first header
5833 // not yet known to our peer but would connect, and send.
5834 // If no header would connect, or if we have too many
5835 // blocks, or if the peer doesn't want headers, just
5836 // add all to the inv queue.
5837 LOCK(peer.m_block_inv_mutex);
5838 std::vector<CBlock> vHeaders;
5839 bool fRevertToInv = ((!peer.m_prefers_headers &&
5840 (!state.m_requested_hb_cmpctblocks || peer.m_blocks_for_headers_relay.size() > 1)) ||
5841 peer.m_blocks_for_headers_relay.size() > MAX_BLOCKS_TO_ANNOUNCE);
5842 const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
5843 ProcessBlockAvailability(node.GetId()); // ensure pindexBestKnownBlock is up-to-date
5844
5845 if (!fRevertToInv) {
5846 bool fFoundStartingHeader = false;
5847 // Try to find first header that our peer doesn't have, and
5848 // then send all headers past that one. If we come across any
5849 // headers that aren't on m_chainman.ActiveChain(), give up.
5850 for (const uint256& hash : peer.m_blocks_for_headers_relay) {
5851 const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
5852 assert(pindex);
5853 if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
5854 // Bail out if we reorged away from this block
5855 fRevertToInv = true;
5856 break;
5857 }
5858 if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
5859 // This means that the list of blocks to announce don't
5860 // connect to each other.
5861 // This shouldn't really be possible to hit during
5862 // regular operation (because reorgs should take us to
5863 // a chain that has some block not on the prior chain,
5864 // which should be caught by the prior check), but one
5865 // way this could happen is by using invalidateblock /
5866 // reconsiderblock repeatedly on the tip, causing it to
5867 // be added multiple times to m_blocks_for_headers_relay.
5868 // Robustly deal with this rare situation by reverting
5869 // to an inv.
5870 fRevertToInv = true;
5871 break;
5872 }
5873 pBestIndex = pindex;
5874 if (fFoundStartingHeader) {
5875 // add this to the headers message
5876 vHeaders.emplace_back(pindex->GetBlockHeader());
5877 } else if (PeerHasHeader(&state, pindex)) {
5878 continue; // keep looking for the first new block
5879 } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) {
5880 // Peer doesn't have this header but they do have the prior one.
5881 // Start sending headers.
5882 fFoundStartingHeader = true;
5883 vHeaders.emplace_back(pindex->GetBlockHeader());
5884 } else {
5885 // Peer doesn't have this header or the prior one -- nothing will
5886 // connect, so bail out.
5887 fRevertToInv = true;
5888 break;
5889 }
5890 }
5891 }
5892 if (!fRevertToInv && !vHeaders.empty()) {
5893 if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
5894 // We only send up to 1 block as header-and-ids, as otherwise
5895 // probably means we're doing an initial-ish-sync or they're slow
5896 LogDebug(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
5897 vHeaders.front().GetHash().ToString(), node.GetId());
5898
5899 std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
5900 {
5901 LOCK(m_most_recent_block_mutex);
5902 if (m_most_recent_block_hash == pBestIndex->GetBlockHash()) {
5903 cached_cmpctblock_msg = NetMsg::Make(NetMsgType::CMPCTBLOCK, *m_most_recent_compact_block);
5904 }
5905 }
5906 if (cached_cmpctblock_msg.has_value()) {
5907 PushMessage(node, std::move(cached_cmpctblock_msg.value()));
5908 } else {
5909 CBlock block;
5910 const bool ret{m_chainman.m_blockman.ReadBlock(block, *pBestIndex)};
5911 assert(ret);
5912 CBlockHeaderAndShortTxIDs cmpctblock{block, m_rng.rand64()};
5913 MakeAndPushMessage(node, NetMsgType::CMPCTBLOCK, cmpctblock);
5914 }
5915 state.pindexBestHeaderSent = pBestIndex;
5916 } else if (peer.m_prefers_headers) {
5917 if (vHeaders.size() > 1) {
5918 LogDebug(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
5919 vHeaders.size(),
5920 vHeaders.front().GetHash().ToString(),
5921 vHeaders.back().GetHash().ToString(), node.GetId());
5922 } else {
5923 LogDebug(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
5924 vHeaders.front().GetHash().ToString(), node.GetId());
5925 }
5926 MakeAndPushMessage(node, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders));
5927 state.pindexBestHeaderSent = pBestIndex;
5928 } else
5929 fRevertToInv = true;
5930 }
5931 if (fRevertToInv) {
5932 // If falling back to using an inv, just try to inv the tip.
5933 // The last entry in m_blocks_for_headers_relay was our tip at some point
5934 // in the past.
5935 if (!peer.m_blocks_for_headers_relay.empty()) {
5936 const uint256& hashToAnnounce = peer.m_blocks_for_headers_relay.back();
5937 const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce);
5938 assert(pindex);
5939
5940 // Warn if we're announcing a block that is not on the main chain.
5941 // This should be very rare and could be optimized out.
5942 // Just log for now.
5943 if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
5944 LogDebug(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
5945 hashToAnnounce.ToString(), m_chainman.ActiveChain().Tip()->GetBlockHash().ToString());
5946 }
5947
5948 // If the peer's chain has this block, don't inv it back.
5949 if (!PeerHasHeader(&state, pindex)) {
5950 peer.m_blocks_for_inv_relay.push_back(hashToAnnounce);
5951 LogDebug(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
5952 node.GetId(), hashToAnnounce.ToString());
5953 }
5954 }
5955 }
5956 peer.m_blocks_for_headers_relay.clear();
5957 }
5958
5959 //
5960 // Message: inventory
5961 //
5962 std::vector<CInv> vInv;
5963 {
5964 LOCK(peer.m_block_inv_mutex);
5965 vInv.reserve(std::max<size_t>(peer.m_blocks_for_inv_relay.size(), INVENTORY_BROADCAST_TARGET));
5966
5967 // Add blocks
5968 for (const uint256& hash : peer.m_blocks_for_inv_relay) {
5969 vInv.emplace_back(MSG_BLOCK, hash);
5970 if (vInv.size() == MAX_INV_SZ) {
5971 MakeAndPushMessage(node, NetMsgType::INV, vInv);
5972 vInv.clear();
5973 }
5974 }
5975 peer.m_blocks_for_inv_relay.clear();
5976 }
5977
5978 if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) {
5979 LOCK(tx_relay->m_tx_inventory_mutex);
5980 // Check whether periodic sends should happen
5981 bool fSendTrickle = node.HasPermission(NetPermissionFlags::NoBan);
5982 if (tx_relay->m_next_inv_send_time < current_time) {
5983 fSendTrickle = true;
5984 if (node.IsInboundConn()) {
5985 tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL, node.m_network_key);
5986 } else {
5987 tx_relay->m_next_inv_send_time = current_time + m_rng.rand_exp_duration(OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
5988 }
5989 }
5990
5991 // Time to send but the peer has requested we not relay transactions.
5992 if (fSendTrickle) {
5993 LOCK(tx_relay->m_bloom_filter_mutex);
5994 if (!tx_relay->m_relay_txs) tx_relay->m_tx_inventory_to_send.clear();
5995 }
5996
5997 // Respond to BIP35 mempool requests
5998 if (fSendTrickle && tx_relay->m_send_mempool) {
5999 auto vtxinfo = m_mempool.infoAll();
6000 tx_relay->m_send_mempool = false;
6001 const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()};
6002
6003 LOCK(tx_relay->m_bloom_filter_mutex);
6004
6005 for (const auto& txinfo : vtxinfo) {
6006 const Txid& txid{txinfo.tx->GetHash()};
6007 const Wtxid& wtxid{txinfo.tx->GetWitnessHash()};
6008 const auto inv = peer.m_wtxid_relay ?
6009 CInv{MSG_WTX, wtxid.ToUint256()} :
6010 CInv{MSG_TX, txid.ToUint256()};
6011 tx_relay->m_tx_inventory_to_send.erase(wtxid);
6012
6013 // Don't send transactions that peers will not put into their mempool
6014 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
6015 continue;
6016 }
6017 if (tx_relay->m_bloom_filter) {
6018 if (!tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
6019 }
6020 tx_relay->m_tx_inventory_known_filter.insert(inv.hash);
6021 vInv.push_back(inv);
6022 if (vInv.size() == MAX_INV_SZ) {
6023 MakeAndPushMessage(node, NetMsgType::INV, vInv);
6024 vInv.clear();
6025 }
6026 }
6027 }
6028
6029 // Determine transactions to relay
6030 if (fSendTrickle) {
6031 // Produce a vector with all candidates for sending
6032 std::vector<std::set<Wtxid>::iterator> vInvTx;
6033 vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
6034 for (std::set<Wtxid>::iterator it = tx_relay->m_tx_inventory_to_send.begin(); it != tx_relay->m_tx_inventory_to_send.end(); it++) {
6035 vInvTx.push_back(it);
6036 }
6037 const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()};
6038 // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
6039 // A heap is used so that not all items need sorting if only a few are being sent.
6040 CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool);
6041 std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
6042 // No reason to drain out at many times the network's capacity,
6043 // especially since we have many peers and some will draw much shorter delays.
6044 unsigned int nRelayedTransactions = 0;
6045 LOCK(tx_relay->m_bloom_filter_mutex);
6046 size_t broadcast_max{INVENTORY_BROADCAST_TARGET + (tx_relay->m_tx_inventory_to_send.size()/1000)*5};
6047 broadcast_max = std::min<size_t>(INVENTORY_BROADCAST_MAX, broadcast_max);
6048 while (!vInvTx.empty() && nRelayedTransactions < broadcast_max) {
6049 // Fetch the top element from the heap
6050 std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
6051 std::set<Wtxid>::iterator it = vInvTx.back();
6052 vInvTx.pop_back();
6053 auto wtxid = *it;
6054 // Remove it from the to-be-sent set
6055 tx_relay->m_tx_inventory_to_send.erase(it);
6056 // Not in the mempool anymore? don't bother sending it.
6057 auto txinfo = m_mempool.info(wtxid);
6058 if (!txinfo.tx) {
6059 continue;
6060 }
6061 // `TxRelay::m_tx_inventory_known_filter` contains either txids or wtxids
6062 // depending on whether our peer supports wtxid-relay. Therefore, first
6063 // construct the inv and then use its hash for the filter check.
6064 const auto inv = peer.m_wtxid_relay ?
6065 CInv{MSG_WTX, wtxid.ToUint256()} :
6066 CInv{MSG_TX, txinfo.tx->GetHash().ToUint256()};
6067 // Check if not in the filter already
6068 if (tx_relay->m_tx_inventory_known_filter.contains(inv.hash)) {
6069 continue;
6070 }
6071 // Peer told you to not send transactions at that feerate? Don't bother sending it.
6072 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
6073 continue;
6074 }
6075 if (tx_relay->m_bloom_filter && !tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
6076 // Send
6077 vInv.push_back(inv);
6078 nRelayedTransactions++;
6079 if (vInv.size() == MAX_INV_SZ) {
6080 MakeAndPushMessage(node, NetMsgType::INV, vInv);
6081 vInv.clear();
6082 }
6083 tx_relay->m_tx_inventory_known_filter.insert(inv.hash);
6084 }
6085
6086 // Ensure we'll respond to GETDATA requests for anything we've just announced
6087 LOCK(m_mempool.cs);
6088 tx_relay->m_last_inv_sequence = m_mempool.GetSequence();
6089 }
6090 }
6091 if (!vInv.empty())
6092 MakeAndPushMessage(node, NetMsgType::INV, vInv);
6093
6094 // Detect whether we're stalling
6095 auto stalling_timeout = m_block_stalling_timeout.load();
6096 if (state.m_stalling_since.count() && state.m_stalling_since < current_time - stalling_timeout) {
6097 // Stalling only triggers when the block download window cannot move. During normal steady state,
6098 // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
6099 // should only happen during initial block download.
6100 LogInfo("Peer is stalling block download, %s\n", node.DisconnectMsg(fLogIPs));
6101 node.fDisconnect = true;
6102 // Increase timeout for the next peer so that we don't disconnect multiple peers if our own
6103 // bandwidth is insufficient.
6104 const auto new_timeout = std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX);
6105 if (stalling_timeout != new_timeout && m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
6106 LogDebug(BCLog::NET, "Increased stalling timeout temporarily to %d seconds\n", count_seconds(new_timeout));
6107 }
6108 return true;
6109 }
6110 // In case there is a block that has been in flight from this peer for block_interval * (1 + 0.5 * N)
6111 // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
6112 // We compensate for other peers to prevent killing off peers due to our own downstream link
6113 // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
6114 // to unreasonably increase our timeout.
6115 if (state.vBlocksInFlight.size() > 0) {
6116 QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
6117 int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1;
6118 if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
6119 LogInfo("Timeout downloading block %s, %s\n", queuedBlock.pindex->GetBlockHash().ToString(), node.DisconnectMsg(fLogIPs));
6120 node.fDisconnect = true;
6121 return true;
6122 }
6123 }
6124 // Check for headers sync timeouts
6125 if (state.fSyncStarted && peer.m_headers_sync_timeout < std::chrono::microseconds::max()) {
6126 // Detect whether this is a stalling initial-headers-sync peer
6127 if (m_chainman.m_best_header->Time() <= NodeClock::now() - 24h) {
6128 if (current_time > peer.m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) {
6129 // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer,
6130 // and we have others we could be using instead.
6131 // Note: If all our peers are inbound, then we won't
6132 // disconnect our sync peer for stalling; we have bigger
6133 // problems if we can't get any outbound peers.
6135 LogInfo("Timeout downloading headers, %s\n", node.DisconnectMsg(fLogIPs));
6136 node.fDisconnect = true;
6137 return true;
6138 } else {
6139 LogInfo("Timeout downloading headers from noban peer, not %s\n", node.DisconnectMsg(fLogIPs));
6140 // Reset the headers sync state so that we have a
6141 // chance to try downloading from a different peer.
6142 // Note: this will also result in at least one more
6143 // getheaders message to be sent to
6144 // this peer (eventually).
6145 state.fSyncStarted = false;
6146 nSyncStarted--;
6147 peer.m_headers_sync_timeout = 0us;
6148 }
6149 }
6150 } else {
6151 // After we've caught up once, reset the timeout so we can't trigger
6152 // disconnect later.
6153 peer.m_headers_sync_timeout = std::chrono::microseconds::max();
6154 }
6155 }
6156
6157 // Check that outbound peers have reasonable chains
6158 // GetTime() is used by this anti-DoS logic so we can test this using mocktime
6159 ConsiderEviction(node, peer, GetTime<std::chrono::seconds>());
6160
6161 //
6162 // Message: getdata (blocks)
6163 //
6164 std::vector<CInv> vGetData;
6165 if (CanServeBlocks(peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(peer)) || !m_chainman.IsInitialBlockDownload()) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
6166 std::vector<const CBlockIndex*> vToDownload;
6167 NodeId staller = -1;
6168 auto get_inflight_budget = [&state]() {
6169 return std::max(0, MAX_BLOCKS_IN_TRANSIT_PER_PEER - static_cast<int>(state.vBlocksInFlight.size()));
6170 };
6171
6172 // If there are multiple chainstates, download blocks for the
6173 // current chainstate first, to prioritize getting to network tip
6174 // before downloading historical blocks.
6175 FindNextBlocksToDownload(peer, get_inflight_budget(), vToDownload, staller);
6176 auto historical_blocks{m_chainman.GetHistoricalBlockRange()};
6177 if (historical_blocks && !IsLimitedPeer(peer)) {
6178 // If the first needed historical block is not an ancestor of the last,
6179 // we need to start requesting blocks from their last common ancestor.
6180 const CBlockIndex* from_tip = LastCommonAncestor(historical_blocks->first, historical_blocks->second);
6181 TryDownloadingHistoricalBlocks(
6182 peer,
6183 get_inflight_budget(),
6184 vToDownload, from_tip, historical_blocks->second);
6185 }
6186 for (const CBlockIndex *pindex : vToDownload) {
6187 uint32_t nFetchFlags = GetFetchFlags(peer);
6188 vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash());
6189 BlockRequested(node.GetId(), *pindex);
6190 LogDebug(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
6191 pindex->nHeight, node.GetId());
6192 }
6193 if (state.vBlocksInFlight.empty() && staller != -1) {
6194 if (State(staller)->m_stalling_since == 0us) {
6195 State(staller)->m_stalling_since = current_time;
6196 LogDebug(BCLog::NET, "Stall started peer=%d\n", staller);
6197 }
6198 }
6199 }
6200
6201 //
6202 // Message: getdata (transactions)
6203 //
6204 {
6205 LOCK(m_tx_download_mutex);
6206 for (const GenTxid& gtxid : m_txdownloadman.GetRequestsToSend(node.GetId(), current_time)) {
6207 vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(peer)), gtxid.ToUint256());
6208 if (vGetData.size() >= MAX_GETDATA_SZ) {
6209 MakeAndPushMessage(node, NetMsgType::GETDATA, vGetData);
6210 vGetData.clear();
6211 }
6212 }
6213 }
6214
6215 if (!vGetData.empty())
6216 MakeAndPushMessage(node, NetMsgType::GETDATA, vGetData);
6217 } // release cs_main
6218 MaybeSendFeefilter(node, peer, current_time);
6219 return true;
6220}
static constexpr CAmount MAX_MONEY
No amount larger than this (in satoshi) is valid.
Definition amount.h:26
bool MoneyRange(const CAmount &nValue)
Definition amount.h:27
int64_t CAmount
Amount in satoshis (Can be negative).
Definition amount.h:12
int ret
std::string ToString(const T &t)
Locale-independent version of std::to_string.
Definition string.h:246
ArgsManager & args
Definition bitcoind.cpp:277
@ READ_STATUS_OK
@ READ_STATUS_INVALID
@ READ_STATUS_FAILED
enum ReadStatus_t ReadStatus
const std::string & BlockFilterTypeName(BlockFilterType filter_type)
Get the human-readable name for a filter type.
BlockFilterType
Definition blockfilter.h:94
BlockFilterIndex * GetBlockFilterIndex(BlockFilterType filter_type)
Get a block filter index by type.
static constexpr int CFCHECKPT_INTERVAL
Interval between compact filter checkpoints.
CBlockLocator GetLocator(const CBlockIndex *index)
Get a locator for a block index entry.
Definition chain.cpp:45
int64_t GetBlockProofEquivalentTime(const CBlockIndex &to, const CBlockIndex &from, const CBlockIndex &tip, const Consensus::Params &params)
Return the time it would take to redo the work difference between from and to, assuming the current h...
Definition chain.cpp:136
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
Definition chain.cpp:155
@ BLOCK_VALID_CHAIN
Definition chain.h:65
@ BLOCK_VALID_TRANSACTIONS
Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid,...
Definition chain.h:61
@ BLOCK_VALID_SCRIPTS
Definition chain.h:69
@ BLOCK_VALID_TREE
Definition chain.h:51
@ BLOCK_HAVE_DATA
full block available in blk*.dat
Definition chain.h:75
arith_uint256 GetBlockProof(const CBlockIndex &block)
Compute how much work a block index entry corresponds to.
Definition chain.h:305
#define Assert(val)
Identity function.
Definition check.h:113
#define Assume(val)
Assume is the identity function.
Definition check.h:125
Stochastic address manager.
Definition addrman.h:89
void Connected(const CService &addr, NodeSeconds time=Now< NodeSeconds >())
We have successfully connected to this peer.
Definition addrman.cpp:1342
bool Good(const CService &addr, NodeSeconds time=Now< NodeSeconds >())
Mark an address record as accessible and attempt to move it to addrman's tried table.
Definition addrman.cpp:1307
bool Add(const std::vector< CAddress > &vAddr, const CNetAddr &source, std::chrono::seconds time_penalty=0s)
Attempt to add one or more addresses to addrman's new table.
Definition addrman.cpp:1302
void SetServices(const CService &addr, ServiceFlags nServices)
Update an entry's service bits.
Definition addrman.cpp:1347
bool IsBanned(const CNetAddr &net_addr) EXCLUSIVE_LOCKS_REQUIRED(!m_banned_mutex)
Return whether net_addr is banned.
Definition banman.cpp:89
bool IsDiscouraged(const CNetAddr &net_addr) EXCLUSIVE_LOCKS_REQUIRED(!m_banned_mutex)
Return whether net_addr is discouraged.
Definition banman.cpp:83
void Discourage(const CNetAddr &net_addr) EXCLUSIVE_LOCKS_REQUIRED(!m_banned_mutex)
Definition banman.cpp:124
bool LookupFilterRange(int start_height, const CBlockIndex *stop_index, std::vector< BlockFilter > &filters_out) const
Get a range of filters between two heights on a chain.
bool LookupFilterHashRange(int start_height, const CBlockIndex *stop_index, std::vector< uint256 > &hashes_out) const
Get a range of filter hashes between two heights on a chain.
bool LookupFilterHeader(const CBlockIndex *block_index, uint256 &header_out) EXCLUSIVE_LOCKS_REQUIRED(!m_cs_headers_cache)
Get a single filter header by block.
std::vector< CTransactionRef > txn
std::vector< uint16_t > indexes
ServiceFlags nServices
Serialized as uint64_t in V1, and as CompactSize in V2.
Definition protocol.h:459
static constexpr SerParams V1_NETWORK
Definition protocol.h:408
NodeSeconds nTime
Always included in serialization. The behavior is unspecified if the value is not representable as ui...
Definition protocol.h:457
static constexpr SerParams V2_NETWORK
Definition protocol.h:409
uint256 hashPrevBlock
Definition block.h:31
uint256 GetHash() const
Definition block.cpp:15
bool IsNull() const
Definition block.h:54
std::vector< CTransactionRef > vtx
Definition block.h:77
bool IsValid(enum BlockStatus nUpTo) const EXCLUSIVE_LOCKS_REQUIRED(
Check whether this block index entry is valid up to the passed validity level.
Definition chain.h:250
CBlockIndex * pprev
pointer to the index of the predecessor of this block
Definition chain.h:100
CBlockHeader GetBlockHeader() const
Definition chain.h:185
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
Definition chain.h:118
bool HaveNumChainTxs() const
Check whether this block and all previous blocks back to the genesis block or an assumeutxo snapshot ...
Definition chain.h:214
uint256 GetBlockHash() const
Definition chain.h:198
int64_t GetBlockTime() const
Definition chain.h:221
unsigned int nTx
Definition chain.h:123
CBlockIndex * GetAncestor(int height)
Efficiently find an ancestor of this block.
Definition chain.cpp:110
int nHeight
height of the entry in the chain. The genesis block has height 0
Definition chain.h:106
FlatFilePos GetBlockPos() const EXCLUSIVE_LOCKS_REQUIRED(
Definition chain.h:163
bool IsWithinSizeConstraints() const
Definition bloom.cpp:89
CBlockIndex * Tip() const
Returns the index entry for the tip of this chain, or nullptr if none.
Definition chain.h:396
CBlockIndex * Next(const CBlockIndex *pindex) const
Find the successor of a block in this chain, or nullptr if the given index is not found or is the tip...
Definition chain.h:416
int Height() const
Return the maximal height in the chain.
Definition chain.h:425
bool Contains(const CBlockIndex *pindex) const
Efficiently check whether a block is present in this chain.
Definition chain.h:410
const HeadersSyncParams & HeadersSync() const
const Consensus::Params & GetConsensus() const
Definition chainparams.h:89
void NumToOpenAdd(size_t n)
Increment the number of new connections of type ConnectionType::PRIVATE_BROADCAST to be opened by CCo...
Definition net.cpp:3092
size_t NumToOpenSub(size_t n)
Decrement the number of new connections of type ConnectionType::PRIVATE_BROADCAST to be opened by CCo...
Definition net.cpp:3098
void ForEachNode(const NodeFn &func)
Definition net.h:1269
bool ForNode(NodeId id, std::function< bool(CNode *pnode)> func)
Definition net.cpp:4118
bool GetNetworkActive() const
Definition net.h:1167
bool ShouldRunInactivityChecks(const CNode &node, std::chrono::microseconds now) const
Return true if we should disconnect the peer for failing an inactivity check.
Definition net.cpp:2003
bool GetTryNewOutboundPeer() const
Definition net.cpp:2426
class CConnman::PrivateBroadcast m_private_broadcast
std::vector< CAddress > GetAddresses(CNode &requestor, size_t max_addresses, size_t max_pct)
Return addresses from the per-requestor cache.
Definition net.cpp:3694
void SetTryNewOutboundPeer(bool flag)
Definition net.cpp:2431
int GetExtraBlockRelayCount() const
Definition net.cpp:2476
void WakeMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc)
Definition net.cpp:2246
bool OutboundTargetReached(bool historicalBlockServingLimit) const EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex)
Definition net.cpp:3911
void StartExtraBlockRelayPeers()
Definition net.cpp:2437
bool DisconnectNode(std::string_view node)
Definition net.cpp:3809
CSipHasher GetDeterministicRandomizer(uint64_t id) const
Get a unique deterministic randomizer.
Definition net.cpp:4131
uint32_t GetMappedAS(const CNetAddr &addr) const
Definition net.cpp:3792
int GetExtraFullOutboundCount() const
Definition net.cpp:2462
std::vector< CAddress > GetAddressesUnsafe(size_t max_addresses, size_t max_pct, std::optional< Network > network, bool filtered=true) const
Return randomly selected addresses.
Definition net.cpp:3683
bool CheckIncomingNonce(uint64_t nonce)
Definition net.cpp:353
bool GetUseAddrmanOutgoing() const
Definition net.h:1168
RecursiveMutex & GetNodesMutex() const LOCK_RETURNED(m_nodes_mutex)
Fee rate in satoshis per virtualbyte: CAmount / vB the feerate is represented internally as FeeFrac.
Definition feerate.h:32
CAmount GetFeePerK() const
Return the fee in satoshis for a vsize of 1000 vbytes.
Definition feerate.h:62
CAmount GetFee(int32_t virtual_bytes) const
Return the fee in satoshis for the given vsize in vbytes.
Definition feerate.cpp:20
bool IsMsgCmpctBlk() const
Definition protocol.h:511
bool IsMsgBlk() const
Definition protocol.h:508
std::string ToString() const
Definition protocol.cpp:77
bool IsMsgWtx() const
Definition protocol.h:509
bool IsGenTxMsg() const
Definition protocol.h:515
bool IsMsgTx() const
Definition protocol.h:507
bool IsMsgFilteredBlk() const
Definition protocol.h:510
uint256 hash
Definition protocol.h:525
bool IsGenBlkMsg() const
Definition protocol.h:519
bool IsMsgWitnessBlk() const
Definition protocol.h:512
std::vector< std::pair< unsigned int, Txid > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
bool IsRelayable() const
Whether this address should be relayed to other peers even if we can't reach it ourselves.
Definition netaddress.h:218
bool IsRoutable() const
static constexpr SerParams V1
Definition netaddress.h:231
bool IsValid() const
bool IsLocal() const
bool IsAddrV1Compatible() const
Check if the current object can be serialized in pre-ADDRv2/BIP155 format.
Information about a peer.
Definition net.h:680
bool IsFeelerConn() const
Definition net.h:816
const std::chrono::seconds m_connected
Unix epoch time at peer connection.
Definition net.h:713
bool ExpectServicesFromConn() const
Definition net.h:833
std::atomic< int > nVersion
Definition net.h:723
std::atomic_bool m_has_all_wanted_services
Whether this peer provides all services that we want.
Definition net.h:870
bool IsInboundConn() const
Definition net.h:829
bool HasPermission(NetPermissionFlags permission) const
Definition net.h:731
bool IsOutboundOrBlockRelayConn() const
Definition net.h:771
NodeId GetId() const
Definition net.h:914
bool IsManualConn() const
Definition net.h:791
const std::string m_addr_name
Definition net.h:718
std::string ConnectionTypeAsString() const
Definition net.h:968
void SetCommonVersion(int greatest_common_version)
Definition net.h:939
std::atomic< bool > m_bip152_highbandwidth_to
Definition net.h:865
std::atomic_bool m_relays_txs
Whether we should relay transactions to this peer.
Definition net.h:874
std::atomic< bool > m_bip152_highbandwidth_from
Definition net.h:867
void PongReceived(std::chrono::microseconds ping_time)
A ping-pong round trip has completed successfully.
Definition net.h:987
std::atomic_bool fSuccessfullyConnected
fSuccessfullyConnected is set to true on receiving VERACK from the peer.
Definition net.h:735
bool IsAddrFetchConn() const
Definition net.h:820
uint64_t GetLocalNonce() const
Definition net.h:918
const CAddress addr
Definition net.h:715
void SetAddrLocal(const CService &addrLocalIn) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_local_mutex)
May not be called more than once.
Definition net.cpp:594
bool IsBlockOnlyConn() const
Definition net.h:812
int GetCommonVersion() const
Definition net.h:944
bool IsFullOutboundConn() const
Definition net.h:787
Mutex m_subver_mutex
Definition net.h:724
const uint64_t m_network_key
Network key used to prevent fingerprinting our node across networks.
Definition net.h:748
std::atomic_bool fPauseSend
Definition net.h:744
std::optional< std::pair< CNetMessage, bool > > PollMessage() EXCLUSIVE_LOCKS_REQUIRED(!m_msg_process_queue_mutex)
Poll the next message from the processing queue of this connection.
Definition net.cpp:4028
std::atomic_bool m_bloom_filter_loaded
Whether this peer has loaded a bloom filter.
Definition net.h:878
bool IsPrivateBroadcastConn() const
Definition net.h:824
std::string LogIP(bool log_ip) const
Helper function to optionally log the IP address.
Definition net.cpp:704
const std::unique_ptr< Transport > m_transport
Transport serializer/deserializer.
Definition net.h:684
const NetPermissionFlags m_permission_flags
Definition net.h:686
const bool m_inbound_onion
Whether this peer is an inbound onion, i.e. connected via our Tor onion service.
Definition net.h:722
std::atomic< std::chrono::seconds > m_last_block_time
UNIX epoch time of the last block received from this peer that we had not yet seen (e....
Definition net.h:885
std::string DisconnectMsg(bool log_ip) const
Helper function to log disconnects.
Definition net.cpp:709
std::atomic_bool fDisconnect
Definition net.h:738
std::atomic< std::chrono::seconds > m_last_tx_time
UNIX epoch time of the last transaction received from this peer that we had not yet seen (e....
Definition net.h:891
Simple class for background tasks that should be run periodically or once "after a while".
Definition scheduler.h:40
void scheduleEvery(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Repeat f until the scheduler is stopped.
void scheduleFromNow(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Call f once after the delta has passed.
Definition scheduler.h:53
std::string ToStringAddrPort() const
std::vector< unsigned char > GetKey() const
uint64_t Finalize() const
Compute the 64-bit SipHash-2-4 of the data written so far.
Definition siphash.cpp:73
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data.
Definition siphash.cpp:24
CTxMemPool stores valid-according-to-the-current-best-chain transactions that may be included in the ...
Definition txmempool.h:187
bool CompareMiningScoreWithTopology(const Wtxid &hasha, const Wtxid &hashb) const
TxMempoolInfo info_for_relay(const T &id, uint64_t last_sequence) const
Returns info for a transaction if its entry_sequence < last_sequence.
Definition txmempool.h:529
RecursiveMutex cs
This mutex needs to be locked when accessing mapTx or other members that are guarded by it.
Definition txmempool.h:260
CFeeRate GetMinFee(size_t sizelimit) const
CTransactionRef get(const Txid &hash) const
size_t DynamicMemoryUsage() const
const Options m_opts
Definition txmempool.h:303
std::vector< TxMempoolInfo > infoAll() const
TxMempoolInfo info(const T &id) const
Definition txmempool.h:520
bool exists(const Txid &txid) const
Definition txmempool.h:503
uint64_t GetSequence() const EXCLUSIVE_LOCKS_REQUIRED(cs)
Definition txmempool.h:574
std::set< Txid > GetUnbroadcastTxs() const
Returns transactions in unbroadcast set.
Definition txmempool.h:556
unsigned long size() const
Definition txmempool.h:485
void RemoveUnbroadcastTx(const Txid &txid, bool unchecked=false)
Removes a transaction from the unbroadcast set.
void ClearBlockIndexCandidates() EXCLUSIVE_LOCKS_REQUIRED(void PopulateBlockIndexCandidates() EXCLUSIVE_LOCKS_REQUIRED(const CBlockIndex * FindForkInGlobalIndex(const CBlockLocator &locator) const EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Populate the candidate set by calling TryAddBlockIndexCandidate on all valid block indices.
Interface for managing multiple Chainstate objects, where each chainstate is associated with chainsta...
Definition validation.h:940
bool IsInitialBlockDownload() const noexcept
Check whether we are doing an initial block download (synchronizing from disk or network).
MempoolAcceptResult ProcessTransaction(const CTransactionRef &tx, bool test_accept=false) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Try to add a transaction to the memory pool.
RecursiveMutex & GetMutex() const LOCK_RETURNED(
Alias for cs_main.
CBlockIndex * ActiveTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
Chainstate & ActiveChainstate() const
bool ProcessNewBlock(const std::shared_ptr< const CBlock > &block, bool force_processing, bool min_pow_checked, bool *new_block) LOCKS_EXCLUDED(cs_main)
Process an incoming block.
bool ProcessNewBlockHeaders(std::span< const CBlockHeader > headers, bool min_pow_checked, BlockValidationState &state, const CBlockIndex **ppindex=nullptr) LOCKS_EXCLUDED(cs_main)
Process incoming block headers.
const arith_uint256 & MinimumChainWork() const
CChain & ActiveChain() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
void ReportHeadersPresync(int64_t height, int64_t timestamp)
This is used by net_processing to report pre-synchronization progress of headers, as headers are not ...
node::BlockManager m_blockman
bool empty() const
Definition streams.h:168
size_type size() const
Definition streams.h:167
void ignore(size_t num_ignore)
Definition streams.h:221
int in_avail() const
Definition streams.h:201
bool IsWtxid() const
const uint256 & ToUint256() const LIFETIMEBOUND
static Mutex g_msgproc_mutex
Mutex for anything that is only accessed via the msg processing thread.
Definition net.h:1031
static bool HasFlag(NetPermissionFlags flags, NetPermissionFlags f)
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing, bool segwit_active)
bool IsTxAvailable(size_t index) const
ReadStatus InitData(const CBlockHeaderAndShortTxIDs &cmpctblock, const std::vector< std::pair< Wtxid, CTransactionRef > > &extra_txn)
static std::unique_ptr< PeerManager > make(CConnman &connman, AddrMan &addrman, BanMan *banman, ChainstateManager &chainman, CTxMemPool &pool, node::Warnings &warnings, Options opts)
void NodeConfirmedReception(const NodeId &nodeid) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Mark that the node has confirmed reception of the transaction we sent it by responding with PONG to o...
std::vector< TxBroadcastInfo > GetBroadcastInfo() const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Get stats about all transactions currently being privately broadcast.
bool HavePendingTransactions() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Check if there are transactions that need to be broadcast.
bool DidNodeConfirmReception(const NodeId &nodeid) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Check if the node has confirmed reception of the transaction.
std::optional< size_t > Remove(const CTransactionRef &tx) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Forget a transaction.
std::optional< CTransactionRef > PickTxForSend(const NodeId &will_send_to_nodeid, const CService &will_send_to_address) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Pick the transaction with the fewest send attempts, and confirmations, and oldest send/confirm times.
std::optional< CTransactionRef > GetTxForNode(const NodeId &nodeid) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Get the transaction that was picked for sending to a given node by PickTxForSend().
bool Add(const CTransactionRef &tx) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Add a transaction to the storage.
std::vector< CTransactionRef > GetStale() const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Get the transactions that have not been broadcast recently.
bool Contains(Network net) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Definition netbase.h:132
void Add(std::chrono::seconds offset) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Add a new time offset sample.
bool WarnIfOutOfSync() const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Raise warnings if the median time offset exceeds the warnings threshold.
std::chrono::seconds Median() const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Compute and return the median of the collected time offset samples.
bool IsValid() const
Definition validation.h:105
std::string GetDebugMessage() const
Definition validation.h:110
Result GetResult() const
Definition validation.h:108
std::string ToString() const
Definition validation.h:111
bool IsInvalid() const
Definition validation.h:106
constexpr bool IsNull() const
Definition uint256.h:48
std::string ToString() const
Definition uint256.cpp:21
constexpr void SetNull()
Definition uint256.h:55
CBlockIndex * LookupBlockIndex(const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
bool LoadingBlocks() const
ReadRawBlockResult ReadRawBlock(const FlatFilePos &pos, std::optional< std::pair< size_t, size_t > > block_part=std::nullopt) const
bool ReadBlock(CBlock &block, const FlatFilePos &pos, const std::optional< uint256 > &expected_hash) const
Functions for disk access for blocks.
bool IsPruneMode() const
Whether running in -prune mode.
Manages warning messages within a node.
Definition warnings.h:40
const uint256 & ToUint256() const LIFETIMEBOUND
std::string TransportTypeAsString(TransportProtocolType transport_type)
Convert TransportProtocolType enum to a string value.
@ BLOCK_HEADER_LOW_WORK
the block header may be on a too-little-work chain
Definition validation.h:66
@ BLOCK_INVALID_HEADER
invalid proof of work or time too old
Definition validation.h:61
@ BLOCK_CACHED_INVALID
this block was cached as being invalid and we didn't store the reason why
Definition validation.h:60
@ BLOCK_CONSENSUS
invalid by consensus rules (excluding any below reasons)
Definition validation.h:59
@ BLOCK_MISSING_PREV
We don't have the previous block the checked one is built on.
Definition validation.h:63
@ BLOCK_INVALID_PREV
A block this one builds on is invalid.
Definition validation.h:64
@ BLOCK_MUTATED
the block's data didn't match the data committed to by the PoW
Definition validation.h:62
@ BLOCK_TIME_FUTURE
block timestamp was > 2 hours in the future (or our clock is bad)
Definition validation.h:65
@ BLOCK_RESULT_UNSET
initial value. Block has not yet been rejected
Definition validation.h:58
@ TX_MISSING_INPUTS
transaction was missing some of its inputs
Definition validation.h:28
@ TX_UNKNOWN
transaction was not validated because package failed
Definition validation.h:49
@ TX_NO_MEMPOOL
this node does not have a mempool so can't validate the transaction
Definition validation.h:47
@ TX_RESULT_UNSET
initial value. Tx has not yet been rejected
Definition validation.h:24
static size_t RecursiveDynamicUsage(const CScript &script)
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate.
Definition cs_main.cpp:8
bool DeploymentActiveAfter(const CBlockIndex *pindexPrev, const Consensus::Params &params, Consensus::BuriedDeployment dep, VersionBitsCache &versionbitscache)
Determine if a deployment is active for the next block.
bool DeploymentActiveAt(const CBlockIndex &index, const Consensus::Params &params, Consensus::BuriedDeployment dep, VersionBitsCache &versionbitscache)
Determine if a deployment is active for this block.
HeadersSyncState::State State
#define LogWarning(...)
Definition log.h:96
#define LogInfo(...)
Definition log.h:95
#define LogError(...)
Definition log.h:97
#define LogDebug(category,...)
Definition log.h:115
bool fLogIPs
Definition logging.cpp:47
static bool LogAcceptCategory(BCLog::LogFlags category, BCLog::Level level)
Return true if log accepts specified category, at the specified level.
Definition logging.h:294
unsigned int nonce
@ TXPACKAGES
Definition categories.h:46
@ PRIVBROADCAST
Definition categories.h:48
@ VALIDATION
Definition categories.h:37
@ MEMPOOLREJ
Definition categories.h:32
@ CMPCTBLOCK
Definition categories.h:28
@ MEMPOOL
Definition categories.h:18
@ DEPLOYMENT_SEGWIT
Definition params.h:30
CSerializedNetMsg Make(std::string msg_type, Args &&... args)
constexpr const char * FILTERCLEAR
The filterclear message tells the receiving peer to remove a previously-set bloom filter.
Definition protocol.h:180
constexpr const char * FEEFILTER
The feefilter message tells the receiving peer not to inv us any txs which do not meet the specified ...
Definition protocol.h:192
constexpr const char * SENDHEADERS
Indicates that a node prefers to receive new block announcements via a "headers" message rather than ...
Definition protocol.h:186
constexpr const char * GETBLOCKS
The getblocks message requests an inv message that provides block header hashes starting from a parti...
Definition protocol.h:107
constexpr const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
Definition protocol.h:123
constexpr const char * ADDR
The addr (IP address) message relays connection information for peers on the network.
Definition protocol.h:75
constexpr const char * GETBLOCKTXN
Contains a BlockTransactionsRequest Peer should respond with "blocktxn" message.
Definition protocol.h:212
constexpr const char * CMPCTBLOCK
Contains a CBlockHeaderAndShortTxIDs object - providing a header and list of "short txids".
Definition protocol.h:206
constexpr const char * CFCHECKPT
cfcheckpt is a response to a getcfcheckpt request containing a vector of evenly spaced filter headers...
Definition protocol.h:254
constexpr const char * SENDADDRV2
The sendaddrv2 message signals support for receiving ADDRV2 messages (BIP155).
Definition protocol.h:87
constexpr const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
Definition protocol.h:132
constexpr const char * GETCFILTERS
getcfilters requests compact filters for a range of blocks.
Definition protocol.h:224
constexpr const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
Definition protocol.h:150
constexpr const char * BLOCKTXN
Contains a BlockTransactions.
Definition protocol.h:218
constexpr const char * CFHEADERS
cfheaders is a response to a getcfheaders request containing a filter header and a vector of filter h...
Definition protocol.h:242
constexpr const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected.
Definition protocol.h:144
constexpr const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
Definition protocol.h:164
constexpr const char * SENDTXRCNCL
Contains a 4-byte version number and an 8-byte salt.
Definition protocol.h:266
constexpr const char * ADDRV2
The addrv2 message relays connection information for peers on the network just like the addr message,...
Definition protocol.h:81
constexpr const char * VERACK
The verack message acknowledges a previously-received version message, informing the connecting node ...
Definition protocol.h:70
constexpr const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
Definition protocol.h:113
constexpr const char * FILTERADD
The filteradd message tells the receiving peer to add a single element to a previously-set bloom filt...
Definition protocol.h:172
constexpr const char * CFILTER
cfilter is a response to a getcfilters request containing a single compact filter.
Definition protocol.h:229
constexpr const char * GETDATA
The getdata message requests one or more data objects from another node.
Definition protocol.h:96
constexpr const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
Definition protocol.h:200
constexpr const char * GETCFCHECKPT
getcfcheckpt requests evenly spaced compact filter headers, enabling parallelized download and valida...
Definition protocol.h:249
constexpr const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmi...
Definition protocol.h:92
constexpr const char * TX
The tx message transmits a single transaction.
Definition protocol.h:117
constexpr const char * MEMPOOL
The mempool message requests the TXIDs of transactions that the receiving node has verified as valid ...
Definition protocol.h:139
constexpr const char * NOTFOUND
The notfound message is a reply to a getdata message which requested an object the receiving node doe...
Definition protocol.h:156
constexpr const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
Definition protocol.h:102
constexpr const char * WTXIDRELAY
Indicates that a node prefers to relay transactions via wtxid, rather than txid.
Definition protocol.h:260
constexpr const char * BLOCK
The block message transmits a single serialized block.
Definition protocol.h:127
constexpr const char * GETCFHEADERS
getcfheaders requests a compact filter header and the filter hashes for a range of blocks,...
Definition protocol.h:237
constexpr const char * VERSION
The version message provides information about the transmitting node to the receiving node at the beg...
Definition protocol.h:65
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS
Maximum number of transactions to consider for requesting, per peer.
""_hex is a compile-time user-defined literal returning a std::array<std::byte>, equivalent to ParseH...
bool fListen
Definition net.cpp:117
std::string strSubVersion
Subversion as sent to the P2P network in version messages.
Definition net.cpp:120
std::optional< CService > GetLocalAddrForPeer(CNode &node)
Returns a local address that we should advertise to this peer.
Definition net.cpp:240
std::function< void(const CAddress &addr, const std::string &msg_type, std::span< const unsigned char > data, bool is_incoming)> CaptureMessage
Defaults to CaptureMessageToFile(), but can be overridden by unit tests.
Definition net.cpp:4224
bool SeenLocal(const CService &addr)
vote for a local address
Definition net.cpp:318
static const unsigned int MAX_SUBVERSION_LENGTH
Maximum length of the user agent string in version message.
Definition net.h:67
static constexpr std::chrono::minutes TIMEOUT_INTERVAL
Time after which to disconnect, after waiting for a ping response (or inactivity).
Definition net.h:59
int64_t NodeId
Definition net.h:103
static constexpr auto HEADERS_RESPONSE_TIME
How long to wait for a peer to respond to a getheaders request.
static constexpr size_t MAX_ADDR_TO_SEND
The maximum number of address records permitted in an ADDR message.
static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET
The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND based inc...
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT
Default time during which a peer must stall block download progress before being disconnected.
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL
Average delay between feefilter broadcasts in seconds.
static constexpr auto EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect.
static const unsigned int BLOCK_DOWNLOAD_WINDOW
Size of the "block download window": how far ahead of our current height do we fetch?
static constexpr int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
static constexpr int HISTORICAL_BLOCK_AGE
Age after which a block is considered historical for purposes of rate limiting block relay.
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL
Delay between rotating the peers we relay a particular address to.
static constexpr auto MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict.
static constexpr auto CHAIN_SYNC_TIMEOUT
Timeout for (unprotected) outbound peers to sync to our chainwork.
static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for outbound peers.
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL
Average delay between local address broadcasts.
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
static constexpr uint64_t CMPCTBLOCKS_VERSION
The compactblocks version we support.
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT
Protect at least this many outbound peers from disconnection due to slow/ behind headers chain.
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for inbound peers.
static constexpr size_t NUM_PRIVATE_BROADCAST_PER_TX
For private broadcast, send a transaction to this many peers.
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY
Maximum feefilter broadcast delay after significant change.
static constexpr uint32_t MAX_GETCFILTERS_SIZE
Maximum number of compact filters that may be requested with one getcfilters.
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout.
static const unsigned int MAX_GETDATA_SZ
Limit to avoid sending big packets.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE
Block download timeout base, expressed in multiples of the block interval (i.e.
static constexpr auto PRIVATE_BROADCAST_MAX_CONNECTION_LIFETIME
Private broadcast connections must complete within this time.
static constexpr auto STALE_CHECK_INTERVAL
How frequently to check for stale tips.
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL
Average delay between peer address broadcasts.
static const unsigned int MAX_LOCATOR_SZ
The maximum number of entries in a locator.
static constexpr unsigned int INVENTORY_BROADCAST_TARGET
Target number of tx inventory items to send per transmission.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
Additional block download timeout per parallel downloading peer (i.e.
static constexpr double MAX_ADDR_RATE_PER_SECOND
The maximum rate of address records we're willing to process on average.
static constexpr auto PING_INTERVAL
Time between pings automatically sent out for latency probing and keepalive.
static const int MAX_CMPCTBLOCK_DEPTH
Maximum depth of blocks we're willing to serve as compact blocks to peers when requested.
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE
Maximum number of headers to announce when relaying blocks with headers message.
static const unsigned int NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS
Window, in blocks, for connecting to NODE_NETWORK_LIMITED peers.
static constexpr uint32_t MAX_GETCFHEADERS_SIZE
Maximum number of cf hashes that may be requested with one getcfheaders.
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX
Maximum timeout for stalling block download.
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY
SHA256("main address relay")[0:8].
static constexpr unsigned int INVENTORY_BROADCAST_MAX
Maximum number of inventory items to send per transmission.
static constexpr size_t MAX_PCT_ADDR_TO_SEND
the maximum percentage of addresses from our addrman to return in response to a getaddr message.
static const unsigned int MAX_INV_SZ
The maximum number of entries in an 'inv' protocol message.
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND
Maximum rate of inventory items to send per second.
static const unsigned int MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK
Maximum number of outstanding CMPCTBLOCK requests for the same block.
ReachableNets g_reachable_nets
Definition netbase.cpp:43
bool IsProxy(const CNetAddr &addr)
Definition netbase.cpp:739
static constexpr unsigned int DEFAULT_MIN_RELAY_TX_FEE
Default for -minrelaytxfee, minimum relay fee for transactions.
Definition policy.h:69
static constexpr TransactionSerParams TX_NO_WITNESS
static constexpr TransactionSerParams TX_WITH_WITNESS
std::shared_ptr< const CTransaction > CTransactionRef
GenTxid ToGenTxid(const CInv &inv)
Convert a TX/WITNESS_TX/WTX CInv to a GenTxid.
Definition protocol.cpp:121
const uint32_t MSG_WITNESS_FLAG
getdata message type flags
Definition protocol.h:470
@ MSG_TX
Definition protocol.h:479
@ MSG_WTX
Defined in BIP 339.
Definition protocol.h:481
@ MSG_BLOCK
Definition protocol.h:480
@ MSG_CMPCT_BLOCK
Defined in BIP152.
Definition protocol.h:484
@ MSG_WITNESS_BLOCK
Defined in BIP144.
Definition protocol.h:485
ServiceFlags
nServices flags
Definition protocol.h:309
@ NODE_NONE
Definition protocol.h:312
@ NODE_WITNESS
Definition protocol.h:320
@ NODE_NETWORK_LIMITED
Definition protocol.h:327
@ NODE_BLOOM
Definition protocol.h:317
@ NODE_NETWORK
Definition protocol.h:315
@ NODE_COMPACT_FILTERS
Definition protocol.h:323
static bool MayHaveUsefulAddressDB(ServiceFlags services)
Checks if a peer with the given service flags may be capable of having a robust address-storage DB.
Definition protocol.h:360
static const int WTXID_RELAY_VERSION
"wtxidrelay" message type for wtxid-based relay starts with this version
static const int SHORT_IDS_BLOCKS_VERSION
short-id-based block download starts with this version
static const int SENDHEADERS_VERSION
"sendheaders" message type and announcing blocks with headers starts with this version
static const int PROTOCOL_VERSION
network protocol versioning
static const int FEEFILTER_VERSION
"feefilter" tells peers to filter invs to you by fee starts with this version
static const int MIN_PEER_PROTO_VERSION
disconnect from peers older than this proto version
static const int INVALID_CB_NO_BAN_VERSION
not banning for invalid compact blocks starts with this version
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
Definition script.h:28
#define LIMITED_STRING(obj, n)
Definition serialize.h:493
uint64_t ReadCompactSize(Stream &is, bool range_check=true)
Decode a CompactSize-encoded variable-length integer.
Definition serialize.h:330
static Wrapper< Formatter, T & > Using(T &&t)
Cause serialization/deserialization of an object to be done using a specified formatter class.
Definition serialize.h:488
constexpr auto MakeUCharSpan(const V &v) -> decltype(UCharSpanCast(std::span{v}))
Like the std::span constructor, but for (const) unsigned char member types only.
Definition span.h:111
std::vector< uint256 > vHave
Definition block.h:127
bool IsNull() const
Definition block.h:145
std::chrono::microseconds m_ping_wait
std::vector< int > vHeightInFlight
CAmount m_fee_filter_received
std::chrono::seconds time_offset
uint64_t m_addr_rate_limited
uint64_t m_last_inv_seq
uint64_t m_addr_processed
ServiceFlags their_services
CSerializedNetMsg Copy() const
Definition net.h:128
int64_t nPowTargetSpacing
Definition params.h:120
bool IsNull() const
Definition flatfile.h:32
const ResultType m_result_type
Result type.
Definition validation.h:140
const TxValidationState m_state
Contains information about why the transaction failed.
Definition validation.h:143
const std::list< CTransactionRef > m_replaced_transactions
Mempool transactions replaced by the tx.
Definition validation.h:146
static time_point now() noexcept
Return current system time or mocked time, if set.
Definition time.cpp:30
std::chrono::time_point< NodeClock > time_point
Definition time.h:19
PackageValidationState m_state
Definition validation.h:238
std::map< Wtxid, MempoolAcceptResult > m_tx_results
Map from wtxid to finished MempoolAcceptResults.
Definition validation.h:245
Information about chainstate that notifications are sent from.
Definition types.h:18
CFeeRate min_relay_feerate
A fee rate smaller than this is considered zero fee (for relaying, mining and transaction creation).
std::vector< NodeId > m_senders
std::string ToString() const
#define AssertLockNotHeld(cs)
Definition sync.h:141
AnnotatedMixin< std::mutex > Mutex
Wrapped mutex: supports waiting but not recursive locking.
Definition sync.h:123
#define LOCK2(cs1, cs2)
Definition sync.h:259
AnnotatedMixin< std::recursive_mutex > RecursiveMutex
Wrapped mutex: supports recursive locking, but no waiting TODO: We should move away from using the re...
Definition sync.h:120
#define LOCK(cs)
Definition sync.h:258
#define WITH_LOCK(cs, code)
Definition sync.h:289
#define AssertLockHeld(cs)
Definition sync.h:136
COutPoint ProcessBlock(const NodeContext &node, const std::shared_ptr< CBlock > &block)
Returns the generated coin (or Null if the block was invalid).
Definition mining.cpp:106
static int count
#define EXCLUSIVE_LOCKS_REQUIRED(...)
#define GUARDED_BY(x)
#define LOCKS_EXCLUDED(...)
#define ACQUIRED_BEFORE(...)
#define PT_GUARDED_BY(x)
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
#define TRACEPOINT(context,...)
Definition trace.h:56
#define TRACEPOINT_SEMAPHORE(context, event)
Definition trace.h:54
transaction_identifier< true > Wtxid
Wtxid commits to all transaction fields including the witness.
transaction_identifier< false > Txid
Txid commits to all transaction fields except the witness.
consteval auto _(util::TranslatedLiteral str)
Definition translation.h:79
ReconciliationRegisterResult
static constexpr uint32_t TXRECONCILIATION_VERSION
Supported transaction reconciliation protocol version.
std::string SanitizeString(std::string_view str, int rule)
Remove unsafe chars.
int64_t GetTime()
DEPRECATED Use either ClockType::now() or Now<TimePointType>() if a cast is needed.
Definition time.cpp:81
T Now()
Return the current time point cast to the given precision.
Definition time.h:126
constexpr int64_t count_microseconds(std::chrono::microseconds t)
Definition time.h:90
constexpr int64_t count_seconds(std::chrono::seconds t)
Definition time.h:88
std::chrono::time_point< NodeClock, std::chrono::seconds > NodeSeconds
Definition time.h:25
constexpr auto Ticks(Dur2 d)
Helper to count the seconds of a duration/time_point.
Definition time.h:73
PackageMempoolAcceptResult ProcessNewPackage(Chainstate &active_chainstate, CTxMemPool &pool, const Package &package, bool test_accept, const std::optional< CFeeRate > &client_maxfeerate)
Validate (and maybe submit) a package to the mempool.
bool IsBlockMutated(const CBlock &block, bool check_witness_root)
Check if a block has been mutated (with respect to its merkle root and witness commitments).
bool HasValidProofOfWork(std::span< const CBlockHeader > headers, const Consensus::Params &consensusParams)
Check that the proof of work on each blockheader matches the value in nBits.
arith_uint256 CalculateClaimedHeadersWork(std::span< const CBlockHeader > headers)
Return the sum of the claimed work on a given set of headers.
assert(!tx.IsCoinBase())
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of ActiveChain().Tip() will not be pr...
Definition validation.h:76
@ UNVALIDATED
Blocks after an assumeutxo snapshot have been validated but the snapshot itself has not been validate...
Definition validation.h:531