Bitcoin Core 29.1.0
P2P Digital Currency
vecdeque.cpp
Go to the documentation of this file.
1 // Copyright (c) The Bitcoin Core developers
2 // Distributed under the MIT software license, see the accompanying
3 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
4 
5 #include <random.h>
6 #include <span.h>
7 #include <test/fuzz/util.h>
8 #include <util/vecdeque.h>
9 
10 #include <deque>
11 #include <stdint.h>
12 
13 namespace {
14 
/** Maximum number of VecDeque/deque buffer pairs maintained simultaneously. */
static constexpr size_t MAX_BUFFERS{3};
/** Maximum number of elements any single buffer may hold. */
static constexpr size_t MAX_BUFFER_SIZE{48};
/** Cap on the number of operations performed per fuzz input. */
static constexpr size_t MAX_OPERATIONS{1024};
21 
26 template<typename T, bool CheckNoneLeft>
27 void TestType(Span<const uint8_t> buffer, uint64_t rng_tweak)
28 {
29  FuzzedDataProvider provider(buffer.data(), buffer.size());
30  // Local RNG, only used for the seeds to initialize T objects with.
31  InsecureRandomContext rng(provider.ConsumeIntegral<uint64_t>() ^ rng_tweak);
32 
33  // Real circular buffers.
34  std::vector<VecDeque<T>> real;
35  real.reserve(MAX_BUFFERS);
36  // Simulated circular buffers.
37  std::vector<std::deque<T>> sim;
38  sim.reserve(MAX_BUFFERS);
39  // Temporary object of type T.
40  std::optional<T> tmp;
41 
42  // Compare a real and a simulated buffer.
43  auto compare_fn = [](const VecDeque<T>& r, const std::deque<T>& s) {
44  assert(r.size() == s.size());
45  assert(r.empty() == s.empty());
46  assert(r.capacity() >= r.size());
47  if (s.size() == 0) return;
48  assert(r.front() == s.front());
49  assert(r.back() == s.back());
50  for (size_t i = 0; i < s.size(); ++i) {
51  assert(r[i] == s[i]);
52  }
53  };
54 
55  LIMITED_WHILE(provider.remaining_bytes(), MAX_OPERATIONS) {
56  int command = provider.ConsumeIntegral<uint8_t>() % 64;
57  unsigned idx = real.empty() ? 0 : provider.ConsumeIntegralInRange<unsigned>(0, real.size() - 1);
58  const size_t num_buffers = sim.size();
59  // Pick one operation based on value of command. Not all operations are always applicable.
60  // Loop through the applicable ones until command reaches 0 (which avoids the need to
61  // compute the number of applicable commands ahead of time).
62  const bool non_empty{num_buffers != 0};
63  const bool non_full{num_buffers < MAX_BUFFERS};
64  const bool partially_full{non_empty && non_full};
65  const bool multiple_exist{num_buffers > 1};
66  const bool existing_buffer_non_full{non_empty && sim[idx].size() < MAX_BUFFER_SIZE};
67  const bool existing_buffer_non_empty{non_empty && !sim[idx].empty()};
68  assert(non_full || non_empty);
69  while (true) {
70  if (non_full && command-- == 0) {
71  /* Default construct. */
72  real.emplace_back();
73  sim.emplace_back();
74  break;
75  }
76  if (non_empty && command-- == 0) {
77  /* resize() */
78  compare_fn(real[idx], sim[idx]);
79  size_t new_size = provider.ConsumeIntegralInRange<size_t>(0, MAX_BUFFER_SIZE);
80  real[idx].resize(new_size);
81  sim[idx].resize(new_size);
82  assert(real[idx].size() == new_size);
83  break;
84  }
85  if (non_empty && command-- == 0) {
86  /* clear() */
87  compare_fn(real[idx], sim[idx]);
88  real[idx].clear();
89  sim[idx].clear();
90  assert(real[idx].empty());
91  break;
92  }
93  if (non_empty && command-- == 0) {
94  /* Copy construct default. */
95  compare_fn(real[idx], sim[idx]);
96  real[idx] = VecDeque<T>();
97  sim[idx].clear();
98  assert(real[idx].size() == 0);
99  break;
100  }
101  if (non_empty && command-- == 0) {
102  /* Destruct. */
103  compare_fn(real.back(), sim.back());
104  real.pop_back();
105  sim.pop_back();
106  break;
107  }
108  if (partially_full && command-- == 0) {
109  /* Copy construct. */
110  real.emplace_back(real[idx]);
111  sim.emplace_back(sim[idx]);
112  break;
113  }
114  if (partially_full && command-- == 0) {
115  /* Move construct. */
116  VecDeque<T> copy(real[idx]);
117  real.emplace_back(std::move(copy));
118  sim.emplace_back(sim[idx]);
119  break;
120  }
121  if (multiple_exist && command-- == 0) {
122  /* swap() */
123  swap(real[idx], real[(idx + 1) % num_buffers]);
124  swap(sim[idx], sim[(idx + 1) % num_buffers]);
125  break;
126  }
127  if (multiple_exist && command-- == 0) {
128  /* Copy assign. */
129  compare_fn(real[idx], sim[idx]);
130  real[idx] = real[(idx + 1) % num_buffers];
131  sim[idx] = sim[(idx + 1) % num_buffers];
132  break;
133  }
134  if (multiple_exist && command-- == 0) {
135  /* Move assign. */
136  VecDeque<T> copy(real[(idx + 1) % num_buffers]);
137  compare_fn(real[idx], sim[idx]);
138  real[idx] = std::move(copy);
139  sim[idx] = sim[(idx + 1) % num_buffers];
140  break;
141  }
142  if (non_empty && command-- == 0) {
143  /* Self swap() */
144  swap(real[idx], real[idx]);
145  break;
146  }
147  if (non_empty && command-- == 0) {
148  /* Self-copy assign. */
149  real[idx] = real[idx];
150  break;
151  }
152  if (non_empty && command-- == 0) {
153  /* Self-move assign. */
154  // Do not use std::move(real[idx]) here: -Wself-move correctly warns about that.
155  real[idx] = static_cast<VecDeque<T>&&>(real[idx]);
156  break;
157  }
158  if (non_empty && command-- == 0) {
159  /* reserve() */
160  size_t res_size = provider.ConsumeIntegralInRange<size_t>(0, MAX_BUFFER_SIZE);
161  size_t old_cap = real[idx].capacity();
162  size_t old_size = real[idx].size();
163  real[idx].reserve(res_size);
164  assert(real[idx].size() == old_size);
165  assert(real[idx].capacity() == std::max(old_cap, res_size));
166  break;
167  }
168  if (non_empty && command-- == 0) {
169  /* shrink_to_fit() */
170  size_t old_size = real[idx].size();
171  real[idx].shrink_to_fit();
172  assert(real[idx].size() == old_size);
173  assert(real[idx].capacity() == old_size);
174  break;
175  }
176  if (existing_buffer_non_full && command-- == 0) {
177  /* push_back() (copying) */
178  tmp = T(rng.rand64());
179  size_t old_size = real[idx].size();
180  size_t old_cap = real[idx].capacity();
181  real[idx].push_back(*tmp);
182  sim[idx].push_back(*tmp);
183  assert(real[idx].size() == old_size + 1);
184  if (old_cap > old_size) {
185  assert(real[idx].capacity() == old_cap);
186  } else {
187  assert(real[idx].capacity() > old_cap);
188  assert(real[idx].capacity() <= 2 * (old_cap + 1));
189  }
190  break;
191  }
192  if (existing_buffer_non_full && command-- == 0) {
193  /* push_back() (moving) */
194  tmp = T(rng.rand64());
195  size_t old_size = real[idx].size();
196  size_t old_cap = real[idx].capacity();
197  sim[idx].push_back(*tmp);
198  real[idx].push_back(std::move(*tmp));
199  assert(real[idx].size() == old_size + 1);
200  if (old_cap > old_size) {
201  assert(real[idx].capacity() == old_cap);
202  } else {
203  assert(real[idx].capacity() > old_cap);
204  assert(real[idx].capacity() <= 2 * (old_cap + 1));
205  }
206  break;
207  }
208  if (existing_buffer_non_full && command-- == 0) {
209  /* emplace_back() */
210  uint64_t seed{rng.rand64()};
211  size_t old_size = real[idx].size();
212  size_t old_cap = real[idx].capacity();
213  sim[idx].emplace_back(seed);
214  real[idx].emplace_back(seed);
215  assert(real[idx].size() == old_size + 1);
216  if (old_cap > old_size) {
217  assert(real[idx].capacity() == old_cap);
218  } else {
219  assert(real[idx].capacity() > old_cap);
220  assert(real[idx].capacity() <= 2 * (old_cap + 1));
221  }
222  break;
223  }
224  if (existing_buffer_non_full && command-- == 0) {
225  /* push_front() (copying) */
226  tmp = T(rng.rand64());
227  size_t old_size = real[idx].size();
228  size_t old_cap = real[idx].capacity();
229  real[idx].push_front(*tmp);
230  sim[idx].push_front(*tmp);
231  assert(real[idx].size() == old_size + 1);
232  if (old_cap > old_size) {
233  assert(real[idx].capacity() == old_cap);
234  } else {
235  assert(real[idx].capacity() > old_cap);
236  assert(real[idx].capacity() <= 2 * (old_cap + 1));
237  }
238  break;
239  }
240  if (existing_buffer_non_full && command-- == 0) {
241  /* push_front() (moving) */
242  tmp = T(rng.rand64());
243  size_t old_size = real[idx].size();
244  size_t old_cap = real[idx].capacity();
245  sim[idx].push_front(*tmp);
246  real[idx].push_front(std::move(*tmp));
247  assert(real[idx].size() == old_size + 1);
248  if (old_cap > old_size) {
249  assert(real[idx].capacity() == old_cap);
250  } else {
251  assert(real[idx].capacity() > old_cap);
252  assert(real[idx].capacity() <= 2 * (old_cap + 1));
253  }
254  break;
255  }
256  if (existing_buffer_non_full && command-- == 0) {
257  /* emplace_front() */
258  uint64_t seed{rng.rand64()};
259  size_t old_size = real[idx].size();
260  size_t old_cap = real[idx].capacity();
261  sim[idx].emplace_front(seed);
262  real[idx].emplace_front(seed);
263  assert(real[idx].size() == old_size + 1);
264  if (old_cap > old_size) {
265  assert(real[idx].capacity() == old_cap);
266  } else {
267  assert(real[idx].capacity() > old_cap);
268  assert(real[idx].capacity() <= 2 * (old_cap + 1));
269  }
270  break;
271  }
272  if (existing_buffer_non_empty && command-- == 0) {
273  /* front() [modifying] */
274  tmp = T(rng.rand64());
275  size_t old_size = real[idx].size();
276  assert(sim[idx].front() == real[idx].front());
277  sim[idx].front() = *tmp;
278  real[idx].front() = std::move(*tmp);
279  assert(real[idx].size() == old_size);
280  break;
281  }
282  if (existing_buffer_non_empty && command-- == 0) {
283  /* back() [modifying] */
284  tmp = T(rng.rand64());
285  size_t old_size = real[idx].size();
286  assert(sim[idx].back() == real[idx].back());
287  sim[idx].back() = *tmp;
288  real[idx].back() = *tmp;
289  assert(real[idx].size() == old_size);
290  break;
291  }
292  if (existing_buffer_non_empty && command-- == 0) {
293  /* operator[] [modifying] */
294  tmp = T(rng.rand64());
295  size_t pos = provider.ConsumeIntegralInRange<size_t>(0, sim[idx].size() - 1);
296  size_t old_size = real[idx].size();
297  assert(sim[idx][pos] == real[idx][pos]);
298  sim[idx][pos] = *tmp;
299  real[idx][pos] = std::move(*tmp);
300  assert(real[idx].size() == old_size);
301  break;
302  }
303  if (existing_buffer_non_empty && command-- == 0) {
304  /* pop_front() */
305  assert(sim[idx].front() == real[idx].front());
306  size_t old_size = real[idx].size();
307  sim[idx].pop_front();
308  real[idx].pop_front();
309  assert(real[idx].size() == old_size - 1);
310  break;
311  }
312  if (existing_buffer_non_empty && command-- == 0) {
313  /* pop_back() */
314  assert(sim[idx].back() == real[idx].back());
315  size_t old_size = real[idx].size();
316  sim[idx].pop_back();
317  real[idx].pop_back();
318  assert(real[idx].size() == old_size - 1);
319  break;
320  }
321  }
322  }
323 
324  /* Fully compare the final state. */
325  for (unsigned i = 0; i < sim.size(); ++i) {
326  // Make sure const getters work.
327  const VecDeque<T>& realbuf = real[i];
328  const std::deque<T>& simbuf = sim[i];
329  compare_fn(realbuf, simbuf);
330  for (unsigned j = 0; j < sim.size(); ++j) {
331  assert((realbuf == real[j]) == (simbuf == sim[j]));
332  assert(((realbuf <=> real[j]) >= 0) == (simbuf >= sim[j]));
333  assert(((realbuf <=> real[j]) <= 0) == (simbuf <= sim[j]));
334  }
335  // Clear out the buffers so we can check below that no objects exist anymore.
336  sim[i].clear();
337  real[i].clear();
338  }
339 
340  if constexpr (CheckNoneLeft) {
341  tmp = std::nullopt;
342  T::CheckNoneExist();
343  }
344 }
345 
347 template<size_t Size>
348 class TrackedObj
349 {
350  static_assert(Size > 0);
351 
352  /* Data type for map that actually stores the object data.
353  *
354  * The key is a pointer to the TrackedObj, the value is the uint64_t it was initialized with.
355  * Default-constructed and moved-from objects hold an std::nullopt.
356  */
357  using track_map_type = std::map<const TrackedObj<Size>*, std::optional<uint64_t>>;
358 
359 private:
360 
362  static inline track_map_type g_tracker;
363 
368  typename track_map_type::iterator m_track_entry[Size];
369 
370  void Check() const
371  {
372  auto it = g_tracker.find(this);
373  for (size_t i = 0; i < Size; ++i) {
374  assert(m_track_entry[i] == it);
375  }
376  }
377 
379  void Register()
380  {
381  auto [it, inserted] = g_tracker.emplace(this, std::nullopt);
382  assert(inserted);
383  for (size_t i = 0; i < Size; ++i) {
384  m_track_entry[i] = it;
385  }
386  }
387 
388  void Deregister()
389  {
390  Check();
391  assert(m_track_entry[0] != g_tracker.end());
392  g_tracker.erase(m_track_entry[0]);
393  for (size_t i = 0; i < Size; ++i) {
394  m_track_entry[i] = g_tracker.end();
395  }
396  }
397 
399  std::optional<uint64_t>& Deref()
400  {
401  Check();
402  assert(m_track_entry[0] != g_tracker.end());
403  return m_track_entry[0]->second;
404  }
405 
407  const std::optional<uint64_t>& Deref() const
408  {
409  Check();
410  assert(m_track_entry[0] != g_tracker.end());
411  return m_track_entry[0]->second;
412  }
413 
414 public:
415  ~TrackedObj() { Deregister(); }
416  TrackedObj() { Register(); }
417 
418  TrackedObj(uint64_t value)
419  {
420  Register();
421  Deref() = value;
422  }
423 
424  TrackedObj(const TrackedObj& other)
425  {
426  Register();
427  Deref() = other.Deref();
428  }
429 
430  TrackedObj(TrackedObj&& other)
431  {
432  Register();
433  Deref() = other.Deref();
434  other.Deref() = std::nullopt;
435  }
436 
437  TrackedObj& operator=(const TrackedObj& other)
438  {
439  if (this == &other) return *this;
440  Deref() = other.Deref();
441  return *this;
442  }
443 
444  TrackedObj& operator=(TrackedObj&& other)
445  {
446  if (this == &other) return *this;
447  Deref() = other.Deref();
448  other.Deref() = std::nullopt;
449  return *this;
450  }
451 
452  friend bool operator==(const TrackedObj& a, const TrackedObj& b)
453  {
454  return a.Deref() == b.Deref();
455  }
456 
457  friend std::strong_ordering operator<=>(const TrackedObj& a, const TrackedObj& b)
458  {
459  // Libc++ 15 & 16 do not support std::optional<T>::operator<=> yet. See
460  // https://reviews.llvm.org/D146392.
461  if (!a.Deref().has_value() || !b.Deref().has_value()) {
462  return a.Deref().has_value() <=> b.Deref().has_value();
463  }
464  return *a.Deref() <=> *b.Deref();
465  }
466 
467  static void CheckNoneExist()
468  {
469  assert(g_tracker.empty());
470  }
471 };
472 
473 } // namespace
474 
475 FUZZ_TARGET(vecdeque)
476 {
477  // Run the test with simple uints (which satisfy all the trivial properties).
478  static_assert(std::is_trivially_copyable_v<uint32_t>);
479  static_assert(std::is_trivially_destructible_v<uint64_t>);
480  TestType<uint8_t, false>(buffer, 1);
481  TestType<uint16_t, false>(buffer, 2);
482  TestType<uint32_t, false>(buffer, 3);
483  TestType<uint64_t, false>(buffer, 4);
484 
485  // Run the test with TrackedObjs (which do not).
486  static_assert(!std::is_trivially_copyable_v<TrackedObj<3>>);
487  static_assert(!std::is_trivially_destructible_v<TrackedObj<17>>);
488  TestType<TrackedObj<1>, true>(buffer, 5);
489  TestType<TrackedObj<3>, true>(buffer, 6);
490  TestType<TrackedObj<17>, true>(buffer, 7);
491 }
size_t size() const noexcept
Get the number of elements in this deque.
Definition: vecdeque.h:312
assert(!tx.IsCoinBase())
bool operator==(const CNetAddr &a, const CNetAddr &b)
Definition: netaddress.cpp:607
Data structure largely mimicking std::deque, but using single preallocated ring buffer.
Definition: vecdeque.h:24
T & front() noexcept
Get a mutable reference to the first element of the deque.
Definition: vecdeque.h:268
constexpr std::size_t size() const noexcept
Definition: span.h:187
T & back() noexcept
Get a mutable reference to the last element of the deque.
Definition: vecdeque.h:282
#define LIMITED_WHILE(condition, limit)
Can be used to limit a theoretically unbounded loop.
Definition: fuzz.h:22
FUZZ_TARGET(vecdeque)
Definition: vecdeque.cpp:475
bool empty() const noexcept
Test whether the contents of this deque is empty.
Definition: vecdeque.h:310
xoroshiro128++ PRNG.
Definition: random.h:415
const auto command
constexpr C * data() const noexcept
Definition: span.h:174
void clear() noexcept
Resize the deque to be size 0.
Definition: vecdeque.h:126
A Span is an object that can refer to a contiguous sequence of objects.
Definition: span.h:97
size_t capacity() const noexcept
Get the capacity of this deque (maximum size it can have without reallocating).
Definition: vecdeque.h:314
#define T(expected, seed, data)