// NOTE(review): extracted fragment of a CuckooCache parallel-erase test
// (Boost.Test style).  Original file line numbers ("198", "200", ...) are
// fused into the text by the extraction, and several lines are missing from
// this view (loop/lambda closing braces, the thread-join body, surrounding
// scope braces, and sibling BOOST_CHECKs) — do not treat this extract as
// compilable on its own.

// Size the cache to `megabytes` and compute how many uint256 entries are
// needed to fill it to the requested `load` factor.
// (Assumes `set`, `megabytes`, `load`, and `m_rng` are declared in the
// enclosing test fixture/function, outside this view — TODO confirm.)
198 std::vector<uint256> hashes;
200 size_t bytes = megabytes * (1 << 20);
201 set.setup_bytes(bytes);
202 uint32_t n_insert =
static_cast<uint32_t
>(load * (bytes /
sizeof(
uint256)));
// Fill `hashes` with deterministic pseudo-random 256-bit values, written as
// eight 32-bit words per hash.  (The closing brace of this outer loop is not
// visible in this extract.)
203 hashes.resize(n_insert);
204 for (uint32_t i = 0; i < n_insert; ++i) {
205 uint32_t* ptr = (uint32_t*)hashes[i].begin();
206 for (uint8_t j = 0; j < 8; ++j)
207 *(ptr++) =
m_rng.rand32();
// Keep a pristine copy for insertion; presumably later operations on the
// cache may disturb `hashes` or the copy guards against that — the original
// rationale lives outside this extract, verify against the full file.
213 std::vector<uint256> hashes_insert_copy = hashes;
// Writer phases take `mtx` exclusively (unique_lock); the reader threads
// below take it shared (shared_lock), so readers run concurrently with each
// other but never with a writer.
214 std::shared_mutex mtx;
218 std::unique_lock<std::shared_mutex> l(mtx);
// Phase 1 (exclusive): insert the first half of the generated hashes.
220 for (uint32_t i = 0; i < (n_insert / 2); ++i)
221 set.insert(hashes_insert_copy[i]);
// Phase 2 (shared): three reader threads partition the first quarter of the
// elements ([0, n_insert/4), ntodo each) and call contains(e, true).  The
// later tally names these elements "erased_but_contained" and asserts they
// survive at a lower rate, so the `true` flag evidently marks the element
// for erasure/eviction — confirm against the CuckooCache API.
226 std::vector<std::thread> threads;
229 for (uint32_t x = 0; x < 3; ++x)
232 threads.emplace_back([&, x] {
233 std::shared_lock<std::shared_mutex> l(mtx);
234 size_t ntodo = (n_insert/4)/3;
235 size_t start = ntodo*x;
236 size_t end = ntodo*(x+1);
237 for (uint32_t i = start; i < end; ++i) {
238 bool contains = set.contains(hashes[i], true);
// (The use of `contains`, the loop's closing brace, and the lambda's
// closing `});` are missing from this extract.)
// Wait for all reader threads to finish.  (The loop body — presumably
// `t.join();` — is missing from this extract.)
245 for (std::thread&
t : threads)
// Phase 3 (exclusive again): insert the second half, pressuring the cache so
// that slots freed by erasure can be preferentially reused.
248 std::unique_lock<std::shared_mutex> l(mtx);
250 for (uint32_t i = (n_insert / 2); i < n_insert; ++i)
251 set.insert(hashes_insert_copy[i]);
// Tally how many elements of each group still hit, using contains(e, false)
// as a pure (non-erasing) query:
//  - first quarter  [0, n/4):    erased by the reader threads above
//  - second quarter [n/4, n/2):  inserted early, never erased ("stale")
//  - second half    [n/2, n):    freshly inserted in phase 3
254 size_t count_erased_but_contained = 0;
256 size_t count_stale = 0;
258 size_t count_fresh = 0;
260 for (uint32_t i = 0; i < (n_insert / 4); ++i)
261 count_erased_but_contained += set.contains(hashes[i],
false);
262 for (uint32_t i = (n_insert / 4); i < (n_insert / 2); ++i)
263 count_stale += set.contains(hashes[i],
false);
264 for (uint32_t i = (n_insert / 2); i < n_insert; ++i)
265 count_fresh += set.contains(hashes[i],
false);
// Normalize each count by its group's size to get per-group hit rates.
267 double hit_rate_erased_but_contained = double(count_erased_but_contained) / (double(n_insert) / 4.0);
268 double hit_rate_stale = double(count_stale) / (double(n_insert) / 4.0);
269 double hit_rate_fresh = double(count_fresh) / (double(n_insert) / 2.0);
// Erased elements must survive at less than half the rate of merely-stale
// ones, i.e. erasure measurably raises eviction priority.  (Sibling checks
// on the other hit rates appear to be missing from this extract.)
275 BOOST_CHECK(hit_rate_stale > 2 * hit_rate_erased_but_contained);