variant4_random_math.h
#ifndef VARIANT4_RANDOM_MATH_H
#define VARIANT4_RANDOM_MATH_H

// Register size can be configured to either 32 bit (uint32_t) or 64 bit (uint64_t)
typedef uint32_t v4_reg;

enum V4_Settings
{
    // Generate code with minimal theoretical latency = 45 cycles, which is equivalent to 15 multiplications
    TOTAL_LATENCY = 15 * 3,

    // Always generate at least 60 instructions
    NUM_INSTRUCTIONS_MIN = 60,

    // Never generate more than 70 instructions (the final RET instruction doesn't count here)
    NUM_INSTRUCTIONS_MAX = 70,

    // Available ALUs for MUL
    // Modern CPUs typically have only 1 ALU which can do multiplications
    ALU_COUNT_MUL = 1,

    // Total available ALUs
    // Modern CPUs have 4 ALUs, but we use only 3 because random math executes together with other main loop code
    ALU_COUNT = 3,
};

enum V4_InstructionList
{
    MUL, // a*b
    ADD, // a+b + C, C is an unsigned 32-bit constant
    SUB, // a-b
    ROR, // rotate right "a" by "b & 31" bits
    ROL, // rotate left "a" by "b & 31" bits
    XOR, // a^b
    RET, // finish execution
    V4_INSTRUCTION_COUNT = RET,
};

// V4_InstructionDefinition is used to generate code from random data
// Every random sequence of bytes is a valid code
//
// There are 9 registers in total:
// - 4 variable registers
// - 5 constant registers initialized from loop variables
// This is why dst_index is 2 bits
enum V4_InstructionDefinition
{
    V4_OPCODE_BITS = 3,
    V4_DST_INDEX_BITS = 2,
    V4_SRC_INDEX_BITS = 3,
};

struct V4_Instruction
{
    uint8_t opcode;
    uint8_t dst_index;
    uint8_t src_index;
    uint32_t C;
};
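
// Worked example (added for illustration; not part of the consensus-critical code):
// one random byte "c" is split into bit fields exactly as v4_random_math_init() does below:
//
//   raw_opcode = c & ((1 << V4_OPCODE_BITS) - 1);                                            // bits 0-2
//   dst_index  = (c >> V4_OPCODE_BITS) & ((1 << V4_DST_INDEX_BITS) - 1);                     // bits 3-4
//   src_index  = (c >> (V4_OPCODE_BITS + V4_DST_INDEX_BITS)) & ((1 << V4_SRC_INDEX_BITS) - 1); // bits 5-7
//
// e.g. c = 0xD3 = 0b11010011 gives raw_opcode = 3 (mapped to ADD), dst_index = 2, src_index = 6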
#ifndef FORCEINLINE
#if defined(__GNUC__)
#define FORCEINLINE __attribute__((always_inline)) inline
#elif defined(_MSC_VER)
#define FORCEINLINE __forceinline
#else
#define FORCEINLINE inline
#endif
#endif

#ifndef UNREACHABLE_CODE
#if defined(__GNUC__)
#define UNREACHABLE_CODE __builtin_unreachable()
#elif defined(_MSC_VER)
#define UNREACHABLE_CODE __assume(false)
#else
#define UNREACHABLE_CODE
#endif
#endif

// Random math interpreter's loop is fully unrolled and inlined to achieve 100% branch prediction on the CPU:
// every switch-case will point to the same destination on every iteration of the Cryptonight main loop
//
// This is about as fast as it can get without using low-level machine code generation
static FORCEINLINE void v4_random_math(const struct V4_Instruction* code, v4_reg* r)
{
    enum
    {
        REG_BITS = sizeof(v4_reg) * 8,
    };
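
    // Note on the ROR/ROL cases below: the shift amount is reduced mod REG_BITS, and the
    // complementary shift uses (REG_BITS - shift) % REG_BITS so that a zero shift never
    // becomes a shift by REG_BITS, which would be undefined behavior in C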
#define V4_EXEC(i) \
    { \
        const struct V4_Instruction* op = code + i; \
        const v4_reg src = r[op->src_index]; \
        v4_reg* dst = r + op->dst_index; \
        switch (op->opcode) \
        { \
        case MUL: \
            *dst *= src; \
            break; \
        case ADD: \
            *dst += src + op->C; \
            break; \
        case SUB: \
            *dst -= src; \
            break; \
        case ROR: \
            { \
                const uint32_t shift = src % REG_BITS; \
                *dst = (*dst >> shift) | (*dst << ((REG_BITS - shift) % REG_BITS)); \
            } \
            break; \
        case ROL: \
            { \
                const uint32_t shift = src % REG_BITS; \
                *dst = (*dst << shift) | (*dst >> ((REG_BITS - shift) % REG_BITS)); \
            } \
            break; \
        case XOR: \
            *dst ^= src; \
            break; \
        case RET: \
            return; \
        default: \
            UNREACHABLE_CODE; \
            break; \
        } \
    }

#define V4_EXEC_10(j) \
    V4_EXEC(j + 0) \
    V4_EXEC(j + 1) \
    V4_EXEC(j + 2) \
    V4_EXEC(j + 3) \
    V4_EXEC(j + 4) \
    V4_EXEC(j + 5) \
    V4_EXEC(j + 6) \
    V4_EXEC(j + 7) \
    V4_EXEC(j + 8) \
    V4_EXEC(j + 9)

    // A generated program can have 60 + a few more (usually 2-3) instructions to achieve the required latency
    // I've checked all block heights < 10,000,000 and here is the distribution of program sizes:
    //
    //  60      27960
    //  61     105054
    //  62    2452759
    //  63    5115997
    //  64    1022269
    //  65    1109635
    //  66     153145
    //  67       8550
    //  68       4529
    //  69        102

    // Unroll 70 instructions here
    V4_EXEC_10(0);  // instructions 0-9
    V4_EXEC_10(10); // instructions 10-19
    V4_EXEC_10(20); // instructions 20-29
    V4_EXEC_10(30); // instructions 30-39
    V4_EXEC_10(40); // instructions 40-49
    V4_EXEC_10(50); // instructions 50-59
    V4_EXEC_10(60); // instructions 60-69

#undef V4_EXEC_10
#undef V4_EXEC
}
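
// Note: exactly NUM_INSTRUCTIONS_MAX (70) slots are unrolled above. Generated programs have
// code_size <= 70 instructions followed by RET, so execution either hits RET inside the
// unrolled block or, when code_size == 70, simply falls off the end after slot 69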

// If we don't have enough data available, generate more
static FORCEINLINE void check_data(size_t* data_index, const size_t bytes_needed, int8_t* data, const size_t data_size)
{
    if (*data_index + bytes_needed > data_size)
    {
        hash_extra_blake(data, data_size, (char*) data);
        *data_index = 0;
    }
}
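
// Note: when the pool is exhausted, check_data() re-hashes the entire pool in place and
// restarts reading from offset 0, so the byte stream consumed by v4_random_math_init()
// below is fully determined by the initial 32-byte seed (i.e. by the block height)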

// Generates as many random math operations as possible with given latency and ALU restrictions
// "code" array must have space for NUM_INSTRUCTIONS_MAX+1 instructions
static inline int v4_random_math_init(struct V4_Instruction* code, const uint64_t height)
{
    // MUL is 3 cycles, 3-way addition and rotations are 2 cycles, SUB/XOR are 1 cycle
    // These latencies match real-life instruction latencies for Intel CPUs from Sandy Bridge up to Skylake/Coffee Lake
    //
    // AMD Ryzen has the same latencies except for 1-cycle ROR/ROL, so it'll be a bit faster than Intel Sandy Bridge and newer processors
    // Surprisingly, Intel Nehalem also has 1-cycle ROR/ROL, so it'll also be faster than Intel Sandy Bridge and newer processors
    // AMD Bulldozer has a 4-cycle latency for MUL (slower than Intel) and 1 cycle for ROR/ROL (faster than Intel), so average performance will be the same
    // Source: https://www.agner.org/optimize/instruction_tables.pdf
    const int op_latency[V4_INSTRUCTION_COUNT] = { 3, 2, 1, 2, 2, 1 };

    // Instruction latencies for a theoretical ASIC implementation
    const int asic_op_latency[V4_INSTRUCTION_COUNT] = { 3, 1, 1, 1, 1, 1 };

    // Available ALUs for each instruction
    const int op_ALUs[V4_INSTRUCTION_COUNT] = { ALU_COUNT_MUL, ALU_COUNT, ALU_COUNT, ALU_COUNT, ALU_COUNT, ALU_COUNT };

    int8_t data[32];
    memset(data, 0, sizeof(data));
    uint64_t tmp = SWAP64LE(height);
    memcpy(data, &tmp, sizeof(uint64_t));
    data[20] = -38; // change seed

    // Set data_index past the last byte in data
    // to trigger full data update with blake hash
    // before we start using it
    size_t data_index = sizeof(data);

    int code_size;

    // There is a small chance (1.8%) that register R8 won't be used in the generated program
    // So we keep track of it and try again if it's not used
    bool r8_used;
    do {
        int latency[9];
        int asic_latency[9];

        // Tracks previous instruction and value of the source operand for registers R0-R3 throughout code execution
        // byte 0: current value of the destination register
        // byte 1: instruction opcode
        // byte 2: current value of the source register
        //
        // Registers R4-R8 are constant and are treated as having the same value because when we do
        // the same operation twice with two constant source registers, it can be optimized into a single operation
        uint32_t inst_data[9] = { 0, 1, 2, 3, 0xFFFFFF, 0xFFFFFF, 0xFFFFFF, 0xFFFFFF, 0xFFFFFF };

        bool alu_busy[TOTAL_LATENCY + 1][ALU_COUNT];
        bool is_rotation[V4_INSTRUCTION_COUNT];
        bool rotated[4];
        int rotate_count = 0;

        memset(latency, 0, sizeof(latency));
        memset(asic_latency, 0, sizeof(asic_latency));
        memset(alu_busy, 0, sizeof(alu_busy));
        memset(is_rotation, 0, sizeof(is_rotation));
        memset(rotated, 0, sizeof(rotated));
        is_rotation[ROR] = true;
        is_rotation[ROL] = true;

        int num_retries = 0;
        code_size = 0;

        int total_iterations = 0;
        r8_used = false;

        // Generate random code to achieve minimal required latency for our abstract CPU
        // Try to get this latency for all 4 registers
        while (((latency[0] < TOTAL_LATENCY) || (latency[1] < TOTAL_LATENCY) || (latency[2] < TOTAL_LATENCY) || (latency[3] < TOTAL_LATENCY)) && (num_retries < 64))
        {
            // Fail-safe to guarantee loop termination
            ++total_iterations;
            if (total_iterations > 256)
                break;

            check_data(&data_index, 1, data, sizeof(data));

            const uint8_t c = ((uint8_t*)data)[data_index++];

            // MUL = opcodes 0-2
            // ADD = opcode 3
            // SUB = opcode 4
            // ROR/ROL = opcode 5, shift direction is selected randomly
            // XOR = opcodes 6-7
            uint8_t opcode = c & ((1 << V4_OPCODE_BITS) - 1);
            if (opcode == 5)
            {
                check_data(&data_index, 1, data, sizeof(data));
                opcode = (data[data_index++] >= 0) ? ROR : ROL;
            }
            else if (opcode >= 6)
            {
                opcode = XOR;
            }
            else
            {
                opcode = (opcode <= 2) ? MUL : (opcode - 2);
            }

            uint8_t dst_index = (c >> V4_OPCODE_BITS) & ((1 << V4_DST_INDEX_BITS) - 1);
            uint8_t src_index = (c >> (V4_OPCODE_BITS + V4_DST_INDEX_BITS)) & ((1 << V4_SRC_INDEX_BITS) - 1);

            const int a = dst_index;
            int b = src_index;

            // Don't do ADD/SUB/XOR with the same register
            if (((opcode == ADD) || (opcode == SUB) || (opcode == XOR)) && (a == b))
            {
                // Use register R8 as source instead
                b = 8;
                src_index = 8;
            }

            // Don't do rotation with the same destination twice because it's equal to a single rotation
            if (is_rotation[opcode] && rotated[a])
            {
                continue;
            }

            // Don't do the same instruction (except MUL) with the same source value twice because all other cases can be optimized:
            // 2xADD(a, b, C) = ADD(a, b*2, C1+C2), same for SUB and rotations
            // 2xXOR(a, b) = NOP
            if ((opcode != MUL) && ((inst_data[a] & 0xFFFF00) == (opcode << 8) + ((inst_data[b] & 255) << 16)))
            {
                continue;
            }

            // Find which ALU is available (and when) for this instruction
            int next_latency = (latency[a] > latency[b]) ? latency[a] : latency[b];
            int alu_index = -1;
            while (next_latency < TOTAL_LATENCY)
            {
                for (int i = op_ALUs[opcode] - 1; i >= 0; --i)
                {
                    if (!alu_busy[next_latency][i])
                    {
                        // ADD is implemented as two 1-cycle instructions on a real CPU, so do an additional availability check
                        if ((opcode == ADD) && alu_busy[next_latency + 1][i])
                        {
                            continue;
                        }

                        // Rotation can only start when the previous rotation is finished, so do an additional availability check
                        if (is_rotation[opcode] && (next_latency < rotate_count * op_latency[opcode]))
                        {
                            continue;
                        }

                        alu_index = i;
                        break;
                    }
                }
                if (alu_index >= 0)
                {
                    break;
                }
                ++next_latency;
            }

            // Don't generate instructions that leave some register unchanged for more than 7 cycles
            if (next_latency > latency[a] + 7)
            {
                continue;
            }

            next_latency += op_latency[opcode];

            if (next_latency <= TOTAL_LATENCY)
            {
                if (is_rotation[opcode])
                {
                    ++rotate_count;
                }

                // Mark ALU as busy only for the first cycle when it starts executing the instruction because ALUs are fully pipelined
                alu_busy[next_latency - op_latency[opcode]][alu_index] = true;
                latency[a] = next_latency;

                // ASIC is supposed to have enough ALUs to run as many independent instructions per cycle as possible, so latency calculation for ASIC is simple
                asic_latency[a] = ((asic_latency[a] > asic_latency[b]) ? asic_latency[a] : asic_latency[b]) + asic_op_latency[opcode];

                rotated[a] = is_rotation[opcode];

                inst_data[a] = code_size + (opcode << 8) + ((inst_data[b] & 255) << 16);

                code[code_size].opcode = opcode;
                code[code_size].dst_index = dst_index;
                code[code_size].src_index = src_index;
                code[code_size].C = 0;

                if (src_index == 8)
                {
                    r8_used = true;
                }

                if (opcode == ADD)
                {
                    // ADD instruction is implemented as two 1-cycle instructions on a real CPU, so mark ALU as busy for the next cycle too
                    alu_busy[next_latency - op_latency[opcode] + 1][alu_index] = true;

                    // ADD instruction requires 4 more random bytes for the 32-bit constant "C" in "a = a + b + C"
                    check_data(&data_index, sizeof(uint32_t), data, sizeof(data));
                    uint32_t t;
                    memcpy(&t, data + data_index, sizeof(uint32_t));
                    code[code_size].C = SWAP32LE(t);
                    data_index += sizeof(uint32_t);
                }

                ++code_size;
                if (code_size >= NUM_INSTRUCTIONS_MIN)
                {
                    break;
                }
            }
            else
            {
                ++num_retries;
            }
        }

        // ASIC has more execution resources and can extract as much parallelism from the code as possible
        // We need to add a few more MUL and ROR instructions to achieve the minimal required latency for ASIC
        // Get this latency for at least 1 of the 4 registers
        const int prev_code_size = code_size;
        while ((code_size < NUM_INSTRUCTIONS_MAX) && (asic_latency[0] < TOTAL_LATENCY) && (asic_latency[1] < TOTAL_LATENCY) && (asic_latency[2] < TOTAL_LATENCY) && (asic_latency[3] < TOTAL_LATENCY))
        {
            int min_idx = 0;
            int max_idx = 0;
            for (int i = 1; i < 4; ++i)
            {
                if (asic_latency[i] < asic_latency[min_idx]) min_idx = i;
                if (asic_latency[i] > asic_latency[max_idx]) max_idx = i;
            }

            const uint8_t pattern[3] = { ROR, MUL, MUL };
            const uint8_t opcode = pattern[(code_size - prev_code_size) % 3];
            latency[min_idx] = latency[max_idx] + op_latency[opcode];
            asic_latency[min_idx] = asic_latency[max_idx] + asic_op_latency[opcode];

            code[code_size].opcode = opcode;
            code[code_size].dst_index = min_idx;
            code[code_size].src_index = max_idx;
            code[code_size].C = 0;
            ++code_size;
        }

        // There is a ~98.15% chance that the loop condition is false, so this loop will execute only 1 iteration most of the time
        // It never does more than 4 iterations for all block heights < 10,000,000
    } while (!r8_used || (code_size < NUM_INSTRUCTIONS_MIN) || (code_size > NUM_INSTRUCTIONS_MAX));

    // It's guaranteed that NUM_INSTRUCTIONS_MIN <= code_size <= NUM_INSTRUCTIONS_MAX here
    // Add the final instruction to stop the interpreter
    code[code_size].opcode = RET;
    code[code_size].dst_index = 0;
    code[code_size].src_index = 0;
    code[code_size].C = 0;

    return code_size;
}
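
// Usage sketch (illustrative only, added for this listing; the exact register
// initialization is defined by the Cryptonight variant 4 main loop, not by this header):
//
//   struct V4_Instruction code[NUM_INSTRUCTIONS_MAX + 1]; // +1 for the final RET
//   v4_random_math_init(code, height);                    // regenerate when the block height changes
//
//   v4_reg r[9];                                          // 4 variable + 5 constant registers
//   // ... caller initializes r[0]-r[8] from its loop variables ...
//   v4_random_math(code, r);                              // run on every main-loop iteration
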
#endif