TBCI Numerical high perf. C++ Library  2.8.0
Macros
unroll_prefetch_def2.h File Reference

macros for composing unrolled prefetching loops over arrays. More...

Go to the source code of this file.

Macros

#define LCTYPE(T)   REGISTER typename tbci_traits<T>::loop_const_refval_type
 Shortcut for loop const ref type. More...
 
#define LCTYPED(T)   REGISTER tbci_traits<T>::loop_const_refval_type
 
#define UNROLL_DEPTH   4
 When unrolling the loops, I had the following architectural details in mind: More...
 
#define UNROLL1_PREF_KERNEL5(OPER, T, CA0, CA1, CA2)
 Non-unrolled kernel for 5 args with prefetching. More...
 
#define UNROLL1_KERNEL5(OPER)
 Non-unrolled kernel for 5 args without prefetching. More...
 
#define UNROLL1_KERNEL5_PREPARE   do {} while(0)
 
#define UNROLL1_KERNEL5_FIXUP   do {} while(0)
 
#define UNROLL2_PREF_KERNEL5(OPER, T, CA0, CA1, CA2)
 Twice unrolled kernel for 5 args with prefetching. More...
 
#define UNROLL2_KERNEL5(OPER)
 Twice unrolled kernel for 5 args without prefetching. More...
 
#define UNROLL2_KERNEL5_PREPARE   do {} while(0)
 
#define UNROLL2_KERNEL5_FIXUP   do {} while(0)
 
#define UNROLL4_PREF_KERNEL5(OPER, T, CA0, CA1, CA2)
 Four times unrolled kernel for 5 args with prefetching. More...
 
#define UNROLL4_KERNEL5(OPER)
 Four times unrolled kernel for 5 args without prefetching. More...
 
#define UNROLL4_KERNEL5_PREPARE   do {} while(0)
 
#define UNROLL4_KERNEL5_FIXUP   do {} while(0)
 
#define UNROLL8_PREF_KERNEL5(OPER, T, CA0, CA1, CA2)
 Eight times unrolled kernel for 5 args with prefetching. More...
 
#define UNROLL8_KERNEL5(OPER)
 Eight times unrolled kernel for 5 args without prefetching. More...
 
#define UNROLL8_KERNEL5_PREPARE   do {} while(0)
 
#define UNROLL8_KERNEL5_FIXUP   do {} while(0)
 
#define PREF_AHEAD3(T, CA0, CA1, CA2)
 Initial prefetch ahead (3 pointers) More...
 
#define UNROLL1_PREF_KERNEL4(OPER, T, PREFETCH_X, CA0, CA1)
 Non-unrolled kernel for 4 args with prefetching. More...
 
#define UNROLL1_KERNEL4(OPER)
 Non-unrolled kernel for 4 args without prefetching. More...
 
#define UNROLL1_KERNEL4_PREPARE   do {} while(0)
 
#define UNROLL1_KERNEL4_FIXUP   do {} while(0)
 
#define UNROLL2_PREF_KERNEL4(OPER, T, PREFETCH_X, CA0, CA1)
 Twice unrolled kernel for 4 args with prefetching. More...
 
#define UNROLL2_KERNEL4(OPER)
 Twice unrolled kernel for 4 args without prefetching. More...
 
#define UNROLL2_KERNEL4_PREPARE   do {} while(0)
 
#define UNROLL2_KERNEL4_FIXUP   do {} while(0)
 
#define UNROLL4_PREF_KERNEL4(OPER, T, PREFETCH_X, CA0, CA1)
 Four times unrolled kernel for 4 args with prefetching. More...
 
#define UNROLL4_KERNEL4(OPER)
 Four times unrolled kernel for 4 args without prefetching. More...
 
#define UNROLL4_KERNEL4_PREPARE   do {} while(0)
 
#define UNROLL4_KERNEL4_FIXUP   do {} while(0)
 
#define UNROLL8_PREF_KERNEL4(OPER, T, PREFETCH_X, CA0, CA1)
 Eight times unrolled kernel for 4 args with prefetching. More...
 
#define UNROLL8_KERNEL4(OPER)
 Eight times unrolled kernel for 4 args without prefetching. More...
 
#define UNROLL8_KERNEL4_PREPARE   do {} while(0)
 
#define UNROLL8_KERNEL4_FIXUP   do {} while(0)
 
#define PREF_AHEAD2(T, PREFETCH_X, CA0, CA1)
 Initial prefetch ahead (2 pointers) More...
 
#define UNROLL1_PREF_KERNEL3(OPER, T, PREFETCH_X, CA0)
 Non-unrolled kernel for 3 args with prefetching. More...
 
#define UNROLL1_KERNEL3(OPER)
 Non-unrolled kernel for 3 args without prefetching. More...
 
#define UNROLL1_KERNEL3_PREPARE   do {} while(0)
 
#define UNROLL1_KERNEL3_FIXUP   do {} while(0)
 
#define UNROLL2_PREF_KERNEL3(OPER, T, PREFETCH_X, CA0)
 Twice unrolled kernel for 3 args with prefetching. More...
 
#define UNROLL2_KERNEL3(OPER)
 Twice unrolled kernel for 3 args without prefetching. More...
 
#define UNROLL2_KERNEL3_PREPARE   do {} while(0)
 
#define UNROLL2_KERNEL3_FIXUP   do {} while(0)
 
#define UNROLL4_PREF_KERNEL3(OPER, T, PREFETCH_X, CA0)
 Four times unrolled kernel for 3 args with prefetching. More...
 
#define UNROLL4_KERNEL3(OPER)
 Four times unrolled kernel for 3 args without prefetching. More...
 
#define UNROLL4_KERNEL3_PREPARE   do {} while(0)
 
#define UNROLL4_KERNEL3_FIXUP   do {} while(0)
 
#define UNROLL8_PREF_KERNEL3(OPER, T, PREFETCH_X, CA0)
 Eight times unrolled kernel for 3 args with prefetching. More...
 
#define UNROLL8_KERNEL3(OPER)
 Eight times unrolled kernel for 3 args without prefetching. More...
 
#define UNROLL8_KERNEL3_PREPARE   do {} while(0)
 
#define UNROLL8_KERNEL3_FIXUP   do {} while(0)
 
#define PREF_AHEAD1(T, PREFETCH_X, CA0)
 Initial prefetch ahead (1 pointer) More...
 
#define UNR_PREF_KERNEL5   UNROLL4_PREF_KERNEL5
 
#define UNR_KERNEL5   UNROLL4_KERNEL5
 
#define UNR_KERNEL5_PREP   UNROLL4_KERNEL5_PREPARE
 
#define UNR_KERNEL5_FIX   UNROLL4_KERNEL5_FIXUP
 
#define UNR_PREF_KERNEL4   UNROLL4_PREF_KERNEL4
 
#define UNR_KERNEL4   UNROLL4_KERNEL4
 
#define UNR_KERNEL4_PREP   UNROLL4_KERNEL4_PREPARE
 
#define UNR_KERNEL4_FIX   UNROLL4_KERNEL4_FIXUP
 
#define UNR_PREF_KERNEL3   UNROLL4_PREF_KERNEL3
 
#define UNR_KERNEL3   UNROLL4_KERNEL3
 
#define UNR_KERNEL3_PREP   UNROLL4_KERNEL3_PREPARE
 
#define UNR_KERNEL3_FIX   UNROLL4_KERNEL3_FIXUP
 
#define VKERN_TEMPL_3V_PREF(OP, T)   do {} while (0)
 Fragments to be combined for different cases 1,2,3 vector fields 0,1,2 scalars to multiply with variable number of data elements per cacheline 1,2,4,8,16 cachelines ahead prefetch 1,2,4,8 fold unrolling. More...
 
#define VKERN_TEMPL_2V_PREF(OP, T, PREFETCH_X, CW)   do {} while (0)
 
#define VKERN_TEMPL_1V_PREF(OP, T, PREFETCH_X, CW)   do {} while (0)
 
#define VKERN_TEMPL_3V(FNAME, OP3)
 gcc-2.95.x seems to fail caching a const double& in a REGISTER. More...
 
#define VKERN_TEMPL_3V_C(FNAME, OP3)
 Operations of type vec = vec OP val * vec. More...
 
#define VKERN_TEMPL_3V_CC(FNAME, OP3)
 Operations of type vec = val * vec OP val * vec. More...
 
#define VKERN_TEMPL_2V(FNAME, OP2)
 Operations of type vec OP= vec. More...
 
#define VKERN_TEMPL_2V_C(FNAME, OP2)
 Operations of type VEC = VEC OP VAL or VAL OP VEC. More...
 
#define VKERN_TEMPL_2V_CC(FNAME, OP2)
 Operations of type VEC = VEC OP VAL or VAL OP VEC. More...
 
#define VKERN_TEMPL_2V_T(FNAME, OP2, TYPE)
 Operations of type TYPE = VEC OP VEC. More...
 
#define VKERN_TEMPL_1V(FNAME, OP1)
 Operations of type VEC = OP self. More...
 
#define VKERN_TEMPL_1V_C(FNAME, OP1)
 Operations of type VEC OP= VAL. More...
 
#define VKERN_TEMPL_1V_CC(FNAME, OP1)
 Operations of type VEC *= S OP= VAL. More...
 
#define VKERN_TEMPL_1V_T(FNAME, OP1, TYPE)
 Operations of type TYPE = OP VEC. More...
 
#define VKERN_TEMPL_1V_T_LD(FNAME, OP1, TYPE)
 Operations of type TYPE = OP VEC (using LONG_DOUBLE internally) More...
 

Detailed Description

macros for composing unrolled prefetching loops over arrays.

(c) Kurt Garloff, kurt@garloff.de, 7/2002, GNU LGPL v2

Id:
unroll_prefetch_def2.h,v 1.1.2.22 2022/11/03 17:28:11 garloff Exp

Definition in file unroll_prefetch_def2.h.

Macro Definition Documentation

#define LCTYPE (   T)    REGISTER typename tbci_traits<T>::loop_const_refval_type

Shortcut for loop const ref type.

Definition at line 14 of file unroll_prefetch_def2.h.

#define LCTYPED (   T)    REGISTER tbci_traits<T>::loop_const_refval_type

Definition at line 15 of file unroll_prefetch_def2.h.

#define PREF_AHEAD1 (   T,
  PREFETCH_X,
  CA0 
)

Initial prefetch ahead (1 pointer)

Definition at line 824 of file unroll_prefetch_def2.h.

#define PREF_AHEAD2 (   T,
  PREFETCH_X,
  CA0,
  CA1 
)

Initial prefetch ahead (2 pointers)

Definition at line 587 of file unroll_prefetch_def2.h.

#define PREF_AHEAD3 (   T,
  CA0,
  CA1,
  CA2 
)

Initial prefetch ahead (3 pointers)

Definition at line 288 of file unroll_prefetch_def2.h.

#define UNR_KERNEL3   UNROLL4_KERNEL3

Definition at line 907 of file unroll_prefetch_def2.h.

#define UNR_KERNEL3_FIX   UNROLL4_KERNEL3_FIXUP

Definition at line 909 of file unroll_prefetch_def2.h.

#define UNR_KERNEL3_PREP   UNROLL4_KERNEL3_PREPARE

Definition at line 908 of file unroll_prefetch_def2.h.

#define UNR_KERNEL4   UNROLL4_KERNEL4

Definition at line 902 of file unroll_prefetch_def2.h.

#define UNR_KERNEL4_FIX   UNROLL4_KERNEL4_FIXUP

Definition at line 904 of file unroll_prefetch_def2.h.

#define UNR_KERNEL4_PREP   UNROLL4_KERNEL4_PREPARE

Definition at line 903 of file unroll_prefetch_def2.h.

#define UNR_KERNEL5   UNROLL4_KERNEL5

Definition at line 897 of file unroll_prefetch_def2.h.

#define UNR_KERNEL5_FIX   UNROLL4_KERNEL5_FIXUP

Definition at line 899 of file unroll_prefetch_def2.h.

#define UNR_KERNEL5_PREP   UNROLL4_KERNEL5_PREPARE

Definition at line 898 of file unroll_prefetch_def2.h.

#define UNR_PREF_KERNEL3   UNROLL4_PREF_KERNEL3

Definition at line 906 of file unroll_prefetch_def2.h.

#define UNR_PREF_KERNEL4   UNROLL4_PREF_KERNEL4

Definition at line 901 of file unroll_prefetch_def2.h.

#define UNR_PREF_KERNEL5   UNROLL4_PREF_KERNEL5

Definition at line 896 of file unroll_prefetch_def2.h.

#define UNROLL1_KERNEL3 (   OPER)
Value:
--i; \
OPER(res[0], f1, f2); \
++res
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199

Non-unrolled kernel for 3 args without prefetching.

Definition at line 659 of file unroll_prefetch_def2.h.

#define UNROLL1_KERNEL3_FIXUP   do {} while(0)

Definition at line 665 of file unroll_prefetch_def2.h.

#define UNROLL1_KERNEL3_PREPARE   do {} while(0)

Definition at line 664 of file unroll_prefetch_def2.h.

#define UNROLL1_KERNEL4 (   OPER)
Value:
--i; \
OPER(res[0], v1[0], f1, f2); \
++v1; ++res
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199

Non-unrolled kernel for 4 args without prefetching.

Definition at line 388 of file unroll_prefetch_def2.h.

#define UNROLL1_KERNEL4_FIXUP   do {} while(0)

Definition at line 394 of file unroll_prefetch_def2.h.

#define UNROLL1_KERNEL4_PREPARE   do {} while(0)

Definition at line 393 of file unroll_prefetch_def2.h.

#define UNROLL1_KERNEL5 (   OPER)
Value:
--i; \
OPER(res[0], v1[0], v2[0], f1, f2); \
++v1; ++v2; ++res
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199

Non-unrolled kernel for 5 args without prefetching.

Definition at line 63 of file unroll_prefetch_def2.h.

#define UNROLL1_KERNEL5_FIXUP   do {} while(0)

Definition at line 69 of file unroll_prefetch_def2.h.

#define UNROLL1_KERNEL5_PREPARE   do {} while(0)

Definition at line 68 of file unroll_prefetch_def2.h.

#define UNROLL1_PREF_KERNEL3 (   OPER,
  T,
  PREFETCH_X,
  CA0 
)
Value:
OPER(res[0], f1, f2); \
--i; \
PREFETCH_X(res+PREF_OFFS(T), CA0); \
++res
#define PREF_OFFS(T)
Definition: perf_opt.h:173
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20

Non-unrolled kernel for 3 args with prefetching.

Definition at line 652 of file unroll_prefetch_def2.h.

#define UNROLL1_PREF_KERNEL4 (   OPER,
  T,
  PREFETCH_X,
  CA0,
  CA1 
)
Value:
OPER(res[0], v1[0], f1, f2); \
--i; \
PREFETCH_R(v1 +PREF_OFFS(T), CA1); \
++v1; \
PREFETCH_X(res+PREF_OFFS(T), CA0); \
++res
#define PREFETCH_R(addr, loc)
In case gcc does not yet support __builtin_prefetch(), we have handcoded assembly with gcc for a few ...
Definition: basics.h:748
#define PREF_OFFS(T)
Definition: perf_opt.h:173
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20

Non-unrolled kernel for 4 args with prefetching.

Definition at line 379 of file unroll_prefetch_def2.h.

#define UNROLL1_PREF_KERNEL5 (   OPER,
  T,
  CA0,
  CA1,
  CA2 
)
Value:
--i; \
OPER(res[0], v1[0], v2[0], f1, f2); \
PREFETCH_R(v1 +PREF_OFFS(T), CA1); \
++v1; \
PREFETCH_R(v2 +PREF_OFFS(T), CA2); \
++v2; \
++res
#define PREFETCH_R(addr, loc)
In case gcc does not yet support __builtin_prefetch(), we have handcoded assembly with gcc for a few ...
Definition: basics.h:748
#define PREF_OFFS(T)
Definition: perf_opt.h:173
#define PREFETCH_W(addr, loc)
Definition: basics.h:749
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20

Non-unrolled kernel for 5 args with prefetching.

Definition at line 52 of file unroll_prefetch_def2.h.

#define UNROLL2_KERNEL3 (   OPER)
Value:
OPER(res[0], f1, f2); \
i -= 2; \
OPER(res[1], f1, f2); \
res += 2
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199

Twice unrolled kernel for 3 args without prefetching.

Definition at line 687 of file unroll_prefetch_def2.h.

#define UNROLL2_KERNEL3_FIXUP   do {} while(0)

Definition at line 694 of file unroll_prefetch_def2.h.

#define UNROLL2_KERNEL3_PREPARE   do {} while(0)

Definition at line 693 of file unroll_prefetch_def2.h.

#define UNROLL2_KERNEL4 (   OPER)
Value:
OPER(res[0], v1[0], f1, f2); \
v1 += 2; i -= 2; \
OPER(res[1], v1[-1],f1, f2); \
res += 2
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199

Twice unrolled kernel for 4 args without prefetching.

Definition at line 421 of file unroll_prefetch_def2.h.

#define UNROLL2_KERNEL4_FIXUP   do {} while(0)

Definition at line 428 of file unroll_prefetch_def2.h.

#define UNROLL2_KERNEL4_PREPARE   do {} while(0)

Definition at line 427 of file unroll_prefetch_def2.h.

#define UNROLL2_KERNEL5 (   OPER)
Value:
OPER(res[0], v1[0], v2[0], f1, f2); \
i -= 2; \
OPER(res[1], v1[1], v2[1], f1, f2); \
v1 += 2; v2 += 2; res += 2
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199

Twice unrolled kernel for 5 args without prefetching.

Definition at line 99 of file unroll_prefetch_def2.h.

#define UNROLL2_KERNEL5_FIXUP   do {} while(0)

Definition at line 106 of file unroll_prefetch_def2.h.

#define UNROLL2_KERNEL5_PREPARE   do {} while(0)

Definition at line 105 of file unroll_prefetch_def2.h.

#define UNROLL2_PREF_KERNEL3 (   OPER,
  T,
  PREFETCH_X,
  CA0 
)
Value:
if (EL_PER_CL(T) <= 1) { \
OPER(res[0], f1, f2); \
PREFETCH_X(res+PREF_OFFS(T), CA0); \
i -= 2; \
OPER(res[1], f1, f2); \
PREFETCH_X(res+PREF_OFFS(T)+1, CA0); \
res += 2; \
} else { \
OPER(res[0], f1, f2); \
i -= 2; \
OPER(res[1], f1, f2); \
PREFETCH_X(res+PREF_OFFS(T), CA0); \
res += 2; \
} \
#define EL_PER_CL(T)
Definition: perf_opt.h:172
#define PREF_OFFS(T)
Definition: perf_opt.h:173
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20

Twice unrolled kernel for 3 args with prefetching.

Definition at line 669 of file unroll_prefetch_def2.h.

#define UNROLL2_PREF_KERNEL4 (   OPER,
  T,
  PREFETCH_X,
  CA0,
  CA1 
)
Value:
if (EL_PER_CL(T) <= 1) { \
OPER(res[0], v1[0], f1, f2); \
i -= 2; \
PREFETCH_R(v1 +PREF_OFFS(T), CA1); \
PREFETCH_R(v1 +PREF_OFFS(T)+1, CA1); \
OPER(res[1], v1[1], f1, f2); \
v1 += 2; \
PREFETCH_X(res+PREF_OFFS(T), CA0); \
PREFETCH_X(res+PREF_OFFS(T)+1, CA0); \
res += 2; \
} else { \
OPER(res[0], v1[0], f1, f2); \
i -= 2; \
PREFETCH_R(v1 +PREF_OFFS(T), CA1); \
OPER(res[1], v1[1], f1, f2); \
v1 += 2; \
PREFETCH_X(res+PREF_OFFS(T), CA0); \
res += 2; \
} \
#define EL_PER_CL(T)
Definition: perf_opt.h:172
#define PREFETCH_R(addr, loc)
In case gcc does not yet support __builtin_prefetch(), we have handcoded assembly with gcc for a few ...
Definition: basics.h:748
#define PREF_OFFS(T)
Definition: perf_opt.h:173
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20

Twice unrolled kernel for 4 args with prefetching.

Definition at line 398 of file unroll_prefetch_def2.h.

#define UNROLL2_PREF_KERNEL5 (   OPER,
  T,
  CA0,
  CA1,
  CA2 
)
Value:
if (EL_PER_CL(T) <= 1) { \
OPER(res[0], v1[0], v2[0], f1, f2); \
PREFETCH_R(v1 +PREF_OFFS(T), CA1); \
PREFETCH_R(v1 +PREF_OFFS(T)+1, CA1); \
i -= 2; \
PREFETCH_R(v2 +PREF_OFFS(T), CA2); \
PREFETCH_R(v2 +PREF_OFFS(T)+1, CA2); \
OPER(res[1], v1[1], v2[1], f1, f2); \
v1 += 2; v2 += 2; \
res += 2; \
} else { \
OPER(res[0], v1[0], v2[0], f1, f2); \
PREFETCH_R(v1 +PREF_OFFS(T), CA1); \
i -= 2; \
PREFETCH_R(v2 +PREF_OFFS(T), CA2); \
OPER(res[1], v1[1], v2[1], f1, f2); \
v1 += 2; v2 += 2; \
res += 2; \
} \
#define EL_PER_CL(T)
Definition: perf_opt.h:172
#define PREFETCH_R(addr, loc)
In case gcc does not yet support __builtin_prefetch(), we have handcoded assembly with gcc for a few ...
Definition: basics.h:748
#define PREF_OFFS(T)
Definition: perf_opt.h:173
#define PREFETCH_W(addr, loc)
Definition: basics.h:749
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20

Twice unrolled kernel for 5 args with prefetching.

Definition at line 73 of file unroll_prefetch_def2.h.

#define UNROLL4_KERNEL3 (   OPER)
Value:
OPER(res[0], f1, f2); \
OPER(res[1], f1, f2); \
i -= 4; \
OPER(res[2], f1, f2); \
OPER(res[3], f1, f2); \
res += 4
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199

Four times unrolled kernel for 3 args without prefetching.

Definition at line 730 of file unroll_prefetch_def2.h.

#define UNROLL4_KERNEL3_FIXUP   do {} while(0)

Definition at line 739 of file unroll_prefetch_def2.h.

#define UNROLL4_KERNEL3_PREPARE   do {} while(0)

Definition at line 738 of file unroll_prefetch_def2.h.

#define UNROLL4_KERNEL4 (   OPER)
Value:
OPER(res[0], v1[0], f1, f2); \
OPER(res[1], v1[1], f1, f2); \
v1 += 4; i -= 4; \
OPER(res[2], v1[-2], f1, f2); \
OPER(res[3], v1[-1], f1, f2); \
res += 4
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199

Four times unrolled kernel for 4 args without prefetching.

Definition at line 474 of file unroll_prefetch_def2.h.

#define UNROLL4_KERNEL4_FIXUP   do {} while(0)

Definition at line 483 of file unroll_prefetch_def2.h.

#define UNROLL4_KERNEL4_PREPARE   do {} while(0)

Definition at line 482 of file unroll_prefetch_def2.h.

#define UNROLL4_KERNEL5 (   OPER)
Value:
OPER(res[0], v1[0], v2[0], f1, f2); \
OPER(res[1], v1[1], v2[1], f1, f2); \
i -= 4; \
OPER(res[2], v1[2], v2[2], f1, f2); \
OPER(res[3], v1[3], v2[3], f1, f2); \
v1 += 4; v2 += 4; \
res += 4
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199

Four times unrolled kernel for 5 args without prefetching.

Definition at line 159 of file unroll_prefetch_def2.h.

#define UNROLL4_KERNEL5_FIXUP   do {} while(0)

Definition at line 169 of file unroll_prefetch_def2.h.

#define UNROLL4_KERNEL5_PREPARE   do {} while(0)

Definition at line 168 of file unroll_prefetch_def2.h.

#define UNROLL4_PREF_KERNEL3 (   OPER,
  T,
  PREFETCH_X,
  CA0 
)
Value:
if (EL_PER_CL(T) <= 1) { \
OPER(res[0], f1, f2); \
i -= 4; \
PREFETCH_X(res+PREF_OFFS(T), CA0); \
OPER(res[1], f1, f2); \
PREFETCH_X(res+PREF_OFFS(T)+1, CA0); \
OPER(res[2], f1, f2); \
PREFETCH_X(res+PREF_OFFS(T)+2, CA0); \
OPER(res[3], f1, f2); \
PREFETCH_X(res+PREF_OFFS(T)+3, CA0); \
res += 4; \
} else if (EL_PER_CL(T) <= 2) { \
OPER(res[0], f1, f2); \
PREFETCH_X(res+PREF_OFFS(T), CA0); \
OPER(res[1], f1, f2); \
i -= 4; \
OPER(res[2], f1, f2); \
PREFETCH_X(res+PREF_OFFS(T)+2, CA0); \
OPER(res[3], f1, f2); \
res += 4; \
} else { \
OPER(res[0], f1, f2); \
i -= 4; \
OPER(res[1], f1, f2); \
PREFETCH_X(res+PREF_OFFS(T), CA0); \
OPER(res[2], f1, f2); \
OPER(res[3], f1, f2); \
res += 4; \
}
#define EL_PER_CL(T)
Definition: perf_opt.h:172
#define PREF_OFFS(T)
Definition: perf_opt.h:173
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20

Four times unrolled kernel for 3 args with prefetching.

Definition at line 698 of file unroll_prefetch_def2.h.

#define UNROLL4_PREF_KERNEL4 (   OPER,
  T,
  PREFETCH_X,
  CA0,
  CA1 
)

Four times unrolled kernel for 4 args with prefetching.

Definition at line 432 of file unroll_prefetch_def2.h.

#define UNROLL4_PREF_KERNEL5 (   OPER,
  T,
  CA0,
  CA1,
  CA2 
)

Four times unrolled kernel for 5 args with prefetching.

Definition at line 110 of file unroll_prefetch_def2.h.

#define UNROLL8_KERNEL3 (   OPER)
Value:
OPER(res[0], f1, f2); \
OPER(res[1], f1, f2); \
OPER(res[2], f1, f2); \
OPER(res[3], f1, f2); \
i -= 8; \
OPER(res[4], f1, f2); \
OPER(res[5], f1, f2); \
OPER(res[6], f1, f2); \
OPER(res[7], f1, f2); \
res += 8
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199

Eight times unrolled kernel for 3 args without prefetching.

Definition at line 807 of file unroll_prefetch_def2.h.

#define UNROLL8_KERNEL3_FIXUP   do {} while(0)

Definition at line 820 of file unroll_prefetch_def2.h.

#define UNROLL8_KERNEL3_PREPARE   do {} while(0)

Definition at line 819 of file unroll_prefetch_def2.h.

#define UNROLL8_KERNEL4 (   OPER)
Value:
OPER(res[0], v1[0], f1, f2); \
OPER(res[1], v1[1], f1, f2); \
OPER(res[2], v1[2], f1, f2); \
OPER(res[3], v1[3], f1, f2); \
v1 += 8; i -= 8; \
OPER(res[4], v1[-4], f1, f2); \
OPER(res[5], v1[-3], f1, f2); \
OPER(res[6], v1[-2], f1, f2); \
OPER(res[7], v1[-1], f1, f2); \
res += 8
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199

Eight times unrolled kernel for 4 args without prefetching.

Definition at line 570 of file unroll_prefetch_def2.h.

#define UNROLL8_KERNEL4_FIXUP   do {} while(0)

Definition at line 583 of file unroll_prefetch_def2.h.

#define UNROLL8_KERNEL4_PREPARE   do {} while(0)

Definition at line 582 of file unroll_prefetch_def2.h.

#define UNROLL8_KERNEL5 (   OPER)
Value:
OPER(res[0], v1[0], v2[0], f1, f2); \
OPER(res[1], v1[1], v2[1], f1, f2); \
OPER(res[2], v1[2], v2[2], f1, f2); \
OPER(res[3], v1[3], v2[3], f1, f2); \
i -= 8; \
OPER(res[4], v1[4], v2[4], f1, f2); \
OPER(res[5], v1[5], v2[5], f1, f2); \
OPER(res[6], v1[6], v2[6], f1, f2); \
OPER(res[7], v1[7], v2[7], f1, f2); \
v1 += 8; v2 += 8; \
res += 8
int i
Definition: LM_fit.h:71
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199

Eight times unrolled kernel for 5 args without prefetching.

Definition at line 271 of file unroll_prefetch_def2.h.

#define UNROLL8_KERNEL5_FIXUP   do {} while(0)

Definition at line 285 of file unroll_prefetch_def2.h.

#define UNROLL8_KERNEL5_PREPARE   do {} while(0)

Definition at line 284 of file unroll_prefetch_def2.h.

#define UNROLL8_PREF_KERNEL3 (   OPER,
  T,
  PREFETCH_X,
  CA0 
)

Eight times unrolled kernel for 3 args with prefetching.

Definition at line 743 of file unroll_prefetch_def2.h.

#define UNROLL8_PREF_KERNEL4 (   OPER,
  T,
  PREFETCH_X,
  CA0,
  CA1 
)

Eight times unrolled kernel for 4 args with prefetching.

Definition at line 487 of file unroll_prefetch_def2.h.

#define UNROLL8_PREF_KERNEL5 (   OPER,
  T,
  CA0,
  CA1,
  CA2 
)

Eight times unrolled kernel for 5 args with prefetching.

Definition at line 173 of file unroll_prefetch_def2.h.

#define UNROLL_DEPTH   4

When unrolling the loops, I had the following architectural details in mind:

  • We have a superscalar pipelined instruction execution. Which means
    • We can execute more than one instruction in parallel per cycle. This was the reason to mix FP and Integer insns.
    • That we should have some delay between doing a computation and using the result, as the computation has to go through the pipeline before the result becomes available.
    • Unlike in unroll_prefetch_def.h, we assume that the delay for address computation is more significant than using a good mix between integer and FP instructions.
  • We have a relatively slow memory and fast caches; therefore we issue prefetch instructions to trigger memory loads before the data is needed. These prefetch insns are supposed to trigger the data to be transferred from memory into the local cache without causing the pipelines to stall. When the data is actually accessed, it should be in local cache and not cause any delay.
  • Though prefetching beyond the array should not cause segfaults, we avoid it for performance reasons. Especially important on SMP.

Funny enough, with this little knowledge, we do better than any compiler I found. Compaq cxx on alpha comes close, though. KG.

Definition at line 43 of file unroll_prefetch_def2.h.

#define VKERN_TEMPL_1V (   FNAME,
  OP1 
)
Value:
INST(template <typename T> class Vector<T> friend VEC_INLINE void FNAME \
(const unsigned long, T* RESTRICT const);) \
template <typename T> \
VEC_INLINE void FNAME (const unsigned long sz, \
T* RESTRICT const _res) \
{ \
REGISTER long i = sz; \
REGISTER T* res = _res; \
do { \
UNR_KERNEL3(OP1); \
} while (i >= UNROLL_DEPTH); \
} \ \
for (; i; --i) { \
OP1(*res, f1, f2); \
++res; \
} \
}
#define CACHE_LOC_WRITE
Definition: perf_opt.h:168
#define REGISTER
Definition: basics.h:108
#define UNR_KERNEL3_PREP
#define UNR_KERNEL3
for(REGISTER T *p1=c.vec,*p2=b.vec;p1< c.endvec;p1++, p2++)*p1
#define PREFETCH_R(addr, loc)
In case gcc does not yet support __builtin_prefetch(), we have handcoded assembly with gcc for a few ...
Definition: basics.h:748
#define VEC_INLINE
Definition: basics.h:1266
if(value==0) return 1
#define UNR_KERNEL3_FIX
#define INST(x)
Definition: basics.h:238
int i
Definition: LM_fit.h:71
Definition: bvector.h:54
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20
#define UNROLL_DEPTH
When unrolling the loops, I had the following architectural details in mind:
#define RESTRICT
Definition: basics.h:89
#define VKERN_TEMPL_1V_PREF(OP, T, PREFETCH_X, CW)
#define LIKELY(expr)
branch prediction note that we sometimes on purpose mark the unlikely possibility likely and vice ver...
Definition: basics.h:100

Operations of type VEC = OP self.

Definition at line 1235 of file unroll_prefetch_def2.h.

#define VKERN_TEMPL_1V_C (   FNAME,
  OP1 
)
Value:
INST(template <typename T> class Vector<T> friend VEC_INLINE void FNAME \
(const unsigned long, T* RESTRICT const, LCTYPED(T));) \
template <typename T> \
VEC_INLINE void FNAME (const unsigned long sz, \
T* RESTRICT const _res, \
LCTYPE(T) f2) \
{ \
REGISTER long i = sz; \
REGISTER T* res = _res; \
do { \
UNR_KERNEL3(OP1); \
} while (i >= UNROLL_DEPTH); \
} \ \
for (; i; --i) { \
OP1(*res, f1, f2); \
++res; \
} \
}
#define CACHE_LOC_WRITE
Definition: perf_opt.h:168
#define REGISTER
Definition: basics.h:108
#define UNR_KERNEL3_PREP
#define UNR_KERNEL3
for(REGISTER T *p1=c.vec,*p2=b.vec;p1< c.endvec;p1++, p2++)*p1
#define PREFETCH_R(addr, loc)
In case gcc does not yet support __builtin_prefetch(), we have handcoded assembly with gcc for a few ...
Definition: basics.h:748
#define VEC_INLINE
Definition: basics.h:1266
if(value==0) return 1
#define UNR_KERNEL3_FIX
#define LCTYPE(T)
Shortcut for loop const ref type.
#define INST(x)
Definition: basics.h:238
int i
Definition: LM_fit.h:71
#define LCTYPED(T)
Definition: bvector.h:54
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20
#define UNROLL_DEPTH
When unrolling the loops, I had the following architectural details in mind:
#define RESTRICT
Definition: basics.h:89
#define VKERN_TEMPL_1V_PREF(OP, T, PREFETCH_X, CW)
#define LIKELY(expr)
branch prediction note that we sometimes on purpose mark the unlikely possibility likely and vice ver...
Definition: basics.h:100

Operations of type VEC OP= VAL.

Definition at line 1261 of file unroll_prefetch_def2.h.

#define VKERN_TEMPL_1V_CC (   FNAME,
  OP1 
)
Value:
INST(template <typename T> class Vector<T> friend VEC_INLINE void FNAME \
(const unsigned long, T* RESTRICT const, LCTYPED(T), \
LCTYPED(T));) \
template <typename T> \
VEC_INLINE void FNAME (const unsigned long sz, \
T* RESTRICT const _res, \
LCTYPE(T) f1, \
LCTYPE(T) f2) \
{ \
REGISTER long i = sz; \
REGISTER T* res = _res; \
do { \
UNR_KERNEL3(OP1); \
} while (i >= UNROLL_DEPTH); \
} \ \
for (; i; --i) { \
OP1(*res, f1, f2); \
++res; \
} \
}
#define CACHE_LOC_WRITE
Definition: perf_opt.h:168
#define REGISTER
Definition: basics.h:108
#define UNR_KERNEL3_PREP
#define UNR_KERNEL3
for(REGISTER T *p1=c.vec,*p2=b.vec;p1< c.endvec;p1++, p2++)*p1
#define PREFETCH_R(addr, loc)
In case gcc does not yet support __builtin_prefetch(), we have handcoded assembly with gcc for a few ...
Definition: basics.h:748
#define VEC_INLINE
Definition: basics.h:1266
if(value==0) return 1
#define UNR_KERNEL3_FIX
#define LCTYPE(T)
Shortcut for loop const ref type.
#define INST(x)
Definition: basics.h:238
int i
Definition: LM_fit.h:71
#define LCTYPED(T)
Definition: bvector.h:54
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20
#define UNROLL_DEPTH
When unrolling the loops, I had the following architectural details in mind:
#define RESTRICT
Definition: basics.h:89
#define VKERN_TEMPL_1V_PREF(OP, T, PREFETCH_X, CW)
#define LIKELY(expr)
branch prediction note that we sometimes on purpose mark the unlikely possibility likely and vice ver...
Definition: basics.h:100

Operations of type VEC *= S OP= VAL.

Definition at line 1288 of file unroll_prefetch_def2.h.

#define VKERN_TEMPL_1V_PREF (   OP,
  T,
  PREFETCH_X,
  CW 
)    do {} while (0)

Definition at line 993 of file unroll_prefetch_def2.h.

#define VKERN_TEMPL_1V_T (   FNAME,
  OP1,
  TYPE 
)
Value:
INST(template <typename T> class Vector<T> friend VEC_INLINE void FNAME \
(const unsigned long, const T* const, TYPE&);) \
template <typename T> \
VEC_INLINE void FNAME (const unsigned long sz, \
const T* const _res, \
TYPE &_f2) \
{ \
/* REGISTER tbci_traits<TYPE>::loop_refval_type f2(_f2); */ \
REGISTER TYPE f2(_f2), f1(0.0); \
REGISTER const T* res = _res; \
REGISTER long i = sz; \
do { \
UNR_KERNEL3(OP1); \
} while (i >= UNROLL_DEPTH); \
} \ \
for (; i; --i) { \
OP1(*res, f1, f2); \
++res; \
} \
_f2 = f2 - f1; \
}
#define REGISTER
Definition: basics.h:108
#define UNR_KERNEL3_PREP
#define UNR_KERNEL3
for(REGISTER T *p1=c.vec,*p2=b.vec;p1< c.endvec;p1++, p2++)*p1
#define PREFETCH_R(addr, loc)
In case gcc does not yet support __builtin_prefetch(), we have handcoded assembly with gcc for a few ...
Definition: basics.h:748
#define VEC_INLINE
Definition: basics.h:1266
if(value==0) return 1
#define UNR_KERNEL3_FIX
#define INST(x)
Definition: basics.h:238
int i
Definition: LM_fit.h:71
Definition: bvector.h:54
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20
#define CACHE_LOC_READ
Cache locality for read from and written to pointers 0: don&#39;t cache (streaming data, only accessed once).
Definition: perf_opt.h:165
#define UNROLL_DEPTH
When unrolling the loops, I had the following architectural details in mind:
#define VKERN_TEMPL_1V_PREF(OP, T, PREFETCH_X, CW)
#define LIKELY(expr)
branch prediction note that we sometimes on purpose mark the unlikely possibility likely and vice ver...
Definition: basics.h:100

Operations of type TYPE = OP VEC.

Definition at line 1317 of file unroll_prefetch_def2.h.

#define VKERN_TEMPL_1V_T_LD (   FNAME,
  OP1,
  TYPE 
)
Value:
INST(template <typename T> class Vector<T> friend VEC_INLINE void FNAME \
(const unsigned long, const T* const, TYPE&);) \
template <typename T> \
VEC_INLINE void FNAME (const unsigned long sz, \
const T* const _res, \
TYPE &_f2) \
{ \
/* REGISTER tbci_traits<TYPE>::loop_refval_type f2(_f2); */ \
REGISTER const T* res = _res; \
REGISTER long i = sz; \
do { \
UNR_KERNEL3(OP1); \
} while (i >= UNROLL_DEPTH); \
} \ \
for (; i; --i) { \
OP1(*res, f1, f2); \
++res; \
} \
_f2 = f2; \
}
#define REGISTER
Definition: basics.h:108
#define UNR_KERNEL3_PREP
#define UNR_KERNEL3
for(REGISTER T *p1=c.vec,*p2=b.vec;p1< c.endvec;p1++, p2++)*p1
#define PREFETCH_R(addr, loc)
In case gcc does not yet support __builtin_prefetch(), we have handcoded assembly with gcc for a few ...
Definition: basics.h:748
#define VEC_INLINE
Definition: basics.h:1266
if(value==0) return 1
#define UNR_KERNEL3_FIX
#define INST(x)
Definition: basics.h:238
int i
Definition: LM_fit.h:71
Definition: bvector.h:54
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20
#define CACHE_LOC_READ
Cache locality for read from and written to pointers 0: don&#39;t cache (streaming data, only accessed once).
Definition: perf_opt.h:165
#define LONG_DOUBLE
Definition: basics.h:219
#define UNROLL_DEPTH
When unrolling the loops, I had the following architectural details in mind:
#define VKERN_TEMPL_1V_PREF(OP, T, PREFETCH_X, CW)
#define LIKELY(expr)
branch prediction note that we sometimes on purpose mark the unlikely possibility likely and vice ver...
Definition: basics.h:100

Operations of type TYPE = OP VEC (using LONG_DOUBLE internally)

Definition at line 1347 of file unroll_prefetch_def2.h.

#define VKERN_TEMPL_2V (   FNAME,
  OP2 
)
Value:
INST(template <typename T> class Vector<T> friend VEC_INLINE void FNAME \
(const unsigned long, T* RESTRICT const, const T* RESTRICT const);) \
template <typename T> \
VEC_INLINE void FNAME (const unsigned long sz, \
T* RESTRICT const _res, \
const T* RESTRICT const _v1) \
{ \
PREFETCH_R(_v1, 3); \
REGISTER const T *v1 = _v1; \
REGISTER T* res = _res; \
REGISTER long i = sz; \
do { \
UNR_KERNEL4(OP2); \
} while (i >= UNROLL_DEPTH); \
} \ \
for (; i; --i) { \
OP2(*res, *v1, f1, f2); \
++v1; ++res; \
} \
}
#define CACHE_LOC_WRITE
Definition: perf_opt.h:168
#define REGISTER
Definition: basics.h:108
#define UNR_KERNEL4_FIX
for(REGISTER T *p1=c.vec,*p2=b.vec;p1< c.endvec;p1++, p2++)*p1
#define PREFETCH_R(addr, loc)
In case gcc does not yet support __builtin_prefetch(), we have handcoded assembly with gcc for a few ...
Definition: basics.h:748
#define VEC_INLINE
Definition: basics.h:1266
if(value==0) return 1
#define INST(x)
Definition: basics.h:238
#define VKERN_TEMPL_2V_PREF(OP, T, PREFETCH_X, CW)
int i
Definition: LM_fit.h:71
Definition: bvector.h:54
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20
#define UNR_KERNEL4
#define UNROLL_DEPTH
When unrolling the loops, I had the following architectural details in mind:
#define RESTRICT
Definition: basics.h:89
#define UNR_KERNEL4_PREP
#define LIKELY(expr)
branch prediction note that we sometimes on purpose mark the unlikely possibility likely and vice ver...
Definition: basics.h:100

Operations of type vec OP= vec.

Definition at line 1108 of file unroll_prefetch_def2.h.

#define VKERN_TEMPL_2V_C (   FNAME,
  OP2 
)
Value:
INST(template <typename T> class Vector<T> friend VEC_INLINE void FNAME \
(const unsigned long, T* RESTRICT const, const T* RESTRICT const, \
LCTYPED(T));) \
template <typename T> \
VEC_INLINE void FNAME (const unsigned long sz, \
T* RESTRICT const _res, \
const T* RESTRICT const _v1, \
LCTYPE(T) f2) \
{ \
PREFETCH_R(_v1, 3); \
REGISTER const T *v1 = _v1; \
REGISTER T* res = _res; \
REGISTER long i = sz; \
do { \
UNR_KERNEL4(OP2); \
} while (i >= UNROLL_DEPTH); \
} \ \
for (; i; --i) { \
OP2(*res, *v1, f1, f2); \
++v1; ++res; \
} \
}
#define CACHE_LOC_WRITE
Definition: perf_opt.h:168
#define REGISTER
Definition: basics.h:108
#define UNR_KERNEL4_FIX
for(REGISTER T *p1=c.vec,*p2=b.vec;p1< c.endvec;p1++, p2++)*p1
#define PREFETCH_R(addr, loc)
In case gcc does not yet support __builtin_prefetch(), we have handcoded assembly with gcc for a few ...
Definition: basics.h:748
#define VEC_INLINE
Definition: basics.h:1266
if(value==0) return 1
#define LCTYPE(T)
Shortcut for loop const ref type.
#define PREFETCH_W(addr, loc)
Definition: basics.h:749
#define INST(x)
Definition: basics.h:238
#define VKERN_TEMPL_2V_PREF(OP, T, PREFETCH_X, CW)
int i
Definition: LM_fit.h:71
#define LCTYPED(T)
Definition: bvector.h:54
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20
#define UNR_KERNEL4
#define UNROLL_DEPTH
When unrolling the loops, I had the following architectural details in mind:
#define RESTRICT
Definition: basics.h:89
#define UNR_KERNEL4_PREP
#define LIKELY(expr)
branch prediction note that we sometimes on purpose mark the unlikely possibility likely and vice ver...
Definition: basics.h:100

Operations of type VEC = VEC OP VAL or VAL OP VEC.

Definition at line 1137 of file unroll_prefetch_def2.h.

#define VKERN_TEMPL_2V_CC (   FNAME,
  OP2 
)
Value:
INST(template <typename T> class Vector<T> friend VEC_INLINE void FNAME \
(const unsigned long, T* RESTRICT const, const T* RESTRICT const, \
LCTYPED(T), LCTYPED(T));) \
template <typename T> \
VEC_INLINE void FNAME (const unsigned long sz, \
T* RESTRICT const _res, \
const T* RESTRICT const _v1, \
LCTYPE(T) f1, \
LCTYPE(T) f2) \
{ \
PREFETCH_R(_v1, 3); \
REGISTER const T *v1 = _v1; \
REGISTER T* res = _res; \
REGISTER long i = sz; \
do { \
UNR_KERNEL4(OP2); \
} while (i >= UNROLL_DEPTH); \
} \ \
for (; i; --i) { \
OP2(*res, *v1, f1, f2); \
++v1; ++res; \
} \
}
#define CACHE_LOC_WRITE
Definition: perf_opt.h:168
#define REGISTER
Definition: basics.h:108
#define UNR_KERNEL4_FIX
for(REGISTER T *p1=c.vec,*p2=b.vec;p1< c.endvec;p1++, p2++)*p1
#define PREFETCH_R(addr, loc)
In case gcc does not yet support __builtin_prefetch(), we have handcoded assembly with gcc for a few ...
Definition: basics.h:748
#define VEC_INLINE
Definition: basics.h:1266
if(value==0) return 1
#define LCTYPE(T)
Shortcut for loop const ref type.
#define PREFETCH_W(addr, loc)
Definition: basics.h:749
#define INST(x)
Definition: basics.h:238
#define VKERN_TEMPL_2V_PREF(OP, T, PREFETCH_X, CW)
int i
Definition: LM_fit.h:71
#define LCTYPED(T)
Definition: bvector.h:54
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20
#define UNR_KERNEL4
#define UNROLL_DEPTH
When unrolling the loops, I had the following architectural details in mind:
#define RESTRICT
Definition: basics.h:89
#define UNR_KERNEL4_PREP
#define LIKELY(expr)
branch prediction note that we sometimes on purpose mark the unlikely possibility likely and vice ver...
Definition: basics.h:100

Operations of type VEC = VEC OP VAL or VAL OP VEC.

Definition at line 1168 of file unroll_prefetch_def2.h.

#define VKERN_TEMPL_2V_PREF (   OP,
  T,
  PREFETCH_X,
  CW 
)    do {} while (0)

Definition at line 992 of file unroll_prefetch_def2.h.

#define VKERN_TEMPL_2V_T (   FNAME,
  OP2,
  TYPE 
)

Operations of type TYPE = VEC OP VEC.

Definition at line 1200 of file unroll_prefetch_def2.h.

#define VKERN_TEMPL_3V (   FNAME,
  OP3 
)
Value:
INST(template <typename T> class Vector<T> friend VEC_INLINE void FNAME \
(const unsigned long, T* RESTRICT const, const T* RESTRICT const, const T* RESTRICT const);) \
template <typename T> \
VEC_INLINE void FNAME (const unsigned long sz, \
T* RESTRICT const _res, \
const T* RESTRICT const _v1, \
const T* RESTRICT const _v2) \
{ \
PREFETCH_R(_v1, 3); PREFETCH_R(_v2, 3); \
REGISTER const T *v1 = _v1, *v2 = _v2; \
REGISTER T *res = _res; \
REGISTER long i = sz; \
do { \
UNR_KERNEL5(OP3); \
} while (i >= UNROLL_DEPTH); \
} \ \
for (; i; --i) { \
OP3(*res, *v1, *v2, f1, f2); \
++v1; ++v2; ++res; \
} \
}
#define REGISTER
Definition: basics.h:108
#define UNR_KERNEL5
for(REGISTER T *p1=c.vec,*p2=b.vec;p1< c.endvec;p1++, p2++)*p1
#define PREFETCH_R(addr, loc)
In case gcc does not yet support __builtin_prefetch(), we have handcoded assembly with gcc for a few ...
Definition: basics.h:748
#define VKERN_TEMPL_3V_PREF(OP, T)
Fragments to be combined for different cases 1,2,3 vector fields 0,1,2 scalars to multiply with varia...
#define UNR_KERNEL5_FIX
#define VEC_INLINE
Definition: basics.h:1266
if(value==0) return 1
#define INST(x)
Definition: basics.h:238
int i
Definition: LM_fit.h:71
Definition: bvector.h:54
#define UNR_KERNEL5_PREP
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20
#define UNROLL_DEPTH
When unrolling the loops, I had the following architectural details in mind:
#define RESTRICT
Definition: basics.h:89
#define LIKELY(expr)
branch prediction note that we sometimes on purpose mark the unlikely possibility likely and vice ver...
Definition: basics.h:100

gcc-2.95.x seems to fail caching a const double& in a REGISTER.

So we have to use a local REGISTER var to force it doing so. for maximum performance. However, this is only beneficial in case we have an elementary type that does fit into a REGISTER. It would be nice to have macros that automatically do it when needed. However, sizeof(T) can't be evaluated by the preprocessor, so we can't know. Instead we use explicit specialization of our templates.Operations of type vec = vec OP vec

Definition at line 1013 of file unroll_prefetch_def2.h.

#define VKERN_TEMPL_3V_C (   FNAME,
  OP3 
)
Value:
INST(template <typename T> class Vector<T> friend VEC_INLINE void FNAME \
(const unsigned long, T* RESTRICT const, const T* RESTRICT const, \
const T* RESTRICT const, LCTYPED(T));) \
template <typename T> \
VEC_INLINE void FNAME (const unsigned long sz, \
T* RESTRICT const _res, \
const T* RESTRICT const _v1, \
const T* RESTRICT const _v2, \
LCTYPE(T) f2) \
{ \
PREFETCH_R(_v1, 3); PREFETCH_R(_v2, 3); \
REGISTER const T *v1 = _v1. *v2 = _v2; \
REGISTER T *res = _res; \
REGISTER long i = sz; \
do { \
UNR_KERNEL5(OP3); \
} while (i >= UNROLL_DEPTH); \
} \ \
for (; i; --i) { \
OP3(*res, *v1, *v2, f1, f2); \
++v1; ++v2; ++res; \
} \
}
#define REGISTER
Definition: basics.h:108
#define UNR_KERNEL5
for(REGISTER T *p1=c.vec,*p2=b.vec;p1< c.endvec;p1++, p2++)*p1
#define PREFETCH_R(addr, loc)
In case gcc does not yet support __builtin_prefetch(), we have handcoded assembly with gcc for a few ...
Definition: basics.h:748
#define VKERN_TEMPL_3V_PREF(OP, T)
Fragments to be combined for different cases 1,2,3 vector fields 0,1,2 scalars to multiply with varia...
#define UNR_KERNEL5_FIX
#define VEC_INLINE
Definition: basics.h:1266
if(value==0) return 1
#define LCTYPE(T)
Shortcut for loop const ref type.
#define INST(x)
Definition: basics.h:238
int i
Definition: LM_fit.h:71
#define LCTYPED(T)
Definition: bvector.h:54
#define UNR_KERNEL5_PREP
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20
#define UNROLL_DEPTH
When unrolling the loops, I had the following architectural details in mind:
#define RESTRICT
Definition: basics.h:89
#define LIKELY(expr)
branch prediction note that we sometimes on purpose mark the unlikely possibility likely and vice ver...
Definition: basics.h:100

Operations of type vec = vec OP val * vec.

Definition at line 1043 of file unroll_prefetch_def2.h.

#define VKERN_TEMPL_3V_CC (   FNAME,
  OP3 
)
Value:
INST(template <typename T> class Vector<T> friend VEC_INLINE void FNAME \
(const unsigned long, T* RESTRICT const, const T* RESTRICT const, \
const T* RESTRICT const, LCTYPED(T), LCTYPED(T));) \
template <typename T> \
VEC_INLINE void FNAME (const unsigned long sz, \
T* RESTRICT const _res, \
const T* RESTRICT const _v1, \
const T* RESTRICT const _v2, \
LCTYPE(T) f1, \
LCTYPE(T) f2) \
{ \
PREFETCH_R(_v1, 3); PREFETCH_R(_v2, 3); \
REGISTER long i = sz; \
REGISTER const T *v1 = _v1, *v2 = _v2; \
REGISTER T *res = _res; \
do { \
UNR_KERNEL5(OP3); \
} while (i >= UNROLL_DEPTH); \
} \ \
for (; i; --i) { \
OP3(*res, *v1, *v2, f1, f2); \
++v1; ++v2; ++res; \
} \
}
#define REGISTER
Definition: basics.h:108
#define UNR_KERNEL5
for(REGISTER T *p1=c.vec,*p2=b.vec;p1< c.endvec;p1++, p2++)*p1
#define PREFETCH_R(addr, loc)
In case gcc does not yet support __builtin_prefetch(), we have handcoded assembly with gcc for a few ...
Definition: basics.h:748
#define VKERN_TEMPL_3V_PREF(OP, T)
Fragments to be combined for different cases 1,2,3 vector fields 0,1,2 scalars to multiply with varia...
#define UNR_KERNEL5_FIX
#define VEC_INLINE
Definition: basics.h:1266
if(value==0) return 1
#define LCTYPE(T)
Shortcut for loop const ref type.
#define INST(x)
Definition: basics.h:238
int i
Definition: LM_fit.h:71
#define LCTYPED(T)
Definition: bvector.h:54
#define UNR_KERNEL5_PREP
const Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > Vector< T > long int res
Definition: LM_fit.h:199
#define T
Definition: bdmatlib.cc:20
#define UNROLL_DEPTH
When unrolling the loops, I had the following architectural details in mind:
#define RESTRICT
Definition: basics.h:89
#define LIKELY(expr)
branch prediction note that we sometimes on purpose mark the unlikely possibility likely and vice ver...
Definition: basics.h:100

Operations of type vec = val * vec OP val * vec.

Definition at line 1075 of file unroll_prefetch_def2.h.

#define VKERN_TEMPL_3V_PREF (   OP,
  T 
)    do {} while (0)

Fragments to be combined for different cases 1,2,3 vector fields 0,1,2 scalars to multiply with variable number of data elements per cacheline 1,2,4,8,16 cachelines ahead prefetch 1,2,4,8 fold unrolling.

The structure is the same, always. (1) Before anything else, start read prefecthing. (2) Unrolled and (both read+write) prefetching loop (3) Unrolled loop (for the elements where prefecthing would be beyond array which could be a performance problem and for write prefecthing maybe a real problem (4) Non-unrolled loop for the remaining elements.

Definition at line 991 of file unroll_prefetch_def2.h.