DPDK
25.11.0
Toggle main menu visibility
Loading...
Searching...
No Matches
rte_pmu.h
Go to the documentation of this file.
1
/* SPDX-License-Identifier: BSD-3-Clause
2
* Copyright(c) 2025 Marvell
3
*/
4
5
#ifndef RTE_PMU_H
6
#define RTE_PMU_H
7
27
28
#include <linux/perf_event.h>
29
30
#include <
rte_atomic.h
>
31
#include <
rte_branch_prediction.h
>
32
#include <
rte_common.h
>
33
#include <rte_compat.h>
34
#include <
rte_debug.h
>
35
#include <
rte_lcore.h
>
36
37
#if defined(RTE_ARCH_ARM64)
38
#include "rte_pmu_pmc_arm64.h"
39
#elif defined(RTE_ARCH_X86_64)
40
#include "rte_pmu_pmc_x86_64.h"
41
#endif
42
43
#ifdef __cplusplus
44
extern
"C"
{
45
#endif
46
48
/** Maximum number of events that can be grouped together and read per lcore. */
#define RTE_MAX_NUM_GROUP_EVENTS 8
49
53
/**
 * A structure describing one lcore's group of perf events.
 * Cache-aligned so that each lcore's group does not share cache lines
 * with its neighbours.
 */
struct __rte_cache_aligned rte_pmu_event_group {
	/** Kernel-shared mmap pages, one per event; read by __rte_pmu_read_userpage(). */
	struct perf_event_mmap_page *mmap_pages[RTE_MAX_NUM_GROUP_EVENTS];
	/** Per-event file descriptors — presumably from perf_event_open(); confirm against the implementation. */
	int fds[RTE_MAX_NUM_GROUP_EVENTS];
	/** Linkage for a list of groups; the list head is not visible in this header. */
	TAILQ_ENTRY(rte_pmu_event_group) next;
	/** Checked in rte_pmu_read() to trigger lazy __rte_pmu_enable_group(); presumably set by that call. */
	bool enabled;
};
60
64
/**
 * A structure holding the global PMU library state.
 */
struct rte_pmu {
	/** One event group per lcore, indexed by rte_lcore_id(). */
	struct rte_pmu_event_group event_groups[RTE_MAX_LCORE];
	/** Number of events configured in each group; upper bound for rte_pmu_read() indices. */
	unsigned int num_group_events;
	/** Nonzero once the library is initialized; rte_pmu_read() returns 0 while this is zero. */
	unsigned int initialized;
	/** PMU name — NOTE(review): source and ownership of this string are not visible in this header; confirm. */
	char *name;
	/** List of configured events; struct rte_pmu_event is declared elsewhere. */
	TAILQ_HEAD(, rte_pmu_event) event_list;
};
71
73
/** Global PMU state, defined in the PMU library and shared by all lcores. */
extern struct rte_pmu rte_pmu;

/* Each architecture supporting PMU needs to provide its own version. */
#ifndef rte_pmu_pmc_read
/* Fallback stub for unsupported architectures: discard the index, read as 0. */
#define rte_pmu_pmc_read(index) ({ RTE_SET_USED(index); 0; })
#endif
79
93
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Read the current value of a perf event counter from its kernel-shared
 * mmap page, adding the live hardware count (via the architecture-specific
 * rte_pmu_pmc_read()) when the kernel permits user-space counter reads.
 *
 * @param pc
 *   Pointer to the perf userpage of the event to read.
 * @return
 *   Current 64-bit counter value: the kernel-maintained offset, plus the
 *   sign-extended hardware count when pc->cap_user_rdpmc is set and the
 *   counter has a valid index.
 */
__rte_experimental
static __rte_always_inline uint64_t
__rte_pmu_read_userpage(struct perf_event_mmap_page *pc)
{
#define __RTE_PMU_READ_ONCE(x) (*(const volatile typeof(x) *)&(x))
	uint64_t width, offset;
	uint32_t seq, index;
	int64_t pmc;

	/*
	 * Seqcount-style retry loop: sample pc->lock before and after reading
	 * the counter state, and retry if it changed in between (the kernel
	 * bumps pc->lock while it updates these fields). See the
	 * perf_event_mmap_page description in perf_event_open(2).
	 */
	for (;;) {
		seq = __RTE_PMU_READ_ONCE(pc->lock);
		rte_compiler_barrier();
		index = __RTE_PMU_READ_ONCE(pc->index);
		offset = __RTE_PMU_READ_ONCE(pc->offset);
		width = __RTE_PMU_READ_ONCE(pc->pmc_width);

		/* index set to 0 means that particular counter cannot be used */
		if (likely(pc->cap_user_rdpmc && index)) {
			/* The userpage index is biased by one relative to the hardware counter. */
			pmc = rte_pmu_pmc_read(index - 1);
			/* Sign-extend the pmc_width-bit raw value to 64 bits. */
			pmc <<= 64 - width;
			pmc >>= 64 - width;
			offset += pmc;
		}

		rte_compiler_barrier();

		if (likely(__RTE_PMU_READ_ONCE(pc->lock) == seq))
			return offset;
	}

	/* Unreachable: the loop above only exits via its return. */
	return 0;
}
125
139
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * @internal
 *
 * Enable the given group of events; called lazily from rte_pmu_read() the
 * first time a counter is read on an lcore whose group is not yet enabled.
 *
 * @param group
 *   Per-lcore event group to enable.
 * @return
 *   0 on success, non-zero otherwise — NOTE(review): exact error codes are
 *   not visible in this header; confirm against the implementation.
 */
__rte_experimental
int
__rte_pmu_enable_group(struct rte_pmu_event_group *group);
142
152
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Initialize the PMU library. Until this succeeds (rte_pmu.initialized is
 * nonzero), rte_pmu_read() returns 0 for every index.
 *
 * @return
 *   0 on success, non-zero otherwise — NOTE(review): error convention not
 *   visible in this header; confirm against the implementation.
 */
__rte_experimental
int
rte_pmu_init(void);
155
162
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Finalize the PMU library — presumed counterpart of rte_pmu_init(),
 * releasing its resources; the implementation is not visible in this header.
 */
__rte_experimental
void
rte_pmu_fini(void);
165
178
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Add an event, identified by name, to the set monitored on each lcore.
 *
 * @param name
 *   Name of the event to add — presumably a perf/sysfs event name; confirm
 *   against the implementation.
 * @return
 *   NOTE(review): inferred from the index-based read API — on success,
 *   an event index usable with rte_pmu_read(); negative otherwise. Confirm
 *   against the implementation.
 */
__rte_experimental
int
rte_pmu_add_event(const char *name);
181
202
__rte_experimental
203
static
__rte_always_inline
uint64_t
204
rte_pmu_read
(
unsigned
int
index)
205
{
206
#ifdef ALLOW_EXPERIMENTAL_API
207
unsigned
int
lcore_id =
rte_lcore_id
();
208
struct
rte_pmu_event_group
*group;
209
210
if
(
unlikely
(!
rte_pmu
.
initialized
))
211
return
0;
212
213
/* non-EAL threads are not supported */
214
if
(
unlikely
(lcore_id >= RTE_MAX_LCORE))
215
return
0;
216
217
if
(
unlikely
(index >=
rte_pmu
.
num_group_events
))
218
return
0;
219
220
group = &
rte_pmu
.
event_groups
[lcore_id];
221
if
(
unlikely
(!group->
enabled
)) {
222
if
(
__rte_pmu_enable_group
(group))
223
return
0;
224
}
225
226
return
__rte_pmu_read_userpage
(group->
mmap_pages
[index]);
227
#else
228
RTE_SET_USED
(index);
229
RTE_VERIFY(
false
);
230
#endif
231
}
232
233
#ifdef __cplusplus
234
}
235
#endif
236
237
#endif
/* RTE_PMU_H */
rte_atomic.h
rte_compiler_barrier
#define rte_compiler_barrier()
Definition
rte_atomic.h:157
rte_branch_prediction.h
likely
#define likely(x)
Definition
rte_branch_prediction.h:26
unlikely
#define unlikely(x)
Definition
rte_branch_prediction.h:43
rte_common.h
RTE_SET_USED
#define RTE_SET_USED(x)
Definition
rte_common.h:264
__rte_cache_aligned
#define __rte_cache_aligned
Definition
rte_common.h:739
__rte_always_inline
#define __rte_always_inline
Definition
rte_common.h:490
rte_debug.h
rte_lcore.h
rte_lcore_id
static unsigned rte_lcore_id(void)
Definition
rte_lcore.h:78
rte_pmu_add_event
__rte_experimental int rte_pmu_add_event(const char *name)
rte_pmu_read
static __rte_experimental __rte_always_inline uint64_t rte_pmu_read(unsigned int index)
Definition
rte_pmu.h:204
__rte_pmu_enable_group
__rte_experimental int __rte_pmu_enable_group(struct rte_pmu_event_group *group)
rte_pmu_init
__rte_experimental int rte_pmu_init(void)
rte_pmu_fini
__rte_experimental void rte_pmu_fini(void)
RTE_MAX_NUM_GROUP_EVENTS
#define RTE_MAX_NUM_GROUP_EVENTS
Definition
rte_pmu.h:48
__rte_pmu_read_userpage
static __rte_experimental __rte_always_inline uint64_t __rte_pmu_read_userpage(struct perf_event_mmap_page *pc)
Definition
rte_pmu.h:95
rte_pmu_event_group
Definition
rte_pmu.h:53
rte_pmu_event_group::mmap_pages
struct perf_event_mmap_page * mmap_pages[RTE_MAX_NUM_GROUP_EVENTS]
Definition
rte_pmu.h:55
rte_pmu_event_group::fds
int fds[RTE_MAX_NUM_GROUP_EVENTS]
Definition
rte_pmu.h:56
rte_pmu_event_group::enabled
bool enabled
Definition
rte_pmu.h:58
rte_pmu_event_group::TAILQ_ENTRY
TAILQ_ENTRY(rte_pmu_event_group) next
rte_pmu
Definition
rte_pmu.h:64
rte_pmu::TAILQ_HEAD
TAILQ_HEAD(, rte_pmu_event) event_list
rte_pmu::event_groups
struct rte_pmu_event_group event_groups[RTE_MAX_LCORE]
Definition
rte_pmu.h:65
rte_pmu::name
char * name
Definition
rte_pmu.h:68
rte_pmu::num_group_events
unsigned int num_group_events
Definition
rte_pmu.h:66
rte_pmu::initialized
unsigned int initialized
Definition
rte_pmu.h:67
lib
pmu
rte_pmu.h
Generated by
1.17.0