Diffstat (limited to 'tools/perf/util')
-rw-r--r--  tools/perf/util/affinity.c                                 8
-rw-r--r--  tools/perf/util/auxtrace.c                                 2
-rw-r--r--  tools/perf/util/counts.c                                   2
-rw-r--r--  tools/perf/util/cpumap.h                                   3
-rw-r--r--  tools/perf/util/cputopo.c                                  4
-rw-r--r--  tools/perf/util/evlist-hybrid.c                           11
-rw-r--r--  tools/perf/util/evlist.c                                  28
-rw-r--r--  tools/perf/util/evsel.c                                   45
-rw-r--r--  tools/perf/util/evsel.h                                    3
-rw-r--r--  tools/perf/util/machine.c                                  3
-rw-r--r--  tools/perf/util/mmap.c                                     2
-rw-r--r--  tools/perf/util/parse-events.c                            67
-rw-r--r--  tools/perf/util/parse-events.h                             1
-rw-r--r--  tools/perf/util/parse-events.l                             2
-rw-r--r--  tools/perf/util/parse-events.y                            17
-rw-r--r--  tools/perf/util/perf_api_probe.c                           4
-rw-r--r--  tools/perf/util/probe-event.c                              3
-rw-r--r--  tools/perf/util/python.c                                   6
-rw-r--r--  tools/perf/util/record.c                                   6
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c     4
-rw-r--r--  tools/perf/util/session.c                                  4
-rw-r--r--  tools/perf/util/svghelper.c                                4
-rw-r--r--  tools/perf/util/synthetic-events.c                        18
-rw-r--r--  tools/perf/util/top.c                                      6
24 files changed, 172 insertions, 81 deletions
diff --git a/tools/perf/util/affinity.c b/tools/perf/util/affinity.c
index f1e30d566db3..4d216c0dc425 100644
--- a/tools/perf/util/affinity.c
+++ b/tools/perf/util/affinity.c
@@ -62,7 +62,7 @@ void affinity__set(struct affinity *a, int cpu)
clear_bit(cpu, a->sched_cpus);
}
-void affinity__cleanup(struct affinity *a)
+static void __affinity__cleanup(struct affinity *a)
{
int cpu_set_size = get_cpu_set_size();
@@ -71,3 +71,9 @@ void affinity__cleanup(struct affinity *a)
zfree(&a->sched_cpus);
zfree(&a->orig_cpus);
}
+
+void affinity__cleanup(struct affinity *a)
+{
+ if (a != NULL)
+ __affinity__cleanup(a);
+}
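
The hunk above routes the real cleanup through a static helper so that affinity__cleanup() can accept NULL. A standalone sketch of the pattern (illustrative names, not perf code):

#include <stdlib.h>

struct ctx { int *bits; };

static void __ctx_cleanup(struct ctx *c)
{
	free(c->bits);		/* free(NULL) is itself a no-op */
	c->bits = NULL;
}

void ctx_cleanup(struct ctx *c)
{
	if (c != NULL)		/* callers may now pass NULL unconditionally */
		__ctx_cleanup(c);
}

int main(void)
{
	struct ctx c = { .bits = malloc(64) };

	ctx_cleanup(&c);	/* normal path */
	ctx_cleanup(NULL);	/* no-op; the evlist.c hunks below rely on this */
	return 0;
}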
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 5632efc44738..825336304a37 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -174,7 +174,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
mp->idx = idx;
if (per_cpu) {
- mp->cpu = evlist->core.cpus->map[idx];
+ mp->cpu = perf_cpu_map__cpu(evlist->core.cpus, idx);
if (evlist->core.threads)
mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
else
diff --git a/tools/perf/util/counts.c b/tools/perf/util/counts.c
index 2b81707b9dba..7a447d918458 100644
--- a/tools/perf/util/counts.c
+++ b/tools/perf/util/counts.c
@@ -61,7 +61,7 @@ int evsel__alloc_counts(struct evsel *evsel)
struct perf_cpu_map *cpus = evsel__cpus(evsel);
int nthreads = perf_thread_map__nr(evsel->core.threads);
- evsel->counts = perf_counts__new(cpus ? cpus->nr : 1, nthreads);
+ evsel->counts = perf_counts__new(perf_cpu_map__nr(cpus), nthreads);
return evsel->counts != NULL ? 0 : -ENOMEM;
}
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 0d3c2006a15d..703ae6d3386e 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -4,7 +4,6 @@
#include <stdbool.h>
#include <stdio.h>
-#include <stdbool.h>
#include <internal/cpumap.h>
#include <perf/cpumap.h>
@@ -57,7 +56,7 @@ struct perf_cpu cpu__max_present_cpu(void);
*/
static inline bool cpu_map__is_dummy(struct perf_cpu_map *cpus)
{
- return cpus->nr == 1 && cpus->map[0].cpu == -1;
+ return perf_cpu_map__nr(cpus) == 1 && perf_cpu_map__cpu(cpus, 0).cpu == -1;
}
/**
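
This and the remaining cpumap hunks replace direct ->nr / ->map[] field accesses with the libperf accessors. Judging by the counts.c and evsel.c hunks, which drop the `cpus ? cpus->nr : 1` idiom, perf_cpu_map__nr() treats a NULL map as one entry. A hypothetical mini version of that accessor contract (not the libperf implementation):

#include <stdio.h>

struct perf_cpu { int cpu; };
struct cpu_map { int nr; struct perf_cpu map[4]; };

/* assumption inferred from the diff: a NULL map counts as one "any" cpu */
static int map_nr(const struct cpu_map *m)
{
	return m ? m->nr : 1;
}

static struct perf_cpu map_cpu(const struct cpu_map *m, int idx)
{
	struct perf_cpu dummy = { .cpu = -1 };

	if (m && idx >= 0 && idx < m->nr)
		return m->map[idx];
	return dummy;		/* out of range resolves to the -1 dummy cpu */
}

int main(void)
{
	struct cpu_map m = { .nr = 2, .map = { { .cpu = 0 }, { .cpu = 2 } } };

	for (int i = 0; i < map_nr(&m); i++)
		printf("cpu %d\n", map_cpu(&m, i).cpu);
	return 0;
}

Hiding the representation behind getters is what lets the map layout change later without touching every caller.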
diff --git a/tools/perf/util/cputopo.c b/tools/perf/util/cputopo.c
index e20b835a1194..d275d843c155 100644
--- a/tools/perf/util/cputopo.c
+++ b/tools/perf/util/cputopo.c
@@ -325,7 +325,7 @@ struct numa_topology *numa_topology__new(void)
if (!node_map)
goto out;
- nr = (u32) node_map->nr;
+ nr = (u32) perf_cpu_map__nr(node_map);
tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr);
if (!tp)
@@ -334,7 +334,7 @@ struct numa_topology *numa_topology__new(void)
tp->nr = nr;
for (i = 0; i < nr; i++) {
- if (load_numa_node(&tp->nodes[i], node_map->map[i].cpu)) {
+ if (load_numa_node(&tp->nodes[i], perf_cpu_map__cpu(node_map, i).cpu)) {
numa_topology__delete(tp);
tp = NULL;
break;
diff --git a/tools/perf/util/evlist-hybrid.c b/tools/perf/util/evlist-hybrid.c
index 7c554234b43d..7f234215147d 100644
--- a/tools/perf/util/evlist-hybrid.c
+++ b/tools/perf/util/evlist-hybrid.c
@@ -124,22 +124,23 @@ int evlist__fix_hybrid_cpus(struct evlist *evlist, const char *cpu_list)
events_nr++;
- if (matched_cpus->nr > 0 && (unmatched_cpus->nr > 0 ||
- matched_cpus->nr < cpus->nr ||
- matched_cpus->nr < pmu->cpus->nr)) {
+ if (perf_cpu_map__nr(matched_cpus) > 0 &&
+ (perf_cpu_map__nr(unmatched_cpus) > 0 ||
+ perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(cpus) ||
+ perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(pmu->cpus))) {
perf_cpu_map__put(evsel->core.cpus);
perf_cpu_map__put(evsel->core.own_cpus);
evsel->core.cpus = perf_cpu_map__get(matched_cpus);
evsel->core.own_cpus = perf_cpu_map__get(matched_cpus);
- if (unmatched_cpus->nr > 0) {
+ if (perf_cpu_map__nr(unmatched_cpus) > 0) {
cpu_map__snprint(matched_cpus, buf1, sizeof(buf1));
pr_warning("WARNING: use %s in '%s' for '%s', skip other cpus in list.\n",
buf1, pmu->name, evsel->name);
}
}
- if (matched_cpus->nr == 0) {
+ if (perf_cpu_map__nr(matched_cpus) == 0) {
evlist__remove(evlist, evsel);
evsel__delete(evsel);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 6e88d404b5b3..eaad04e1672a 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -430,15 +430,19 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
{
struct evsel *pos;
struct evlist_cpu_iterator evlist_cpu_itr;
- struct affinity affinity;
+ struct affinity saved_affinity, *affinity = NULL;
bool has_imm = false;
- if (affinity__setup(&affinity) < 0)
- return;
+ // See explanation in evlist__close()
+ if (!cpu_map__is_dummy(evlist->core.cpus)) {
+ if (affinity__setup(&saved_affinity) < 0)
+ return;
+ affinity = &saved_affinity;
+ }
/* Disable 'immediate' events last */
for (int imm = 0; imm <= 1; imm++) {
- evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
+ evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
pos = evlist_cpu_itr.evsel;
if (evsel__strcmp(pos, evsel_name))
continue;
@@ -454,7 +458,7 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
break;
}
- affinity__cleanup(&affinity);
+ affinity__cleanup(affinity);
evlist__for_each_entry(evlist, pos) {
if (evsel__strcmp(pos, evsel_name))
continue;
@@ -487,12 +491,16 @@ static void __evlist__enable(struct evlist *evlist, char *evsel_name)
{
struct evsel *pos;
struct evlist_cpu_iterator evlist_cpu_itr;
- struct affinity affinity;
+ struct affinity saved_affinity, *affinity = NULL;
- if (affinity__setup(&affinity) < 0)
- return;
+ // See explanation in evlist__close()
+ if (!cpu_map__is_dummy(evlist->core.cpus)) {
+ if (affinity__setup(&saved_affinity) < 0)
+ return;
+ affinity = &saved_affinity;
+ }
- evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
+ evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
pos = evlist_cpu_itr.evsel;
if (evsel__strcmp(pos, evsel_name))
continue;
@@ -500,7 +508,7 @@ static void __evlist__enable(struct evlist *evlist, char *evsel_name)
continue;
evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
}
- affinity__cleanup(&affinity);
+ affinity__cleanup(affinity);
evlist__for_each_entry(evlist, pos) {
if (evsel__strcmp(pos, evsel_name))
continue;
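
Both evlist.c hunks skip affinity setup when the CPU map is the dummy map (a single cpu == -1 entry, i.e. per-thread monitoring) and pass a NULL affinity to the iterator instead. A standalone model of that control flow (illustrative names):

#include <stdbool.h>
#include <stdlib.h>

struct affinity { int *bits; };

static int affinity_setup(struct affinity *a)
{
	a->bits = calloc(8, sizeof(int));
	return a->bits ? 0 : -1;
}

static void affinity_cleanup(struct affinity *a)
{
	if (a)			/* NULL-tolerant, per the affinity.c hunk */
		free(a->bits);
}

static void for_each_event(bool dummy_cpu_map)
{
	struct affinity saved, *affinity = NULL;

	/* only real per-CPU maps benefit from pinning to each CPU */
	if (!dummy_cpu_map) {
		if (affinity_setup(&saved) < 0)
			return;
		affinity = &saved;
	}
	/* ... iterate events; NULL affinity means "don't migrate" ... */
	affinity_cleanup(affinity);
}

int main(void)
{
	for_each_event(true);
	for_each_event(false);
	return 0;
}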
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 2f6b18af49e5..22d3267ce294 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1064,6 +1064,17 @@ void __weak arch_evsel__fixup_new_cycles(struct perf_event_attr *attr __maybe_un
{
}
+static void evsel__set_default_freq_period(struct record_opts *opts,
+ struct perf_event_attr *attr)
+{
+ if (opts->freq) {
+ attr->freq = 1;
+ attr->sample_freq = opts->freq;
+ } else {
+ attr->sample_period = opts->default_interval;
+ }
+}
+
/*
* The enable_on_exec/disabled value strategy:
*
@@ -1130,14 +1141,12 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
* We default some events to have a default interval. But keep
* it a weak assumption overridable by the user.
*/
- if (!attr->sample_period) {
- if (opts->freq) {
- attr->freq = 1;
- attr->sample_freq = opts->freq;
- } else {
- attr->sample_period = opts->default_interval;
- }
- }
+ if ((evsel->is_libpfm_event && !attr->sample_period) ||
+ (!evsel->is_libpfm_event && (!attr->sample_period ||
+ opts->user_freq != UINT_MAX ||
+ opts->user_interval != ULLONG_MAX)))
+ evsel__set_default_freq_period(opts, attr);
+
/*
* If attr->freq was set (here or earlier), ask for period
* to be sampled.
@@ -1782,7 +1791,7 @@ static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
nthreads = threads->nr;
if (evsel->core.fd == NULL &&
- perf_evsel__alloc_fd(&evsel->core, cpus->nr, nthreads) < 0)
+ perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0)
return -ENOMEM;
evsel->open_flags = PERF_FLAG_FD_CLOEXEC;
@@ -2020,9 +2029,10 @@ retry_open:
test_attr__ready();
pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
- pid, cpus->map[idx].cpu, group_fd, evsel->open_flags);
+ pid, perf_cpu_map__cpu(cpus, idx).cpu, group_fd, evsel->open_flags);
- fd = sys_perf_event_open(&evsel->core.attr, pid, cpus->map[idx].cpu,
+ fd = sys_perf_event_open(&evsel->core.attr, pid,
+ perf_cpu_map__cpu(cpus, idx).cpu,
group_fd, evsel->open_flags);
FD(evsel, idx, thread) = fd;
@@ -2038,7 +2048,8 @@ retry_open:
bpf_counter__install_pe(evsel, idx, fd);
if (unlikely(test_attr__enabled)) {
- test_attr__open(&evsel->core.attr, pid, cpus->map[idx],
+ test_attr__open(&evsel->core.attr, pid,
+ perf_cpu_map__cpu(cpus, idx),
fd, group_fd, evsel->open_flags);
}
@@ -2079,7 +2090,8 @@ try_fallback:
if (evsel__precise_ip_fallback(evsel))
goto retry_open;
- if (evsel__ignore_missing_thread(evsel, cpus->nr, idx, threads, thread, err)) {
+ if (evsel__ignore_missing_thread(evsel, perf_cpu_map__nr(cpus),
+ idx, threads, thread, err)) {
/* We just removed 1 thread, so lower the upper nthreads limit. */
nthreads--;
@@ -2119,7 +2131,7 @@ out_close:
int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads)
{
- return evsel__open_cpu(evsel, cpus, threads, 0, cpus ? cpus->nr : 1);
+ return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus));
}
void evsel__close(struct evsel *evsel)
@@ -2131,8 +2143,7 @@ void evsel__close(struct evsel *evsel)
int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
{
if (cpu_map_idx == -1)
- return evsel__open_cpu(evsel, cpus, NULL, 0,
- cpus ? cpus->nr : 1);
+ return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus));
return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1);
}
@@ -2982,7 +2993,7 @@ int evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
struct perf_cpu_map *cpus = evsel->core.cpus;
struct perf_thread_map *threads = evsel->core.threads;
- if (perf_evsel__alloc_id(&evsel->core, cpus->nr, threads->nr))
+ if (perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr))
return -ENOMEM;
return store_evsel_ids(evsel, evlist);
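
The first evsel.c hunk factors the default frequency/period assignment into evsel__set_default_freq_period() and widens the condition: libpfm events keep an event-supplied period unless none is set, while for ordinary events an explicit user -F/-c now overrides a period encoded in the event itself. A standalone model of the decision, where UINT_MAX/ULLONG_MAX stand for "user gave no value", as in struct record_opts:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

static bool use_default_freq_period(bool is_libpfm,
				    unsigned long long sample_period,
				    unsigned int user_freq,
				    unsigned long long user_interval)
{
	if (is_libpfm)	/* libpfm events: fill in defaults only if unset */
		return sample_period == 0;
	/* ordinary events: defaults apply when no period is set, or when
	 * the user explicitly asked for a frequency or interval */
	return sample_period == 0 ||
	       user_freq != UINT_MAX || user_interval != ULLONG_MAX;
}

int main(void)
{
	/* 'cycles/period=100000/' with no -F/-c: keep the encoded period */
	printf("%d\n", use_default_freq_period(false, 100000, UINT_MAX, ULLONG_MAX));
	/* the same event with '-F 99': the user frequency wins */
	printf("%d\n", use_default_freq_period(false, 100000, 99, ULLONG_MAX));
	return 0;
}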
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 5720ceebffac..041b42d33bf5 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -11,6 +11,7 @@
#include <perf/evsel.h>
#include "symbol_conf.h"
#include <internal/cpumap.h>
+#include <perf/cpumap.h>
struct bpf_object;
struct cgroup;
@@ -191,7 +192,7 @@ static inline struct perf_cpu_map *evsel__cpus(struct evsel *evsel)
static inline int evsel__nr_cpus(struct evsel *evsel)
{
- return evsel__cpus(evsel)->nr;
+ return perf_cpu_map__nr(evsel__cpus(evsel));
}
void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 3901440aeff9..f70ba56912d4 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -16,6 +16,7 @@
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
+#include "path.h"
#include "srcline.h"
#include "symbol.h"
#include "sort.h"
@@ -1416,7 +1417,7 @@ static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, i
struct stat st;
/*sshfs might return bad dent->d_type, so we have to stat*/
- snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
+ path__join(path, sizeof(path), dir_name, dent->d_name);
if (stat(path, &st))
continue;
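
The machine.c hunk swaps a hand-rolled "%s/%s" snprintf for path__join(). A minimal sketch of the contract this presumably buys, i.e. no stray separator when the directory part is empty (hypothetical reimplementation; see tools/perf/util/path.c for the real helper):

#include <stdio.h>

static int join_path(char *bf, size_t size, const char *dir, const char *entry)
{
	/* emit "dir/entry", but just "entry" when dir is empty */
	return snprintf(bf, size, "%s%s%s", dir, dir[0] ? "/" : "", entry);
}

int main(void)
{
	char path[256];

	join_path(path, sizeof(path), "/lib/modules/5.17", "kernel");
	puts(path);	/* /lib/modules/5.17/kernel */
	join_path(path, sizeof(path), "", "kernel");
	puts(path);	/* kernel -- no leading '/' */
	return 0;
}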
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 12261ed8c15b..0e8ff8d1e206 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -250,7 +250,7 @@ static void build_node_mask(int node, struct mmap_cpu_mask *mask)
nr_cpus = perf_cpu_map__nr(cpu_map);
for (idx = 0; idx < nr_cpus; idx++) {
- cpu = cpu_map->map[idx]; /* map c index to online cpu index */
+ cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */
if (cpu__get_node(cpu) == node)
set_bit(cpu.cpu, mask->bits);
}
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index acf20ce98ce9..9739b05b999e 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1697,6 +1697,15 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
}
}
}
+
+ if (parse_state->fake_pmu) {
+ if (!parse_events_add_pmu(parse_state, list, str, head,
+ true, true)) {
+ pr_debug("%s -> %s/%s/\n", str, "fake_pmu", str);
+ ok++;
+ }
+ }
+
out_err:
if (ok)
*listp = list;
@@ -2098,8 +2107,17 @@ static void perf_pmu__parse_init(void)
pmu = NULL;
while ((pmu = perf_pmu__scan(pmu)) != NULL) {
list_for_each_entry(alias, &pmu->aliases, list) {
- if (strchr(alias->name, '-'))
+ char *tmp = strchr(alias->name, '-');
+
+ if (tmp) {
+ char *tmp2 = NULL;
+
+ tmp2 = strchr(tmp + 1, '-');
len++;
+ if (tmp2)
+ len++;
+ }
+
len++;
}
}
@@ -2119,8 +2137,20 @@ static void perf_pmu__parse_init(void)
list_for_each_entry(alias, &pmu->aliases, list) {
struct perf_pmu_event_symbol *p = perf_pmu_events_list + len;
char *tmp = strchr(alias->name, '-');
+ char *tmp2 = NULL;
- if (tmp != NULL) {
+ if (tmp)
+ tmp2 = strchr(tmp + 1, '-');
+ if (tmp2) {
+ SET_SYMBOL(strndup(alias->name, tmp - alias->name),
+ PMU_EVENT_SYMBOL_PREFIX);
+ p++;
+ tmp++;
+ SET_SYMBOL(strndup(tmp, tmp2 - tmp), PMU_EVENT_SYMBOL_SUFFIX);
+ p++;
+ SET_SYMBOL(strdup(++tmp2), PMU_EVENT_SYMBOL_SUFFIX2);
+ len += 3;
+ } else if (tmp) {
SET_SYMBOL(strndup(alias->name, tmp - alias->name),
PMU_EVENT_SYMBOL_PREFIX);
p++;
@@ -2147,23 +2177,38 @@ err:
*/
int perf_pmu__test_parse_init(void)
{
- struct perf_pmu_event_symbol *list;
+ struct perf_pmu_event_symbol *list, *tmp, symbols[] = {
+ {(char *)"read", PMU_EVENT_SYMBOL},
+ {(char *)"event", PMU_EVENT_SYMBOL_PREFIX},
+ {(char *)"two", PMU_EVENT_SYMBOL_SUFFIX},
+ {(char *)"hyphen", PMU_EVENT_SYMBOL_SUFFIX},
+ {(char *)"hyph", PMU_EVENT_SYMBOL_SUFFIX2},
+ };
+ unsigned long i, j;
- list = malloc(sizeof(*list) * 1);
+ tmp = list = malloc(sizeof(*list) * ARRAY_SIZE(symbols));
if (!list)
return -ENOMEM;
- list->type = PMU_EVENT_SYMBOL;
- list->symbol = strdup("read");
-
- if (!list->symbol) {
- free(list);
- return -ENOMEM;
+ for (i = 0; i < ARRAY_SIZE(symbols); i++, tmp++) {
+ tmp->type = symbols[i].type;
+ tmp->symbol = strdup(symbols[i].symbol);
+ if (!tmp->symbol)
+ goto err_free;
}
perf_pmu_events_list = list;
- perf_pmu_events_list_num = 1;
+ perf_pmu_events_list_num = ARRAY_SIZE(symbols);
+
+ qsort(perf_pmu_events_list, ARRAY_SIZE(symbols),
+ sizeof(struct perf_pmu_event_symbol), comp_pmu);
return 0;
+
+err_free:
+ for (j = 0, tmp = list; j < i; j++, tmp++)
+ free(tmp->symbol);
+ free(list);
+ return -ENOMEM;
}
enum perf_pmu_event_symbol_type
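
perf_pmu__parse_init() now recognizes alias names containing two hyphens, registering a prefix, a suffix, and a second suffix so the lexer can return PE_PMU_EVENT_SUF2 for the last component. A standalone sketch of the split, using the "event-two-hyph" alias that perf_pmu__test_parse_init() above installs for the parser tests:

#define _POSIX_C_SOURCE 200809L	/* for strndup() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *alias = "event-two-hyph";
	const char *tmp = strchr(alias, '-');
	const char *tmp2 = tmp ? strchr(tmp + 1, '-') : NULL;

	if (tmp2) {
		char *prefix = strndup(alias, tmp - alias);
		char *suffix = strndup(tmp + 1, tmp2 - (tmp + 1));

		printf("PREFIX=%s SUFFIX=%s SUFFIX2=%s\n",
		       prefix, suffix, tmp2 + 1);	/* event two hyph */
		free(prefix);
		free(suffix);
	}
	return 0;
}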
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index c7fc93f54577..a38b8b160e80 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -53,6 +53,7 @@ enum perf_pmu_event_symbol_type {
PMU_EVENT_SYMBOL, /* normal style PMU event */
PMU_EVENT_SYMBOL_PREFIX, /* prefix of pre-suf style event */
PMU_EVENT_SYMBOL_SUFFIX, /* suffix of pre-suf style event */
+ PMU_EVENT_SYMBOL_SUFFIX2, /* suffix of pre-suf2 style event */
};
struct perf_pmu_event_symbol {
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 4efe9872c667..5b6e4b5249cf 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -149,6 +149,8 @@ static int pmu_str_check(yyscan_t scanner, struct parse_events_state *parse_stat
return PE_PMU_EVENT_PRE;
case PMU_EVENT_SYMBOL_SUFFIX:
return PE_PMU_EVENT_SUF;
+ case PMU_EVENT_SYMBOL_SUFFIX2:
+ return PE_PMU_EVENT_SUF2;
case PMU_EVENT_SYMBOL:
return parse_state->fake_pmu
? PE_PMU_EVENT_FAKE : PE_KERNEL_PMU_EVENT;
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 174158982fae..be8c51770051 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -69,7 +69,7 @@ static void inc_group_count(struct list_head *list,
%token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT
%token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP
%token PE_ERROR
-%token PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
+%token PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_PMU_EVENT_SUF2 PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
%token PE_ARRAY_ALL PE_ARRAY_RANGE
%token PE_DRV_CFG_TERM
%type <num> PE_VALUE
@@ -87,7 +87,7 @@ static void inc_group_count(struct list_head *list,
%type <str> PE_MODIFIER_EVENT
%type <str> PE_MODIFIER_BP
%type <str> PE_EVENT_NAME
-%type <str> PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
+%type <str> PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_PMU_EVENT_SUF2 PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
%type <str> PE_DRV_CFG_TERM
%type <str> event_pmu_name
%destructor { free ($$); } <str>
@@ -372,6 +372,19 @@ PE_KERNEL_PMU_EVENT opt_pmu_config
$$ = list;
}
|
+PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF '-' PE_PMU_EVENT_SUF2 sep_dc
+{
+ struct list_head *list;
+ char pmu_name[128];
+ snprintf(pmu_name, sizeof(pmu_name), "%s-%s-%s", $1, $3, $5);
+ free($1);
+ free($3);
+ free($5);
+ if (parse_events_multi_pmu_add(_parse_state, pmu_name, NULL, &list) < 0)
+ YYABORT;
+ $$ = list;
+}
+|
PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc
{
struct list_head *list;
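
The new production mirrors the existing PRE '-' SUF rule one hyphen deeper: the three token strings are glued back into a single name before the PMU lookup. A trivial standalone sketch of that reassembly step:

#include <stdio.h>

int main(void)
{
	char pmu_name[128];

	/* what the PE_PMU_EVENT_PRE '-' SUF '-' SUF2 action does */
	snprintf(pmu_name, sizeof(pmu_name), "%s-%s-%s",
		 "event", "two", "hyph");
	printf("%s\n", pmu_name);	/* event-two-hyph */
	return 0;
}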
diff --git a/tools/perf/util/perf_api_probe.c b/tools/perf/util/perf_api_probe.c
index 734d006d9a8c..c28dd50bd571 100644
--- a/tools/perf/util/perf_api_probe.c
+++ b/tools/perf/util/perf_api_probe.c
@@ -67,7 +67,7 @@ static bool perf_probe_api(setup_probe_fn_t fn)
cpus = perf_cpu_map__new(NULL);
if (!cpus)
return false;
- cpu = cpus->map[0];
+ cpu = perf_cpu_map__cpu(cpus, 0);
perf_cpu_map__put(cpus);
do {
@@ -144,7 +144,7 @@ bool perf_can_record_cpu_wide(void)
if (!cpus)
return false;
- cpu = cpus->map[0];
+ cpu = perf_cpu_map__cpu(cpus, 0);
perf_cpu_map__put(cpus);
fd = sys_perf_event_open(&attr, -1, cpu.cpu, -1, 0);
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index b2a02c9ab8ea..a834918a0a0d 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -3083,6 +3083,9 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
for (j = 0; j < num_matched_functions; j++) {
sym = syms[j];
+ if (sym->type != STT_FUNC)
+ continue;
+
/* There can be duplicated symbols in the map */
for (i = 0; i < j; i++)
if (sym->start == syms[i]->start) {
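
The probe-event.c hunk skips symbols that are not functions, so a data symbol sharing a probed function's name can no longer be selected. A standalone model of the filter (STT_FUNC value as in <elf.h>):

#include <stdio.h>

enum { STT_OBJECT = 1, STT_FUNC = 2 };	/* values as in <elf.h> */

struct sym { const char *name; int type; };

int main(void)
{
	struct sym syms[] = {
		{ "do_sys_open", STT_FUNC },
		{ "jiffies", STT_OBJECT },	/* data: not probeable code */
	};

	for (unsigned int i = 0; i < sizeof(syms) / sizeof(syms[0]); i++) {
		if (syms[i].type != STT_FUNC)
			continue;	/* probes only make sense on code */
		printf("probe candidate: %s\n", syms[i].name);
	}
	return 0;
}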
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index f3e5131f183c..52d8995cfd73 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -638,17 +638,17 @@ static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
{
struct pyrf_cpu_map *pcpus = (void *)obj;
- return pcpus->cpus->nr;
+ return perf_cpu_map__nr(pcpus->cpus);
}
static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
{
struct pyrf_cpu_map *pcpus = (void *)obj;
- if (i >= pcpus->cpus->nr)
+ if (i >= perf_cpu_map__nr(pcpus->cpus))
return NULL;
- return Py_BuildValue("i", pcpus->cpus->map[i]);
+ return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
}
static PySequenceMethods pyrf_cpu_map__sequence_methods = {
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index 20461f174991..007a64681416 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -106,7 +106,7 @@ void evlist__config(struct evlist *evlist, struct record_opts *opts, struct call
if (opts->group)
evlist__set_leader(evlist);
- if (evlist->core.cpus->map[0].cpu < 0)
+ if (perf_cpu_map__cpu(evlist->core.cpus, 0).cpu < 0)
opts->no_inherit = true;
use_comm_exec = perf_can_comm_exec();
@@ -248,11 +248,11 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
if (cpus)
- cpu = cpus->map[0];
+ cpu = perf_cpu_map__cpu(cpus, 0);
perf_cpu_map__put(cpus);
} else {
- cpu = evlist->core.cpus->map[0];
+ cpu = perf_cpu_map__cpu(evlist->core.cpus, 0);
}
while (1) {
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index f5ad0e62227a..e752e1f4a5f0 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -1607,8 +1607,8 @@ static void python_process_stat(struct perf_stat_config *config,
}
for (thread = 0; thread < threads->nr; thread++) {
- for (cpu = 0; cpu < cpus->nr; cpu++) {
- process_stat(counter, cpus->map[cpu],
+ for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) {
+ process_stat(counter, perf_cpu_map__cpu(cpus, cpu),
perf_thread_map__pid(threads, thread), tstamp,
perf_counts(counter->counts, cpu, thread));
}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index f19348dddd55..2c0d30f08e78 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -2537,8 +2537,8 @@ int perf_session__cpu_bitmap(struct perf_session *session,
return -1;
}
- for (i = 0; i < map->nr; i++) {
- struct perf_cpu cpu = map->map[i];
+ for (i = 0; i < perf_cpu_map__nr(map); i++) {
+ struct perf_cpu cpu = perf_cpu_map__cpu(map, i);
if (cpu.cpu >= nr_cpus) {
pr_err("Requested CPU %d too large. "
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index 4c9f211249db..1e0c731fc539 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -734,8 +734,8 @@ static int str_to_bitmap(char *s, cpumask_t *b, int nr_cpus)
if (!m)
return -1;
- for (i = 0; i < m->nr; i++) {
- c = m->map[i];
+ for (i = 0; i < perf_cpu_map__nr(m); i++) {
+ c = perf_cpu_map__cpu(m, i);
if (c.cpu >= nr_cpus) {
ret = -1;
break;
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index c9ba8050cc2b..70f095624a0b 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -1186,12 +1186,12 @@ int perf_event__synthesize_thread_map2(struct perf_tool *tool,
static void synthesize_cpus(struct cpu_map_entries *cpus,
struct perf_cpu_map *map)
{
- int i;
+ int i, map_nr = perf_cpu_map__nr(map);
- cpus->nr = map->nr;
+ cpus->nr = map_nr;
- for (i = 0; i < map->nr; i++)
- cpus->cpu[i] = map->map[i].cpu;
+ for (i = 0; i < map_nr; i++)
+ cpus->cpu[i] = perf_cpu_map__cpu(map, i).cpu;
}
static void synthesize_mask(struct perf_record_record_cpu_map *mask,
@@ -1202,13 +1202,13 @@ static void synthesize_mask(struct perf_record_record_cpu_map *mask,
mask->nr = BITS_TO_LONGS(max);
mask->long_size = sizeof(long);
- for (i = 0; i < map->nr; i++)
- set_bit(map->map[i].cpu, mask->mask);
+ for (i = 0; i < perf_cpu_map__nr(map); i++)
+ set_bit(perf_cpu_map__cpu(map, i).cpu, mask->mask);
}
static size_t cpus_size(struct perf_cpu_map *map)
{
- return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
+ return sizeof(struct cpu_map_entries) + perf_cpu_map__nr(map) * sizeof(u16);
}
static size_t mask_size(struct perf_cpu_map *map, int *max)
@@ -1217,9 +1217,9 @@ static size_t mask_size(struct perf_cpu_map *map, int *max)
*max = 0;
- for (i = 0; i < map->nr; i++) {
+ for (i = 0; i < perf_cpu_map__nr(map); i++) {
/* bit position of the cpu is + 1 */
- int bit = map->map[i].cpu + 1;
+ int bit = perf_cpu_map__cpu(map, i).cpu + 1;
if (bit > *max)
*max = bit;
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index 27945eeb0cb5..c1ebfc5d2e0c 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -95,15 +95,15 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
if (target->cpu_list)
ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
- top->evlist->core.cpus->nr > 1 ? "s" : "",
+ perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "",
target->cpu_list);
else {
if (target->tid)
ret += SNPRINTF(bf + ret, size - ret, ")");
else
ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
- top->evlist->core.cpus->nr,
- top->evlist->core.cpus->nr > 1 ? "s" : "");
+ perf_cpu_map__nr(top->evlist->core.cpus),
+ perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "");
}
perf_top__reset_sample_counters(top);