-rw-r--r--  tools/arch/powerpc/include/uapi/asm/perf_regs.h | 28
-rw-r--r--  tools/build/Makefile.feature | 1
-rw-r--r--  tools/build/feature/Makefile | 4
-rw-r--r--  tools/build/feature/test-libtracefs.c | 10
-rw-r--r--  tools/include/linux/list_sort.h | 14
-rw-r--r--  tools/include/uapi/linux/perf_event.h | 15
-rw-r--r--  tools/lib/list_sort.c | 252
-rw-r--r--  tools/lib/perf/cpumap.c | 16
-rw-r--r--  tools/lib/perf/include/perf/event.h | 6
-rw-r--r--  tools/perf/Documentation/perf-intel-pt.txt | 7
-rw-r--r--  tools/perf/Documentation/perf-kmem.txt | 13
-rw-r--r--  tools/perf/Documentation/perf-list.txt | 4
-rw-r--r--  tools/perf/Documentation/perf-record.txt | 16
-rw-r--r--  tools/perf/MANIFEST | 1
-rw-r--r--  tools/perf/Makefile.config | 21
-rw-r--r--  tools/perf/Makefile.perf | 4
-rw-r--r--  tools/perf/arch/arm64/util/pmu.c | 2
-rw-r--r--  tools/perf/arch/powerpc/include/perf_regs.h | 2
-rw-r--r--  tools/perf/arch/powerpc/util/header.c | 2
-rw-r--r--  tools/perf/arch/powerpc/util/perf_regs.c | 2
-rw-r--r--  tools/perf/arch/riscv64/annotate/instructions.c | 34
-rw-r--r--  tools/perf/arch/x86/annotate/instructions.c | 28
-rw-r--r--  tools/perf/bench/evlist-open-close.c | 7
-rw-r--r--  tools/perf/bench/synthesize.c | 4
-rw-r--r--  tools/perf/builtin-daemon.c | 15
-rw-r--r--  tools/perf/builtin-inject.c | 4
-rw-r--r--  tools/perf/builtin-kvm.c | 2
-rw-r--r--  tools/perf/builtin-list.c | 42
-rw-r--r--  tools/perf/builtin-record.c | 48
-rw-r--r--  tools/perf/builtin-top.c | 2
-rw-r--r--  tools/perf/builtin-trace.c | 4
-rwxr-xr-x  tools/perf/check-headers.sh | 2
-rw-r--r--  tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json | 20
-rw-r--r--  tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json | 4
-rw-r--r--  tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json | 10
-rw-r--r--  tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json | 4
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/branch.json | 4
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/bus.json | 12
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/cache.json | 34
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/exception.json | 4
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/instruction.json | 18
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/memory.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/other.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/pipeline.json | 4
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/branch.json | 8
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/bus.json | 20
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/cache.json | 155
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/exception.json | 47
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/instruction.json | 89
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/memory.json | 20
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/other.json | 5
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/pipeline.json | 23
-rw-r--r--  tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json | 72
-rw-r--r--  tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json | 32
-rw-r--r--  tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json | 120
-rw-r--r--  tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json | 52
-rw-r--r--  tools/perf/pmu-events/arch/arm64/mapfile.csv | 1
-rw-r--r--  tools/perf/pmu-events/arch/nds32/n13/atcpmu.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z10/basic.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z10/crypto.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z10/extended.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z13/basic.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z13/crypto.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z13/extended.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z14/basic.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z14/crypto.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z14/extended.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z15/basic.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z15/crypto.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z15/extended.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z196/basic.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z196/crypto.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z196/extended.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_zec12/basic.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_zec12/extended.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/test/test_soc/cpu/uncore.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json | 7
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json | 2
-rw-r--r--  tools/perf/pmu-events/jevents.c | 32
-rw-r--r--  tools/perf/pmu-events/jsmn.c | 43
-rw-r--r--  tools/perf/pmu-events/pmu-events.h | 8
-rw-r--r--  tools/perf/tests/code-reading.c | 3
-rw-r--r--  tools/perf/tests/expand-cgroup.c | 2
-rw-r--r--  tools/perf/tests/expr.c | 163
-rw-r--r--  tools/perf/tests/mmap-thread-lookup.c | 4
-rw-r--r--  tools/perf/tests/parse-events.c | 8
-rw-r--r--  tools/perf/tests/parse-metric.c | 2
-rw-r--r--  tools/perf/tests/pmu-events.c | 184
-rwxr-xr-x  tools/perf/tests/shell/stat_all_metricgroups.sh | 12
-rwxr-xr-x  tools/perf/tests/shell/stat_all_metrics.sh | 22
-rwxr-xr-x  tools/perf/tests/shell/stat_all_pmu.sh | 22
-rw-r--r--  tools/perf/tests/vmlinux-kallsyms.c | 102
-rw-r--r--  tools/perf/util/Build | 5
-rw-r--r--  tools/perf/util/annotate.c | 6
-rw-r--r--  tools/perf/util/bpf_counter.c | 8
-rw-r--r--  tools/perf/util/bpf_counter_cgroup.c | 8
-rw-r--r--  tools/perf/util/debug.c | 19
-rw-r--r--  tools/perf/util/dso.h | 2
-rw-r--r--  tools/perf/util/event.c | 18
-rw-r--r--  tools/perf/util/event.h | 5
-rw-r--r--  tools/perf/util/evsel.c | 17
-rw-r--r--  tools/perf/util/evsel.h | 2
-rw-r--r--  tools/perf/util/expr.c | 159
-rw-r--r--  tools/perf/util/expr.h | 34
-rw-r--r--  tools/perf/util/expr.l | 15
-rw-r--r--  tools/perf/util/expr.y | 325
-rw-r--r--  tools/perf/util/intel-pt.c | 85
-rw-r--r--  tools/perf/util/machine.c | 10
-rw-r--r--  tools/perf/util/machine.h | 2
-rw-r--r--  tools/perf/util/metricgroup.c | 1450
-rw-r--r--  tools/perf/util/metricgroup.h | 37
-rw-r--r--  tools/perf/util/mmap.c | 11
-rw-r--r--  tools/perf/util/mmap.h | 3
-rw-r--r--  tools/perf/util/parse-events-hybrid.c | 34
-rw-r--r--  tools/perf/util/parse-events-hybrid.h | 6
-rw-r--r--  tools/perf/util/parse-events.c | 256
-rw-r--r--  tools/perf/util/parse-events.h | 17
-rw-r--r--  tools/perf/util/parse-events.l | 19
-rw-r--r--  tools/perf/util/parse-events.y | 27
-rw-r--r--  tools/perf/util/pfm.c | 3
-rw-r--r--  tools/perf/util/pmu.c | 51
-rw-r--r--  tools/perf/util/pmu.h | 12
-rw-r--r--  tools/perf/util/python-ext-sources | 1
-rw-r--r--  tools/perf/util/record.h | 1
-rw-r--r--  tools/perf/util/s390-sample-raw.c | 6
-rw-r--r--  tools/perf/util/session.c | 198
-rw-r--r--  tools/perf/util/session.h | 10
-rw-r--r--  tools/perf/util/srcline.c | 338
-rw-r--r--  tools/perf/util/stat-shadow.c | 75
-rw-r--r--  tools/perf/util/synthetic-events.c | 73
-rw-r--r--  tools/perf/util/synthetic-events.h | 20
-rw-r--r--  tools/perf/util/tool.h | 1
137 files changed, 3967 insertions, 1440 deletions
diff --git a/tools/arch/powerpc/include/uapi/asm/perf_regs.h b/tools/arch/powerpc/include/uapi/asm/perf_regs.h
index 578b3ee86105..749a2e3af89e 100644
--- a/tools/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/tools/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -61,27 +61,35 @@ enum perf_event_powerpc_regs {
PERF_REG_POWERPC_PMC4,
PERF_REG_POWERPC_PMC5,
PERF_REG_POWERPC_PMC6,
- /* Max regs without the extended regs */
+ PERF_REG_POWERPC_SDAR,
+ PERF_REG_POWERPC_SIAR,
+ /* Max mask value for interrupt regs w/o extended regs */
PERF_REG_POWERPC_MAX = PERF_REG_POWERPC_MMCRA + 1,
+ /* Max mask value for interrupt regs including extended regs */
+ PERF_REG_EXTENDED_MAX = PERF_REG_POWERPC_SIAR + 1,
};
#define PERF_REG_PMU_MASK ((1ULL << PERF_REG_POWERPC_MAX) - 1)
-/* Exclude MMCR3, SIER2, SIER3 for CPU_FTR_ARCH_300 */
-#define PERF_EXCLUDE_REG_EXT_300 (7ULL << PERF_REG_POWERPC_MMCR3)
-
/*
* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300
- * includes 9 SPRS from MMCR0 to PMC6 excluding the
- * unsupported SPRS in PERF_EXCLUDE_REG_EXT_300.
+ * includes 11 SPRS from MMCR0 to SIAR excluding the
+ * unsupported SPRS MMCR3, SIER2 and SIER3.
*/
-#define PERF_REG_PMU_MASK_300 ((0xfffULL << PERF_REG_POWERPC_MMCR0) - PERF_EXCLUDE_REG_EXT_300)
+#define PERF_REG_PMU_MASK_300 \
+ ((1ULL << PERF_REG_POWERPC_MMCR0) | (1ULL << PERF_REG_POWERPC_MMCR1) | \
+ (1ULL << PERF_REG_POWERPC_MMCR2) | (1ULL << PERF_REG_POWERPC_PMC1) | \
+ (1ULL << PERF_REG_POWERPC_PMC2) | (1ULL << PERF_REG_POWERPC_PMC3) | \
+ (1ULL << PERF_REG_POWERPC_PMC4) | (1ULL << PERF_REG_POWERPC_PMC5) | \
+ (1ULL << PERF_REG_POWERPC_PMC6) | (1ULL << PERF_REG_POWERPC_SDAR) | \
+ (1ULL << PERF_REG_POWERPC_SIAR))
/*
* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31
- * includes 12 SPRs from MMCR0 to PMC6.
+ * includes 14 SPRs from MMCR0 to SIAR.
*/
-#define PERF_REG_PMU_MASK_31 (0xfffULL << PERF_REG_POWERPC_MMCR0)
+#define PERF_REG_PMU_MASK_31 \
+ (PERF_REG_PMU_MASK_300 | (1ULL << PERF_REG_POWERPC_MMCR3) | \
+ (1ULL << PERF_REG_POWERPC_SIER2) | (1ULL << PERF_REG_POWERPC_SIER3))
-#define PERF_REG_EXTENDED_MAX (PERF_REG_POWERPC_PMC6 + 1)
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
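As a quick sanity check of the new mask layout, here is a minimal sketch (not part of the patch) that counts the SPRs covered by each mask. It assumes it is compiled from the top of the kernel tree so the updated perf_regs.h above is reachable, and it should report 11 registers for PERF_REG_PMU_MASK_300 and 14 for PERF_REG_PMU_MASK_31:

	#include <stdio.h>
	#include "tools/arch/powerpc/include/uapi/asm/perf_regs.h"

	int main(void)
	{
		/* MMCR0-2, PMC1-6, SDAR and SIAR for ISA v3.0 */
		printf("MASK_300: %d regs\n",
		       __builtin_popcountll(PERF_REG_PMU_MASK_300));
		/* ... plus MMCR3, SIER2 and SIER3 for ISA v3.1 */
		printf("MASK_31:  %d regs\n",
		       __builtin_popcountll(PERF_REG_PMU_MASK_31));
		return 0;
	}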
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 3dd2f68366f9..45a9a59828c3 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -52,6 +52,7 @@ FEATURE_TESTS_BASIC := \
libslang \
libslang-include-subdir \
libtraceevent \
+ libtracefs \
libcrypto \
libunwind \
pthread-attr-setaffinity-np \
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index eff55d287db1..d024b5204ba0 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -36,6 +36,7 @@ FILES= \
test-libslang.bin \
test-libslang-include-subdir.bin \
test-libtraceevent.bin \
+ test-libtracefs.bin \
test-libcrypto.bin \
test-libunwind.bin \
test-libunwind-debug-frame.bin \
@@ -199,6 +200,9 @@ $(OUTPUT)test-libslang-include-subdir.bin:
$(OUTPUT)test-libtraceevent.bin:
$(BUILD) -ltraceevent
+$(OUTPUT)test-libtracefs.bin:
+ $(BUILD) -ltracefs
+
$(OUTPUT)test-libcrypto.bin:
$(BUILD) -lcrypto
diff --git a/tools/build/feature/test-libtracefs.c b/tools/build/feature/test-libtracefs.c
new file mode 100644
index 000000000000..8eff16c0c10b
--- /dev/null
+++ b/tools/build/feature/test-libtracefs.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <tracefs/tracefs.h>
+
+int main(void)
+{
+ struct tracefs_instance *inst = tracefs_instance_create("dummy");
+
+ tracefs_instance_destroy(inst);
+ return 0;
+}
diff --git a/tools/include/linux/list_sort.h b/tools/include/linux/list_sort.h
new file mode 100644
index 000000000000..453105f74e05
--- /dev/null
+++ b/tools/include/linux/list_sort.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_LIST_SORT_H
+#define _LINUX_LIST_SORT_H
+
+#include <linux/types.h>
+
+struct list_head;
+
+typedef int __attribute__((nonnull(2,3))) (*list_cmp_func_t)(void *,
+ const struct list_head *, const struct list_head *);
+
+__attribute__((nonnull(2,3)))
+void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp);
+#endif
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index f92880a15645..c89535de1ec8 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -1141,6 +1141,21 @@ enum perf_event_type {
*/
PERF_RECORD_TEXT_POKE = 20,
+ /*
+ * Data written to the AUX area by hardware due to aux_output, may need
+ * to be matched to the event by an architecture-specific hardware ID.
+ * This records the hardware ID, but requires sample_id to provide the
+ * event ID. e.g. Intel PT uses this record to disambiguate PEBS-via-PT
+ * records from multiple events.
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u64 hw_id;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_AUX_OUTPUT_HW_ID = 21,
+
PERF_RECORD_MAX, /* non-ABI */
};
diff --git a/tools/lib/list_sort.c b/tools/lib/list_sort.c
new file mode 100644
index 000000000000..10c067e3a8d2
--- /dev/null
+++ b/tools/lib/list_sort.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/list_sort.h>
+#include <linux/list.h>
+
+/*
+ * Returns a list organized in an intermediate format suited
+ * to chaining of merge() calls: null-terminated, no reserved or
+ * sentinel head node, "prev" links not maintained.
+ */
+__attribute__((nonnull(2,3,4)))
+static struct list_head *merge(void *priv, list_cmp_func_t cmp,
+ struct list_head *a, struct list_head *b)
+{
+ struct list_head *head, **tail = &head;
+
+ for (;;) {
+ /* if equal, take 'a' -- important for sort stability */
+ if (cmp(priv, a, b) <= 0) {
+ *tail = a;
+ tail = &a->next;
+ a = a->next;
+ if (!a) {
+ *tail = b;
+ break;
+ }
+ } else {
+ *tail = b;
+ tail = &b->next;
+ b = b->next;
+ if (!b) {
+ *tail = a;
+ break;
+ }
+ }
+ }
+ return head;
+}
+
+/*
+ * Combine final list merge with restoration of standard doubly-linked
+ * list structure. This approach duplicates code from merge(), but
+ * runs faster than the tidier alternatives of either a separate final
+ * prev-link restoration pass, or maintaining the prev links
+ * throughout.
+ */
+__attribute__((nonnull(2,3,4,5)))
+static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
+ struct list_head *a, struct list_head *b)
+{
+ struct list_head *tail = head;
+ u8 count = 0;
+
+ for (;;) {
+ /* if equal, take 'a' -- important for sort stability */
+ if (cmp(priv, a, b) <= 0) {
+ tail->next = a;
+ a->prev = tail;
+ tail = a;
+ a = a->next;
+ if (!a)
+ break;
+ } else {
+ tail->next = b;
+ b->prev = tail;
+ tail = b;
+ b = b->next;
+ if (!b) {
+ b = a;
+ break;
+ }
+ }
+ }
+
+ /* Finish linking remainder of list b on to tail */
+ tail->next = b;
+ do {
+ /*
+ * If the merge is highly unbalanced (e.g. the input is
+ * already sorted), this loop may run many iterations.
+ * Continue callbacks to the client even though no
+ * element comparison is needed, so the client's cmp()
+ * routine can invoke cond_resched() periodically.
+ */
+ if (unlikely(!++count))
+ cmp(priv, b, b);
+ b->prev = tail;
+ tail = b;
+ b = b->next;
+ } while (b);
+
+ /* And the final links to make a circular doubly-linked list */
+ tail->next = head;
+ head->prev = tail;
+}
+
+/**
+ * list_sort - sort a list
+ * @priv: private data, opaque to list_sort(), passed to @cmp
+ * @head: the list to sort
+ * @cmp: the elements comparison function
+ *
+ * The comparison function @cmp must return > 0 if @a should sort after
+ * @b ("@a > @b" if you want an ascending sort), and <= 0 if @a should
+ * sort before @b *or* their original order should be preserved. It is
+ * always called with the element that came first in the input in @a,
+ * and list_sort is a stable sort, so it is not necessary to distinguish
+ * the @a < @b and @a == @b cases.
+ *
+ * This is compatible with two styles of @cmp function:
+ * - The traditional style which returns <0 / =0 / >0, or
+ * - Returning a boolean 0/1.
+ * The latter offers a chance to save a few cycles in the comparison
+ * (which is used by e.g. plug_ctx_cmp() in block/blk-mq.c).
+ *
+ * A good way to write a multi-word comparison is::
+ *
+ * if (a->high != b->high)
+ * return a->high > b->high;
+ * if (a->middle != b->middle)
+ * return a->middle > b->middle;
+ * return a->low > b->low;
+ *
+ *
+ * This mergesort is as eager as possible while always performing at least
+ * 2:1 balanced merges. Given two pending sublists of size 2^k, they are
+ * merged to a size-2^(k+1) list as soon as we have 2^k following elements.
+ *
+ * Thus, it will avoid cache thrashing as long as 3*2^k elements can
+ * fit into the cache. Not quite as good as a fully-eager bottom-up
+ * mergesort, but it does use 0.2*n fewer comparisons, so is faster in
+ * the common case that everything fits into L1.
+ *
+ *
+ * The merging is controlled by "count", the number of elements in the
+ * pending lists. This is beautifully simple code, but rather subtle.
+ *
+ * Each time we increment "count", we set one bit (bit k) and clear
+ * bits k-1 .. 0. Each time this happens (except the very first time
+ * for each bit, when count increments to 2^k), we merge two lists of
+ * size 2^k into one list of size 2^(k+1).
+ *
+ * This merge happens exactly when the count reaches an odd multiple of
+ * 2^k, which is when we have 2^k elements pending in smaller lists,
+ * so it's safe to merge away two lists of size 2^k.
+ *
+ * After this happens twice, we have created two lists of size 2^(k+1),
+ * which will be merged into a list of size 2^(k+2) before we create
+ * a third list of size 2^(k+1), so there are never more than two pending.
+ *
+ * The number of pending lists of size 2^k is determined by the
+ * state of bit k of "count" plus two extra pieces of information:
+ *
+ * - The state of bit k-1 (when k == 0, consider bit -1 always set), and
+ * - Whether the higher-order bits are zero or non-zero (i.e.
+ * is count >= 2^(k+1)).
+ *
+ * There are six states we distinguish. "x" represents some arbitrary
+ * bits, and "y" represents some arbitrary non-zero bits:
+ * 0: 00x: 0 pending of size 2^k; x pending of sizes < 2^k
+ * 1: 01x: 0 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
+ * 2: x10x: 0 pending of size 2^k; 2^k + x pending of sizes < 2^k
+ * 3: x11x: 1 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
+ * 4: y00x: 1 pending of size 2^k; 2^k + x pending of sizes < 2^k
+ * 5: y01x: 2 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
+ * (merge and loop back to state 2)
+ *
+ * We gain lists of size 2^k in the 2->3 and 4->5 transitions (because
+ * bit k-1 is set while the more significant bits are non-zero) and
+ * merge them away in the 5->2 transition. Note in particular that just
+ * before the 5->2 transition, all lower-order bits are 11 (state 3),
+ * so there is one list of each smaller size.
+ *
+ * When we reach the end of the input, we merge all the pending
+ * lists, from smallest to largest. If you work through cases 2 to
+ * 5 above, you can see that the number of elements we merge with a list
+ * of size 2^k varies from 2^(k-1) (cases 3 and 5 when x == 0) to
+ * 2^(k+1) - 1 (second merge of case 5 when x == 2^(k-1) - 1).
+ */
+__attribute__((nonnull(2,3)))
+void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp)
+{
+ struct list_head *list = head->next, *pending = NULL;
+ size_t count = 0; /* Count of pending */
+
+ if (list == head->prev) /* Zero or one elements */
+ return;
+
+ /* Convert to a null-terminated singly-linked list. */
+ head->prev->next = NULL;
+
+ /*
+ * Data structure invariants:
+ * - All lists are singly linked and null-terminated; prev
+ * pointers are not maintained.
+ * - pending is a prev-linked "list of lists" of sorted
+ * sublists awaiting further merging.
+ * - Each of the sorted sublists is power-of-two in size.
+ * - Sublists are sorted by size and age, smallest & newest at front.
+ * - There are zero to two sublists of each size.
+ * - A pair of pending sublists are merged as soon as the number
+ * of following pending elements equals their size (i.e.
+ * each time count reaches an odd multiple of that size).
+ * That ensures each later final merge will be at worst 2:1.
+ * - Each round consists of:
+ * - Merging the two sublists selected by the highest bit
+ * which flips when count is incremented, and
+ * - Adding an element from the input as a size-1 sublist.
+ */
+ do {
+ size_t bits;
+ struct list_head **tail = &pending;
+
+ /* Find the least-significant clear bit in count */
+ for (bits = count; bits & 1; bits >>= 1)
+ tail = &(*tail)->prev;
+ /* Do the indicated merge */
+ if (likely(bits)) {
+ struct list_head *a = *tail, *b = a->prev;
+
+ a = merge(priv, cmp, b, a);
+ /* Install the merged result in place of the inputs */
+ a->prev = b->prev;
+ *tail = a;
+ }
+
+ /* Move one element from input list to pending */
+ list->prev = pending;
+ pending = list;
+ list = list->next;
+ pending->next = NULL;
+ count++;
+ } while (list);
+
+ /* End of input; merge together all the pending lists. */
+ list = pending;
+ pending = pending->prev;
+ for (;;) {
+ struct list_head *next = pending->prev;
+
+ if (!next)
+ break;
+ list = merge(priv, cmp, pending, list);
+ pending = next;
+ }
+ /* The final merge, rebuilding prev links */
+ merge_final(priv, cmp, head, pending, list);
+}
+EXPORT_SYMBOL(list_sort);
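For reference, a minimal usage sketch of the tools copy of list_sort() (not part of the patch), sorting a hypothetical struct item by its val field. As the kernel-doc above notes, a boolean-style cmp() is enough because the sort is stable:

	#include <linux/list.h>
	#include <linux/list_sort.h>

	struct item {
		struct list_head node;
		int val;
	};

	static int item_cmp(void *priv, const struct list_head *a,
			    const struct list_head *b)
	{
		const struct item *ia = list_entry(a, struct item, node);
		const struct item *ib = list_entry(b, struct item, node);

		(void)priv;		/* unused here */
		return ia->val > ib->val; /* >0 iff 'a' sorts after 'b' */
	}

	/* Given a populated LIST_HEAD(items) of struct item nodes: */
	/*	list_sort(NULL, &items, item_cmp);		      */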
diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c
index 6d8e521c59e1..adaad3dddf6e 100644
--- a/tools/lib/perf/cpumap.c
+++ b/tools/lib/perf/cpumap.c
@@ -270,11 +270,19 @@ bool perf_cpu_map__empty(const struct perf_cpu_map *map)
int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu)
{
- int i;
+ int low = 0, high = cpus->nr;
- for (i = 0; i < cpus->nr; ++i) {
- if (cpus->map[i] == cpu)
- return i;
+ while (low < high) {
+ int idx = (low + high) / 2,
+ cpu_at_idx = cpus->map[idx];
+
+ if (cpu_at_idx == cpu)
+ return idx;
+
+ if (cpu_at_idx > cpu)
+ high = idx;
+ else
+ low = idx + 1;
}
return -1;
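A short usage sketch (not part of the patch): the lookup is now a binary search, so it relies on cpus->map[] being sorted in ascending order, which perf_cpu_map__new() guarantees. The CPU list below is hypothetical, and perf_cpu_map__idx() is assumed to come from libperf's internal cpumap header:

	#include <stdio.h>
	#include <perf/cpumap.h>
	#include <internal/cpumap.h>

	int main(void)
	{
		struct perf_cpu_map *cpus = perf_cpu_map__new("0-3,8");

		printf("cpu 8 -> idx %d\n", perf_cpu_map__idx(cpus, 8)); /* 4 */
		printf("cpu 5 -> idx %d\n", perf_cpu_map__idx(cpus, 5)); /* -1, not in map */
		perf_cpu_map__put(cpus);
		return 0;
	}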
diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
index 4d0c02ba3f7d..75ee385fb078 100644
--- a/tools/lib/perf/include/perf/event.h
+++ b/tools/lib/perf/include/perf/event.h
@@ -289,6 +289,11 @@ struct perf_record_itrace_start {
__u32 tid;
};
+struct perf_record_aux_output_hw_id {
+ struct perf_event_header header;
+ __u64 hw_id;
+};
+
struct perf_record_thread_map_entry {
__u64 pid;
char comm[16];
@@ -414,6 +419,7 @@ union perf_event {
struct perf_record_auxtrace_error auxtrace_error;
struct perf_record_aux aux;
struct perf_record_itrace_start itrace_start;
+ struct perf_record_aux_output_hw_id aux_output_hw_id;
struct perf_record_switch context_switch;
struct perf_record_thread_map thread_map;
struct perf_record_cpu_map cpu_map;
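To show how the new union member is meant to be consumed, here is a hypothetical tool-side handler (not part of the patch), assuming the usual event_op callback signature that side-band records such as itrace_start already use:

	/* Sketch within the perf tool sources; assumes util/tool.h,
	 * util/event.h and util/machine.h provide the types below. */
	#include <inttypes.h>
	#include <stdio.h>

	static int process_aux_output_hw_id(struct perf_tool *tool __maybe_unused,
					    union perf_event *event,
					    struct perf_sample *sample __maybe_unused,
					    struct machine *machine __maybe_unused)
	{
		/* hw_id ties AUX data (e.g. PEBS-via-PT) back to the emitting event */
		fprintf(stderr, "AUX_OUTPUT_HW_ID: hw_id %#" PRIx64 "\n",
			event->aux_output_hw_id.hw_id);
		return 0;
	}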
diff --git a/tools/perf/Documentation/perf-intel-pt.txt b/tools/perf/Documentation/perf-intel-pt.txt
index db465fa7ee91..553c3e08fa4a 100644
--- a/tools/perf/Documentation/perf-intel-pt.txt
+++ b/tools/perf/Documentation/perf-intel-pt.txt
@@ -1144,7 +1144,12 @@ Recording is selected by using the aux-output config term e.g.
perf record -c 10000 -e '{intel_pt/branch=0/,cycles/aux-output/ppp}' uname
-Note that currently, software only supports redirecting at most one PEBS event.
+Originally, software only supported redirecting at most one PEBS event because it
+was not able to differentiate one event from another. To overcome that, more recent
+kernels and perf tools add support for the PERF_RECORD_AUX_OUTPUT_HW_ID side-band event.
+To check for the presence of that event in a PEBS-via-PT trace:
+
+ perf script -D --no-itrace | grep PERF_RECORD_AUX_OUTPUT_HW_ID
To display PEBS events from the Intel PT trace, use the itrace 'o' option e.g.
diff --git a/tools/perf/Documentation/perf-kmem.txt b/tools/perf/Documentation/perf-kmem.txt
index 85b8ac695c87..f378ac59353d 100644
--- a/tools/perf/Documentation/perf-kmem.txt
+++ b/tools/perf/Documentation/perf-kmem.txt
@@ -8,22 +8,25 @@ perf-kmem - Tool to trace/measure kernel memory properties
SYNOPSIS
--------
[verse]
-'perf kmem' {record|stat} [<options>]
+'perf kmem' [<options>] {record|stat}
DESCRIPTION
-----------
There are two variants of perf kmem:
- 'perf kmem record <command>' to record the kmem events
- of an arbitrary workload.
+ 'perf kmem [<options>] record [<perf-record-options>] <command>' to
+ record the kmem events of an arbitrary workload. Additional 'perf
+ record' options may be specified after record, such as '-o' to
+ change the output file name.
- 'perf kmem stat' to report kernel memory statistics.
+ 'perf kmem [<options>] stat' to report kernel memory statistics.
OPTIONS
-------
-i <file>::
--input=<file>::
- Select the input file (default: perf.data unless stdin is a fifo)
+ For stat, select the input file (default: perf.data unless stdin is a
+ fifo)
-f::
--force::
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 4c7db1da8fcc..4dc8d0af19df 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -39,6 +39,10 @@ any extra expressions computed by perf stat.
--deprecated::
Print deprecated events. By default the deprecated events are hidden.
+--cputype::
+Print events applying cpu with this type for hybrid platform
+(e.g. --cputype core or --cputype atom)
+
[[EVENT_MODIFIERS]]
EVENT MODIFIERS
---------------
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index f1079ee7f2ec..2d7df8703cf2 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -596,6 +596,22 @@ options.
'perf record --dry-run -e' can act as a BPF script compiler if llvm.dump-obj
in config file is set to true.
+--synth=TYPE::
+Collect and synthesize given type of events (comma separated). Note that
+this option controls the synthesis from the /proc filesystem which represent
+task status for pre-existing threads.
+
+Kernel (and some other) events are recorded regardless of the
+choice in this option. For example, --synth=no would have MMAP events for
+kernel and modules.
+
+Available types are:
+ 'task' - synthesize FORK and COMM events for each task
+ 'mmap' - synthesize MMAP events for each process (implies 'task')
+ 'cgroup' - synthesize CGROUP events for each cgroup
+ 'all' - synthesize all events (default)
+ 'no' - do not synthesize any of the above events
+
--tail-synthesize::
Instead of collecting non-sample events (for example, fork, comm, mmap) at
the beginning of record, collect them during finalizing an output file.
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index f05c4d48fd7e..e728615a3830 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -17,6 +17,7 @@ tools/lib/symbol/kallsyms.c
tools/lib/symbol/kallsyms.h
tools/lib/find_bit.c
tools/lib/bitmap.c
+tools/lib/list_sort.c
tools/lib/str_error_r.c
tools/lib/vsprintf.c
tools/lib/zalloc.c
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 14e3e8d702a0..4a9baed28f2e 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -1093,11 +1093,32 @@ ifdef LIBTRACEEVENT_DYNAMIC
$(call feature_check,libtraceevent)
ifeq ($(feature-libtraceevent), 1)
EXTLIBS += -ltraceevent
+ LIBTRACEEVENT_VERSION := $(shell $(PKG_CONFIG) --modversion libtraceevent)
+ LIBTRACEEVENT_VERSION_1 := $(word 1, $(subst ., ,$(LIBTRACEEVENT_VERSION)))
+ LIBTRACEEVENT_VERSION_2 := $(word 2, $(subst ., ,$(LIBTRACEEVENT_VERSION)))
+ LIBTRACEEVENT_VERSION_3 := $(word 3, $(subst ., ,$(LIBTRACEEVENT_VERSION)))
+ LIBTRACEEVENT_VERSION_CPP := $(shell expr $(LIBTRACEEVENT_VERSION_1) \* 255 \* 255 + $(LIBTRACEEVENT_VERSION_2) \* 255 + $(LIBTRACEEVENT_VERSION_3))
+ CFLAGS += -DLIBTRACEEVENT_VERSION=$(LIBTRACEEVENT_VERSION_CPP)
else
dummy := $(error Error: No libtraceevent devel library found, please install libtraceevent-devel);
endif
endif
+ifdef LIBTRACEFS_DYNAMIC
+ $(call feature_check,libtracefs)
+ ifeq ($(feature-libtracefs), 1)
+ EXTLIBS += -ltracefs
+ LIBTRACEFS_VERSION := $(shell $(PKG_CONFIG) --modversion libtracefs)
+ LIBTRACEFS_VERSION_1 := $(word 1, $(subst ., ,$(LIBTRACEFS_VERSION)))
+ LIBTRACEFS_VERSION_2 := $(word 2, $(subst ., ,$(LIBTRACEFS_VERSION)))
+ LIBTRACEFS_VERSION_3 := $(word 3, $(subst ., ,$(LIBTRACEFS_VERSION)))
+ LIBTRACEFS_VERSION_CPP := $(shell expr $(LIBTRACEFS_VERSION_1) \* 255 \* 255 + $(LIBTRACEFS_VERSION_2) \* 255 + $(LIBTRACEFS_VERSION_3))
+ CFLAGS += -DLIBTRACEFS_VERSION=$(LIBTRACEFS_VERSION_CPP)
+ else
+ dummy := $(error Error: No libtracefs devel library found, please install libtracefs-dev);
+ endif
+endif
+
# Among the variables below, these:
# perfexecdir
# perf_include_dir
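With this encoding, a library version a.b.c becomes a*255*255 + b*255 + c, so C code can gate on a minimum library version at preprocessing time. A hypothetical example (the helper macro name and the 1.3.0 cutoff are illustrative, not taken from the patch):

	/* Helper mirroring the Makefile encoding above */
	#define MAKE_LIB_VERSION(a, b, c)  ((a) * 255 * 255 + (b) * 255 + (c))

	#if defined(LIBTRACEEVENT_VERSION) && \
		LIBTRACEEVENT_VERSION >= MAKE_LIB_VERSION(1, 3, 0)
	/* ... use interfaces that need libtraceevent 1.3.0 or newer ... */
	#endif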
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 5cd702062a04..1d2b73f99172 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -130,6 +130,8 @@ include ../scripts/utilities.mak
#
# Define LIBTRACEEVENT_DYNAMIC to enable libtraceevent dynamic linking
#
+# Define LIBTRACEFS_DYNAMIC to enable libtracefs dynamic linking
+#
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
@@ -787,6 +789,8 @@ $(OUTPUT)dlfilters/%.o: dlfilters/%.c include/perf/perf_dlfilter.h
$(Q)$(MKDIR) -p $(OUTPUT)dlfilters
$(QUIET_CC)$(CC) -c -Iinclude $(EXTRA_CFLAGS) -o $@ -fpic $<
+.SECONDARY: $(DLFILTERS:.so=.o)
+
$(OUTPUT)dlfilters/%.so: $(OUTPUT)dlfilters/%.o
$(QUIET_LINK)$(CC) $(EXTRA_CFLAGS) -shared -o $@ $<
diff --git a/tools/perf/arch/arm64/util/pmu.c b/tools/perf/arch/arm64/util/pmu.c
index 2234fbd0a912..d3a18f9c85f6 100644
--- a/tools/perf/arch/arm64/util/pmu.c
+++ b/tools/perf/arch/arm64/util/pmu.c
@@ -3,7 +3,7 @@
#include "../../../util/cpumap.h"
#include "../../../util/pmu.h"
-struct pmu_events_map *pmu_events_map__find(void)
+const struct pmu_events_map *pmu_events_map__find(void)
{
struct perf_pmu *pmu = NULL;
diff --git a/tools/perf/arch/powerpc/include/perf_regs.h b/tools/perf/arch/powerpc/include/perf_regs.h
index 04e5dc07e93f..93339d17acc4 100644
--- a/tools/perf/arch/powerpc/include/perf_regs.h
+++ b/tools/perf/arch/powerpc/include/perf_regs.h
@@ -77,6 +77,8 @@ static const char *reg_names[] = {
[PERF_REG_POWERPC_PMC4] = "pmc4",
[PERF_REG_POWERPC_PMC5] = "pmc5",
[PERF_REG_POWERPC_PMC6] = "pmc6",
+ [PERF_REG_POWERPC_SDAR] = "sdar",
+ [PERF_REG_POWERPC_SIAR] = "siar",
};
static inline const char *__perf_reg_name(int id)
diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c
index 58b2d610aadb..e8fe36b10d20 100644
--- a/tools/perf/arch/powerpc/util/header.c
+++ b/tools/perf/arch/powerpc/util/header.c
@@ -40,7 +40,7 @@ get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
return bufp;
}
-int arch_get_runtimeparam(struct pmu_event *pe)
+int arch_get_runtimeparam(const struct pmu_event *pe)
{
int count;
char path[PATH_MAX] = "/devices/hv_24x7/interface/";
diff --git a/tools/perf/arch/powerpc/util/perf_regs.c b/tools/perf/arch/powerpc/util/perf_regs.c
index 8116a253f91f..8d07a78e742a 100644
--- a/tools/perf/arch/powerpc/util/perf_regs.c
+++ b/tools/perf/arch/powerpc/util/perf_regs.c
@@ -74,6 +74,8 @@ const struct sample_reg sample_reg_masks[] = {
SMPL_REG(pmc4, PERF_REG_POWERPC_PMC4),
SMPL_REG(pmc5, PERF_REG_POWERPC_PMC5),
SMPL_REG(pmc6, PERF_REG_POWERPC_PMC6),
+ SMPL_REG(sdar, PERF_REG_POWERPC_SDAR),
+ SMPL_REG(siar, PERF_REG_POWERPC_SIAR),
SMPL_REG_END
};
diff --git a/tools/perf/arch/riscv64/annotate/instructions.c b/tools/perf/arch/riscv64/annotate/instructions.c
new file mode 100644
index 000000000000..869a0eb28953
--- /dev/null
+++ b/tools/perf/arch/riscv64/annotate/instructions.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+
+static
+struct ins_ops *riscv64__associate_ins_ops(struct arch *arch, const char *name)
+{
+ struct ins_ops *ops = NULL;
+
+ if (!strncmp(name, "jal", 3) ||
+ !strncmp(name, "jr", 2) ||
+ !strncmp(name, "call", 4))
+ ops = &call_ops;
+ else if (!strncmp(name, "ret", 3))
+ ops = &ret_ops;
+ else if (name[0] == 'j' || name[0] == 'b')
+ ops = &jump_ops;
+ else
+ return NULL;
+
+ arch__associate_ins_ops(arch, name, ops);
+
+ return ops;
+}
+
+static
+int riscv64__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
+{
+ if (!arch->initialized) {
+ arch->associate_instruction_ops = riscv64__associate_ins_ops;
+ arch->initialized = true;
+ arch->objdump.comment_char = '#';
+ }
+
+ return 0;
+}
diff --git a/tools/perf/arch/x86/annotate/instructions.c b/tools/perf/arch/x86/annotate/instructions.c
index 24ea12ec7e02..305872692bfd 100644
--- a/tools/perf/arch/x86/annotate/instructions.c
+++ b/tools/perf/arch/x86/annotate/instructions.c
@@ -144,9 +144,32 @@ static struct ins x86__instructions[] = {
{ .name = "xorps", .ops = &mov_ops, },
};
-static bool x86__ins_is_fused(struct arch *arch, const char *ins1,
+static bool amd__ins_is_fused(struct arch *arch, const char *ins1,
const char *ins2)
{
+ if (strstr(ins2, "jmp"))
+ return false;
+
+ /* Family >= 15h supports cmp/test + branch fusion */
+ if (arch->family >= 0x15 && (strstarts(ins1, "test") ||
+ (strstarts(ins1, "cmp") && !strstr(ins1, "xchg")))) {
+ return true;
+ }
+
+ /* Family >= 19h supports some ALU + branch fusion */
+ if (arch->family >= 0x19 && (strstarts(ins1, "add") ||
+ strstarts(ins1, "sub") || strstarts(ins1, "and") ||
+ strstarts(ins1, "inc") || strstarts(ins1, "dec") ||
+ strstarts(ins1, "or") || strstarts(ins1, "xor"))) {
+ return true;
+ }
+
+ return false;
+}
+
+static bool intel__ins_is_fused(struct arch *arch, const char *ins1,
+ const char *ins2)
+{
if (arch->family != 6 || arch->model < 0x1e || strstr(ins2, "jmp"))
return false;
@@ -184,6 +207,9 @@ static int x86__cpuid_parse(struct arch *arch, char *cpuid)
if (ret == 3) {
arch->family = family;
arch->model = model;
+ arch->ins_is_fused = strstarts(cpuid, "AuthenticAMD") ?
+ amd__ins_is_fused :
+ intel__ins_is_fused;
return 0;
}
diff --git a/tools/perf/bench/evlist-open-close.c b/tools/perf/bench/evlist-open-close.c
index 83e9897c64a1..75a53919126b 100644
--- a/tools/perf/bench/evlist-open-close.c
+++ b/tools/perf/bench/evlist-open-close.c
@@ -25,6 +25,11 @@ static int iterations = 100;
static int nr_events = 1;
static const char *event_string = "dummy";
+static inline u64 timeval2usec(struct timeval *tv)
+{
+ return tv->tv_sec * USEC_PER_SEC + tv->tv_usec;
+}
+
static struct record_opts opts = {
.sample_time = true,
.mmap_pages = UINT_MAX,
@@ -167,7 +172,7 @@ static int bench_evlist_open_close__run(char *evstr)
gettimeofday(&end, NULL);
timersub(&end, &start, &diff);
- runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
+ runtime_us = timeval2usec(&diff);
update_stats(&time_stats, runtime_us);
evlist__delete(evlist);
diff --git a/tools/perf/bench/synthesize.c b/tools/perf/bench/synthesize.c
index 05f7c923c745..7401ebbac100 100644
--- a/tools/perf/bench/synthesize.c
+++ b/tools/perf/bench/synthesize.c
@@ -80,7 +80,7 @@ static int do_run_single_threaded(struct perf_session *session,
NULL,
target, threads,
process_synthesized_event,
- data_mmap,
+ true, data_mmap,
nr_threads_synthesize);
if (err)
return err;
@@ -171,7 +171,7 @@ static int do_run_multi_threaded(struct target *target,
NULL,
target, NULL,
process_synthesized_event,
- false,
+ true, false,
nr_threads_synthesize);
if (err) {
perf_session__delete(session);
diff --git a/tools/perf/builtin-daemon.c b/tools/perf/builtin-daemon.c
index 61929f63a047..6cb3f6cc36d0 100644
--- a/tools/perf/builtin-daemon.c
+++ b/tools/perf/builtin-daemon.c
@@ -1121,8 +1121,6 @@ static int setup_config(struct daemon *daemon)
#ifndef F_TLOCK
#define F_TLOCK 2
-#include <sys/file.h>
-
static int lockf(int fd, int cmd, off_t len)
{
if (cmd != F_TLOCK || len != 0)
@@ -1403,8 +1401,10 @@ out:
static int send_cmd_list(struct daemon *daemon)
{
- union cmd cmd = { .cmd = CMD_LIST, };
+ union cmd cmd;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.list.cmd = CMD_LIST;
cmd.list.verbose = verbose;
cmd.list.csv_sep = daemon->csv_sep ? *daemon->csv_sep : 0;
@@ -1432,6 +1432,7 @@ static int __cmd_signal(struct daemon *daemon, struct option parent_options[],
return -1;
}
+ memset(&cmd, 0, sizeof(cmd));
cmd.signal.cmd = CMD_SIGNAL,
cmd.signal.sig = SIGUSR2;
strncpy(cmd.signal.name, name, sizeof(cmd.signal.name) - 1);
@@ -1446,7 +1447,7 @@ static int __cmd_stop(struct daemon *daemon, struct option parent_options[],
OPT_PARENT(parent_options),
OPT_END()
};
- union cmd cmd = { .cmd = CMD_STOP, };
+ union cmd cmd;
argc = parse_options(argc, argv, start_options, daemon_usage, 0);
if (argc)
@@ -1457,6 +1458,8 @@ static int __cmd_stop(struct daemon *daemon, struct option parent_options[],
return -1;
}
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = CMD_STOP;
return send_cmd(daemon, &cmd);
}
@@ -1470,7 +1473,7 @@ static int __cmd_ping(struct daemon *daemon, struct option parent_options[],
OPT_PARENT(parent_options),
OPT_END()
};
- union cmd cmd = { .cmd = CMD_PING, };
+ union cmd cmd;
argc = parse_options(argc, argv, ping_options, daemon_usage, 0);
if (argc)
@@ -1481,6 +1484,8 @@ static int __cmd_ping(struct daemon *daemon, struct option parent_options[],
return -1;
}
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = CMD_PING;
scnprintf(cmd.ping.name, sizeof(cmd.ping.name), "%s", name);
return send_cmd(daemon, &cmd);
}
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 6ad191e731fc..ac6c570029e3 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -815,7 +815,8 @@ static int __cmd_inject(struct perf_inject *inject)
inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
inject->tool.auxtrace = perf_event__process_auxtrace;
inject->tool.aux = perf_event__drop_aux;
- inject->tool.itrace_start = perf_event__drop_aux,
+ inject->tool.itrace_start = perf_event__drop_aux;
+ inject->tool.aux_output_hw_id = perf_event__drop_aux;
inject->tool.ordered_events = true;
inject->tool.ordering_requires_timestamps = true;
/* Allow space in the header for new attributes */
@@ -882,6 +883,7 @@ int cmd_inject(int argc, const char **argv)
.lost_samples = perf_event__repipe,
.aux = perf_event__repipe,
.itrace_start = perf_event__repipe,
+ .aux_output_hw_id = perf_event__repipe,
.context_switch = perf_event__repipe,
.throttle = perf_event__repipe,
.unthrottle = perf_event__repipe,
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index aa1b127ffb5b..c6f352ee57e6 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1456,7 +1456,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
perf_session__set_id_hdr_size(kvm->session);
ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true);
machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
- kvm->evlist->core.threads, false, 1);
+ kvm->evlist->core.threads, true, false, 1);
err = kvm_live_open_events(kvm);
if (err)
goto out;
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 10ab5e40a34f..468958154ed9 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -12,6 +12,7 @@
#include "util/parse-events.h"
#include "util/pmu.h"
+#include "util/pmu-hybrid.h"
#include "util/debug.h"
#include "util/metricgroup.h"
#include <subcmd/pager.h>
@@ -20,13 +21,15 @@
static bool desc_flag = true;
static bool details_flag;
+static const char *hybrid_type;
int cmd_list(int argc, const char **argv)
{
- int i;
+ int i, ret = 0;
bool raw_dump = false;
bool long_desc_flag = false;
bool deprecated = false;
+ char *pmu_name = NULL;
struct option list_options[] = {
OPT_BOOLEAN(0, "raw-dump", &raw_dump, "Dump raw events"),
OPT_BOOLEAN('d', "desc", &desc_flag,
@@ -37,6 +40,9 @@ int cmd_list(int argc, const char **argv)
"Print information on the perf event names and expressions used internally by events."),
OPT_BOOLEAN(0, "deprecated", &deprecated,
"Print deprecated events."),
+ OPT_STRING(0, "cputype", &hybrid_type, "hybrid cpu type",
+ "Print events applying cpu with this type for hybrid platform "
+ "(e.g. core or atom)"),
OPT_INCR(0, "debug", &verbose,
"Enable debugging output"),
OPT_END()
@@ -56,10 +62,16 @@ int cmd_list(int argc, const char **argv)
if (!raw_dump && pager_in_use())
printf("\nList of pre-defined events (to be used in -e):\n\n");
+ if (hybrid_type) {
+ pmu_name = perf_pmu__hybrid_type_to_pmu(hybrid_type);
+ if (!pmu_name)
+ pr_warning("WARNING: hybrid cputype is not supported!\n");
+ }
+
if (argc == 0) {
print_events(NULL, raw_dump, !desc_flag, long_desc_flag,
- details_flag, deprecated);
- return 0;
+ details_flag, deprecated, pmu_name);
+ goto out;
}
for (i = 0; i < argc; ++i) {
@@ -82,25 +94,27 @@ int cmd_list(int argc, const char **argv)
else if (strcmp(argv[i], "pmu") == 0)
print_pmu_events(NULL, raw_dump, !desc_flag,
long_desc_flag, details_flag,
- deprecated);
+ deprecated, pmu_name);
else if (strcmp(argv[i], "sdt") == 0)
print_sdt_events(NULL, NULL, raw_dump);
else if (strcmp(argv[i], "metric") == 0 || strcmp(argv[i], "metrics") == 0)
- metricgroup__print(true, false, NULL, raw_dump, details_flag);
+ metricgroup__print(true, false, NULL, raw_dump, details_flag, pmu_name);
else if (strcmp(argv[i], "metricgroup") == 0 || strcmp(argv[i], "metricgroups") == 0)
- metricgroup__print(false, true, NULL, raw_dump, details_flag);
+ metricgroup__print(false, true, NULL, raw_dump, details_flag, pmu_name);
else if ((sep = strchr(argv[i], ':')) != NULL) {
int sep_idx;
sep_idx = sep - argv[i];
s = strdup(argv[i]);
- if (s == NULL)
- return -1;
+ if (s == NULL) {
+ ret = -1;
+ goto out;
+ }
s[sep_idx] = '\0';
print_tracepoint_events(s, s + sep_idx + 1, raw_dump);
print_sdt_events(s, s + sep_idx + 1, raw_dump);
- metricgroup__print(true, true, s, raw_dump, details_flag);
+ metricgroup__print(true, true, s, raw_dump, details_flag, pmu_name);
free(s);
} else {
if (asprintf(&s, "*%s*", argv[i]) < 0) {
@@ -116,12 +130,16 @@ int cmd_list(int argc, const char **argv)
print_pmu_events(s, raw_dump, !desc_flag,
long_desc_flag,
details_flag,
- deprecated);
+ deprecated,
+ pmu_name);
print_tracepoint_events(NULL, s, raw_dump);
print_sdt_events(NULL, s, raw_dump);
- metricgroup__print(true, true, s, raw_dump, details_flag);
+ metricgroup__print(true, true, s, raw_dump, details_flag, pmu_name);
free(s);
}
}
- return 0;
+
+out:
+ free(pmu_name);
+ return ret;
}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index b3509d9d20cc..78185c982ebf 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1255,6 +1255,7 @@ static int record__synthesize_workload(struct record *rec, bool tail)
{
int err;
struct perf_thread_map *thread_map;
+ bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
if (rec->opts.tail_synthesize != tail)
return 0;
@@ -1266,6 +1267,7 @@ static int record__synthesize_workload(struct record *rec, bool tail)
err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
process_synthesized_event,
&rec->session->machines.host,
+ needs_mmap,
rec->opts.sample_address);
perf_thread_map__put(thread_map);
return err;
@@ -1409,7 +1411,7 @@ static int record__synthesize(struct record *rec, bool tail)
goto out;
/* Synthesize id_index before auxtrace_info */
- if (rec->opts.auxtrace_sample_mode) {
+ if (rec->opts.auxtrace_sample_mode || rec->opts.full_auxtrace) {
err = perf_event__synthesize_id_index(tool,
process_synthesized_event,
session->evlist, machine);
@@ -1470,19 +1472,26 @@ static int record__synthesize(struct record *rec, bool tail)
if (err < 0)
pr_warning("Couldn't synthesize bpf events.\n");
- err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
- machine);
- if (err < 0)
- pr_warning("Couldn't synthesize cgroup events.\n");
+ if (rec->opts.synth & PERF_SYNTH_CGROUP) {
+ err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
+ machine);
+ if (err < 0)
+ pr_warning("Couldn't synthesize cgroup events.\n");
+ }
if (rec->opts.nr_threads_synthesize > 1) {
perf_set_multithreaded();
f = process_locked_synthesized_event;
}
- err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
- f, opts->sample_address,
- rec->opts.nr_threads_synthesize);
+ if (rec->opts.synth & PERF_SYNTH_TASK) {
+ bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
+
+ err = __machine__synthesize_threads(machine, tool, &opts->target,
+ rec->evlist->core.threads,
+ f, needs_mmap, opts->sample_address,
+ rec->opts.nr_threads_synthesize);
+ }
if (rec->opts.nr_threads_synthesize > 1)
perf_set_singlethreaded();
@@ -2391,6 +2400,26 @@ static int process_timestamp_boundary(struct perf_tool *tool,
return 0;
}
+static int parse_record_synth_option(const struct option *opt,
+ const char *str,
+ int unset __maybe_unused)
+{
+ struct record_opts *opts = opt->value;
+ char *p = strdup(str);
+
+ if (p == NULL)
+ return -1;
+
+ opts->synth = parse_synth_opt(p);
+ free(p);
+
+ if (opts->synth < 0) {
+ pr_err("Invalid synth option: %s\n", str);
+ return -1;
+ }
+ return 0;
+}
+
/*
* XXX Ideally would be local to cmd_record() and passed to a record__new
* because we need to have access to it in record__exit, that is called
@@ -2416,6 +2445,7 @@ static struct record record = {
.nr_threads_synthesize = 1,
.ctl_fd = -1,
.ctl_fd_ack = -1,
+ .synth = PERF_SYNTH_ALL,
},
.tool = {
.sample = process_sample_event,
@@ -2631,6 +2661,8 @@ static struct option __record_options[] = {
"\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
"\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
parse_control_option),
+ OPT_CALLBACK(0, "synth", &record.opts, "no|all|task|mmap|cgroup",
+ "Fine-tune event synthesis: default=all", parse_record_synth_option),
OPT_END()
};
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index a3ae9176a83e..020c4f110c10 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1271,7 +1271,7 @@ static int __cmd_top(struct perf_top *top)
pr_debug("Couldn't synthesize cgroup events.\n");
machine__synthesize_threads(&top->session->machines.host, &opts->target,
- top->evlist->core.threads, false,
+ top->evlist->core.threads, true, false,
top->nr_threads_synthesize);
if (top->nr_threads_synthesize > 1)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 2bf21194c7b3..2f1d20553a0a 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1628,8 +1628,8 @@ static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
goto out;
err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
- evlist->core.threads, trace__tool_process, false,
- 1);
+ evlist->core.threads, trace__tool_process,
+ true, false, 1);
out:
if (err)
symbol__exit();
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index f1e46277e822..30ecf3a0f68b 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -26,6 +26,7 @@ include/vdso/bits.h
include/linux/const.h
include/vdso/const.h
include/linux/hash.h
+include/linux/list_sort.h
include/uapi/linux/hw_breakpoint.h
arch/x86/include/asm/disabled-features.h
arch/x86/include/asm/required-features.h
@@ -150,6 +151,7 @@ check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"'
check include/linux/build_bug.h '-I "^#\(ifndef\|endif\)\( \/\/\)* static_assert$"'
check include/linux/ctype.h '-I "isdigit("'
check lib/ctype.c '-I "^EXPORT_SYMBOL" -I "^#include <linux/export.h>" -B'
+check lib/list_sort.c '-I "^#include <linux/bug.h>"'
# diff non-symmetric files
check_2 tools/perf/arch/x86/entry/syscalls/syscall_64.tbl arch/x86/entry/syscalls/syscall_64.tbl
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json
index 9bea1ba1c4d2..cf48d0dfc759 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json
@@ -18,6 +18,6 @@
"ArchStdEvent": "BUS_ACCESS_PERIPH"
},
{
- "ArchStdEvent": "BUS_ACCESS",
+ "ArchStdEvent": "BUS_ACCESS"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
index 1e25f2ae4ae0..4cc50b7da526 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
@@ -39,31 +39,31 @@
"ArchStdEvent": "L2D_CACHE_INVAL"
},
{
- "ArchStdEvent": "L1I_CACHE_REFILL",
+ "ArchStdEvent": "L1I_CACHE_REFILL"
},
{
- "ArchStdEvent": "L1I_TLB_REFILL",
+ "ArchStdEvent": "L1I_TLB_REFILL"
},
{
- "ArchStdEvent": "L1D_CACHE_REFILL",
+ "ArchStdEvent": "L1D_CACHE_REFILL"
},
{
- "ArchStdEvent": "L1D_CACHE",
+ "ArchStdEvent": "L1D_CACHE"
},
{
- "ArchStdEvent": "L1D_TLB_REFILL",
+ "ArchStdEvent": "L1D_TLB_REFILL"
},
{
- "ArchStdEvent": "L1I_CACHE",
+ "ArchStdEvent": "L1I_CACHE"
},
{
- "ArchStdEvent": "L2D_CACHE",
+ "ArchStdEvent": "L2D_CACHE"
},
{
- "ArchStdEvent": "L2D_CACHE_REFILL",
+ "ArchStdEvent": "L2D_CACHE_REFILL"
},
{
- "ArchStdEvent": "L2D_CACHE_WB",
+ "ArchStdEvent": "L2D_CACHE_WB"
},
{
"PublicDescription": "This event counts any load or store operation which accesses the data L1 TLB",
@@ -72,7 +72,7 @@
},
{
"PublicDescription": "This event counts any instruction fetch which accesses the instruction L1 TLB",
- "ArchStdEvent": "L1I_TLB",
+ "ArchStdEvent": "L1I_TLB"
},
{
"PublicDescription": "Level 2 access to data TLB that caused a page table walk. This event counts on any data access which causes L2D_TLB_REFILL to count",
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json
index 9076ca2daf9e..927a6f629a03 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json
@@ -1,7 +1,7 @@
[
{
"PublicDescription": "The number of core clock cycles",
- "ArchStdEvent": "CPU_CYCLES",
+ "ArchStdEvent": "CPU_CYCLES"
},
{
"PublicDescription": "FSU clocking gated off cycle",
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json
index 9761433ad329..ada052e19632 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json
@@ -36,9 +36,9 @@
"ArchStdEvent": "EXC_TRAP_FIQ"
},
{
- "ArchStdEvent": "EXC_TAKEN",
+ "ArchStdEvent": "EXC_TAKEN"
},
{
- "ArchStdEvent": "EXC_RETURN",
+ "ArchStdEvent": "EXC_RETURN"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json
index 482aa3f19e58..62f6276e3016 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json
@@ -44,25 +44,25 @@
"BriefDescription": "Software increment"
},
{
- "ArchStdEvent": "INST_RETIRED",
+ "ArchStdEvent": "INST_RETIRED"
},
{
"ArchStdEvent": "CID_WRITE_RETIRED",
"BriefDescription": "Write to CONTEXTIDR"
},
{
- "ArchStdEvent": "INST_SPEC",
+ "ArchStdEvent": "INST_SPEC"
},
{
- "ArchStdEvent": "TTBR_WRITE_RETIRED",
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
},
{
"PublicDescription": "This event counts all branches, taken or not. This excludes exception entries, debug entries and CCFAIL branches",
- "ArchStdEvent": "BR_RETIRED",
+ "ArchStdEvent": "BR_RETIRED"
},
{
"PublicDescription": "This event counts any branch counted by BR_RETIRED which is not correctly predicted and causes a pipeline flush",
- "ArchStdEvent": "BR_MIS_PRED_RETIRED",
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
},
{
"PublicDescription": "Operation speculatively executed, NOP",
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json
index 2e7555696caf..50157e8c2005 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json
@@ -15,10 +15,10 @@
"ArchStdEvent": "UNALIGNED_LDST_SPEC"
},
{
- "ArchStdEvent": "MEM_ACCESS",
+ "ArchStdEvent": "MEM_ACCESS"
},
{
"PublicDescription": "This event counts any correctable or uncorrectable memory error (ECC or parity) in the protected core RAMs",
- "ArchStdEvent": "MEMORY_ERROR",
+ "ArchStdEvent": "MEMORY_ERROR"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/branch.json
index ec0dc92288ab..db68de188390 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/branch.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/branch.json
@@ -1,10 +1,10 @@
[
{
"PublicDescription": "This event counts any predictable branch instruction which is mispredicted either due to dynamic misprediction or because the MMU is off and the branches are statically predicted not taken",
- "ArchStdEvent": "BR_MIS_PRED",
+ "ArchStdEvent": "BR_MIS_PRED"
},
{
"PublicDescription": "This event counts all predictable branches.",
- "ArchStdEvent": "BR_PRED",
+ "ArchStdEvent": "BR_PRED"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/bus.json
index 6263929efce2..e0875d3a685d 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/bus.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/bus.json
@@ -1,21 +1,21 @@
[
{
- "PublicDescription": "The number of core clock cycles"
+ "PublicDescription": "The number of core clock cycles",
"ArchStdEvent": "CPU_CYCLES",
"BriefDescription": "The number of core clock cycles."
},
{
"PublicDescription": "This event counts for every beat of data transferred over the data channels between the core and the SCU. If both read and write data beats are transferred on a given cycle, this event is counted twice on that cycle. This event counts the sum of BUS_ACCESS_RD and BUS_ACCESS_WR.",
- "ArchStdEvent": "BUS_ACCESS",
+ "ArchStdEvent": "BUS_ACCESS"
},
{
- "PublicDescription": "This event duplicates CPU_CYCLES."
- "ArchStdEvent": "BUS_CYCLES",
+ "PublicDescription": "This event duplicates CPU_CYCLES.",
+ "ArchStdEvent": "BUS_CYCLES"
},
{
- "ArchStdEvent": "BUS_ACCESS_RD",
+ "ArchStdEvent": "BUS_ACCESS_RD"
},
{
- "ArchStdEvent": "BUS_ACCESS_WR",
+ "ArchStdEvent": "BUS_ACCESS_WR"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/cache.json
index cd67bb9df139..fc448c2d5ea4 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/cache.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/cache.json
@@ -1,47 +1,47 @@
[
{
"PublicDescription": "This event counts any instruction fetch which misses in the cache.",
- "ArchStdEvent": "L1I_CACHE_REFILL",
+ "ArchStdEvent": "L1I_CACHE_REFILL"
},
{
"PublicDescription": "This event counts any refill of the instruction L1 TLB from the L2 TLB. This includes refills that result in a translation fault.",
- "ArchStdEvent": "L1I_TLB_REFILL",
+ "ArchStdEvent": "L1I_TLB_REFILL"
},
{
"PublicDescription": "This event counts any load or store operation or page table walk access which causes data to be read from outside the L1, including accesses which do not allocate into L1.",
- "ArchStdEvent": "L1D_CACHE_REFILL",
+ "ArchStdEvent": "L1D_CACHE_REFILL"
},
{
"PublicDescription": "This event counts any load or store operation or page table walk access which looks up in the L1 data cache. In particular, any access which could count the L1D_CACHE_REFILL event causes this event to count.",
- "ArchStdEvent": "L1D_CACHE",
+ "ArchStdEvent": "L1D_CACHE"
},
{
"PublicDescription": "This event counts any refill of the data L1 TLB from the L2 TLB. This includes refills that result in a translation fault.",
- "ArchStdEvent": "L1D_TLB_REFILL",
+ "ArchStdEvent": "L1D_TLB_REFILL"
},
- {,
+ {
"PublicDescription": "Level 1 instruction cache access or Level 0 Macro-op cache access. This event counts any instruction fetch which accesses the L1 instruction cache or L0 Macro-op cache.",
- "ArchStdEvent": "L1I_CACHE",
+ "ArchStdEvent": "L1I_CACHE"
},
{
"PublicDescription": "This event counts any write-back of data from the L1 data cache to L2 or L3. This counts both victim line evictions and snoops, including cache maintenance operations.",
- "ArchStdEvent": "L1D_CACHE_WB",
+ "ArchStdEvent": "L1D_CACHE_WB"
},
{
"PublicDescription": "This event counts any transaction from L1 which looks up in the L2 cache, and any write-back from the L1 to the L2. Snoops from outside the core and cache maintenance operations are not counted.",
- "ArchStdEvent": "L2D_CACHE",
+ "ArchStdEvent": "L2D_CACHE"
},
{
"PublicDescription": "L2 data cache refill. This event counts any cacheable transaction from L1 which causes data to be read from outside the core. L2 refills caused by stashes into L2 should not be counted",
- "ArchStdEvent": "L2D_CACHE_REFILL",
+ "ArchStdEvent": "L2D_CACHE_REFILL"
},
{
"PublicDescription": "This event counts any write-back of data from the L2 cache to outside the core. This includes snoops to the L2 which return data, regardless of whether they cause an invalidation. Invalidations from the L2 which do not write data outside of the core and snoops which return data from the L1 are not counted",
- "ArchStdEvent": "L2D_CACHE_WB",
+ "ArchStdEvent": "L2D_CACHE_WB"
},
{
"PublicDescription": "This event counts any full cache line write into the L2 cache which does not cause a linefill, including write-backs from L1 to L2 and full-line writes which do not allocate into L1.",
- "ArchStdEvent": "L2D_CACHE_ALLOCATE",
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
},
{
"PublicDescription": "This event counts any load or store operation which accesses the data L1 TLB. If both a load and a store are executed on a cycle, this event counts twice. This event counts regardless of whether the MMU is enabled.",
@@ -75,21 +75,21 @@
},
{
"PublicDescription": "This event counts on any access to the L2 TLB (caused by a refill of any of the L1 TLBs). This event does not count if the MMU is disabled.",
- "ArchStdEvent": "L2D_TLB",
+ "ArchStdEvent": "L2D_TLB"
},
{
"PublicDescription": "This event counts on any data access which causes L2D_TLB_REFILL to count.",
- "ArchStdEvent": "DTLB_WALK",
+ "ArchStdEvent": "DTLB_WALK"
},
{
"PublicDescription": "This event counts on any instruction access which causes L2D_TLB_REFILL to count.",
- "ArchStdEvent": "ITLB_WALK",
+ "ArchStdEvent": "ITLB_WALK"
},
{
- "ArchStdEvent": "LL_CACHE_RD",
+ "ArchStdEvent": "LL_CACHE_RD"
},
{
- "ArchStdEvent": "LL_CACHE_MISS_RD",
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
},
{
"ArchStdEvent": "L1D_CACHE_INVAL"
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/exception.json
index ea4631db41b5..ce942324ee60 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/exception.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/exception.json
@@ -1,10 +1,10 @@
[
{
- "ArchStdEvent": "EXC_TAKEN",
+ "ArchStdEvent": "EXC_TAKEN"
},
{
"PublicDescription": "This event counts any correctable or uncorrectable memory error (ECC or parity) in the protected core RAMs",
- "ArchStdEvent": "MEMORY_ERROR",
+ "ArchStdEvent": "MEMORY_ERROR"
},
{
"ArchStdEvent": "EXC_DABORT"
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/instruction.json
index 8e59566cba8b..b0b439a36ae9 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/instruction.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/instruction.json
@@ -1,32 +1,32 @@
[
{
- "ArchStdEvent": "SW_INCR",
+ "ArchStdEvent": "SW_INCR"
},
{
"PublicDescription": "This event counts all retired instructions, including those that fail their condition check.",
- "ArchStdEvent": "INST_RETIRED",
+ "ArchStdEvent": "INST_RETIRED"
},
{
- "ArchStdEvent": "EXC_RETURN",
+ "ArchStdEvent": "EXC_RETURN"
},
{
"PublicDescription": "This event only counts writes to CONTEXTIDR in AArch32 state, and via the CONTEXTIDR_EL1 mnemonic in AArch64 state.",
- "ArchStdEvent": "CID_WRITE_RETIRED",
+ "ArchStdEvent": "CID_WRITE_RETIRED"
},
{
- "ArchStdEvent": "INST_SPEC",
+ "ArchStdEvent": "INST_SPEC"
},
{
"PublicDescription": "This event only counts writes to TTBR0/TTBR1 in AArch32 state and TTBR0_EL1/TTBR1_EL1 in AArch64 state.",
- "ArchStdEvent": "TTBR_WRITE_RETIRED",
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
},
- {,
+ {
"PublicDescription": "This event counts all branches, taken or not. This excludes exception entries, debug entries and CCFAIL branches.",
- "ArchStdEvent": "BR_RETIRED",
+ "ArchStdEvent": "BR_RETIRED"
},
{
"PublicDescription": "This event counts any branch counted by BR_RETIRED which is not correctly predicted and causes a pipeline flush.",
- "ArchStdEvent": "BR_MIS_PRED_RETIRED",
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
},
{
"ArchStdEvent": "ASE_SPEC"
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/memory.json
index f06f399051c1..20a929e7728d 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/memory.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/memory.json
@@ -1,7 +1,7 @@
[
{
"PublicDescription": "This event counts memory accesses due to load or store instructions. This event counts the sum of MEM_ACCESS_RD and MEM_ACCESS_WR.",
- "ArchStdEvent": "MEM_ACCESS",
+ "ArchStdEvent": "MEM_ACCESS"
},
{
"ArchStdEvent": "MEM_ACCESS_RD"
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/other.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/other.json
index c2ccbf6fbfa0..20d8365756c5 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/other.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/other.json
@@ -1,5 +1,5 @@
[
{
- "ArchStdEvent": "REMOTE_ACCESS",
+ "ArchStdEvent": "REMOTE_ACCESS"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/pipeline.json
index d79f0aeaf7f1..b4e96551d51a 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/pipeline.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/pipeline.json
@@ -1,10 +1,10 @@
[
{
"PublicDescription": "The counter counts on any cycle when there are no fetched instructions available to dispatch.",
- "ArchStdEvent": "STALL_FRONTEND",
+ "ArchStdEvent": "STALL_FRONTEND"
},
{
"PublicDescription": "The counter counts on any cycle fetched instructions are not dispatched due to resource constraints.",
- "ArchStdEvent": "STALL_BACKEND",
+ "ArchStdEvent": "STALL_BACKEND"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/branch.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/branch.json
new file mode 100644
index 000000000000..79f2016c53b0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/branch.json
@@ -0,0 +1,8 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/bus.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/bus.json
new file mode 100644
index 000000000000..579c1c993d17
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/bus.json
@@ -0,0 +1,20 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "CNT_CYCLES"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/cache.json
new file mode 100644
index 000000000000..0141f749bff3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/cache.json
@@ -0,0 +1,155 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_LMISS_RD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/exception.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/exception.json
new file mode 100644
index 000000000000..344a2d552ad5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/exception.json
@@ -0,0 +1,47 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/instruction.json
new file mode 100644
index 000000000000..25825e14c535
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/instruction.json
@@ -0,0 +1,89 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/memory.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/memory.json
new file mode 100644
index 000000000000..e3d08f1f7c92
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/memory.json
@@ -0,0 +1,20 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/other.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/other.json
new file mode 100644
index 000000000000..20d8365756c5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/other.json
@@ -0,0 +1,5 @@
+[
+ {
+ "ArchStdEvent": "REMOTE_ACCESS"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/pipeline.json
new file mode 100644
index 000000000000..f9fae15f7555
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/pipeline.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json b/tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json
index 913fb200ea52..423767510aff 100644
--- a/tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json
+++ b/tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json
@@ -258,6 +258,78 @@
"BriefDescription": "Last level cache miss, read"
},
{
+ "PublicDescription": "Level 1 data cache long-latency read miss. The counter counts each memory read access counted by L1D_CACHE that incurs additional latency because it returns data from outside the Level 1 data or unified cache of this processing element.",
+ "EventCode": "0x39",
+ "EventName": "L1D_CACHE_LMISS_RD",
+ "BriefDescription": "Level 1 data cache long-latency read miss"
+ },
+ {
+ "PublicDescription": "Micro-operation architecturally executed. The counter counts each operation counted by OP_SPEC that would be executed in a simple sequential execution of the program.",
+ "EventCode": "0x3A",
+ "EventName": "OP_RETIRED",
+ "BriefDescription": "Micro-operation architecturally executed"
+ },
+ {
+ "PublicDescription": "Micro-operation speculatively executed. The counter counts the number of operations executed by the processing element, including those that are executed speculatively and would not be executed in a simple sequential execution of the program.",
+ "EventCode": "0x3B",
+ "EventName": "OP_SPEC",
+ "BriefDescription": "Micro-operation speculatively executed"
+ },
+ {
+ "PublicDescription": "No operation sent for execution. The counter counts every attributable cycle on which no attributable instruction or operation was sent for execution on this processing element.",
+ "EventCode": "0x3C",
+ "EventName": "STALL",
+ "BriefDescription": "No operation sent for execution"
+ },
+ {
+ "PublicDescription": "No operation sent for execution on a slot due to the backend. Counts each slot counted by STALL_SLOT where no attributable instruction or operation was sent for execution because the backend is unable to accept it.",
+ "EventCode": "0x3D",
+ "EventName": "STALL_SLOT_BACKEND",
+ "BriefDescription": "No operation sent for execution on a slot due to the backend"
+ },
+ {
+ "PublicDescription": "No operation sent for execution on a slot due to the frontend. Counts each slot counted by STALL_SLOT where no attributable instruction or operation was sent for execution because there was no attributable instruction or operation available to issue from the processing element from the frontend for the slot.",
+ "EventCode": "0x3E",
+ "EventName": "STALL_SLOT_FRONTEND",
+ "BriefDescription": "No operation sent for execution on a slot due to the frontend"
+ },
+ {
+ "PublicDescription": "No operation sent for execution on a slot. The counter counts on each attributable cycle the number of instruction or operation slots that were not occupied by an instruction or operation attributable to the processing element.",
+ "EventCode": "0x3F",
+ "EventName": "STALL_SLOT",
+ "BriefDescription": "No operation sent for execution on a slot"
+ },
+ {
+ "PublicDescription": "Constant frequency cycles. The counter increments at a constant frequency equal to the rate of increment of the system counter, CNTPCT_EL0.",
+ "EventCode": "0x4004",
+ "EventName": "CNT_CYCLES",
+ "BriefDescription": "Constant frequency cycles"
+ },
+ {
+ "PublicDescription": "Memory stall cycles. The counter counts each cycle counted by STALL_BACKEND where there is a cache miss in the last level of cache within the processing element clock domain",
+ "EventCode": "0x4005",
+ "EventName": "STALL_BACKEND_MEM",
+ "BriefDescription": "Memory stall cycles"
+ },
+ {
+ "PublicDescription": "Level 1 instruction cache long-latency read miss. If the L1I_CACHE_RD event is implemented, the counter counts each access counted by L1I_CACHE_RD that incurs additional latency because it returns instructions from outside of the Level 1 instruction cache of this PE. If the L1I_CACHE_RD event is not implemented, the counter counts each access counted by L1I_CACHE that incurs additional latency because it returns instructions from outside the Level 1 instruction cache of this PE. The event indicates to software that the access missed in the Level 1 instruction cache and might have a significant performance impact due to the additional latency, compared to the latency of an access that hits in the Level 1 instruction cache.",
+ "EventCode": "0x4006",
+ "EventName": "L1I_CACHE_LMISS",
+ "BriefDescription": "Level 1 instruction cache long-latency read miss"
+ },
+ {
+ "PublicDescription": "Level 2 data cache long-latency read miss. The counter counts each memory read access counted by L2D_CACHE that incurs additional latency because it returns data from outside the Level 2 data or unified cache of this processing element. The event indicates to software that the access missed in the Level 2 data or unified cache and might have a significant performance impact compared to the latency of an access that hits in the Level 2 data or unified cache.",
+ "EventCode": "0x4009",
+ "EventName": "L2D_CACHE_LMISS_RD",
+ "BriefDescription": "Level 2 data cache long-latency read miss"
+ },
+ {
+ "PublicDescription": "Level 3 data cache long-latency read miss. The counter counts each memory read access counted by L3D_CACHE that incurs additional latency because it returns data from outside the Level 3 data or unified cache of this processing element. The event indicates to software that the access missed in the Level 3 data or unified cache and might have a significant performance impact compared to the latency of an access that hits in the Level 3 data or unified cache.",
+ "EventCode": "0x400B",
+ "EventName": "L3D_CACHE_LMISS_RD",
+ "BriefDescription": "Level 3 data cache long-latency read miss"
+ },
+ {
"PublicDescription": "SIMD Instruction architecturally executed.",
"EventCode": "0x8000",
"EventName": "SIMD_INST_RETIRED",
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json
index dda8e59149d2..6970203cb247 100644
--- a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json
@@ -229,5 +229,5 @@
"BriefDescription": "Store bound L3 topdown metric",
"MetricGroup": "TopDownL3",
"MetricName": "store_bound"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json
index 61514d38601b..2b3cb55df288 100644
--- a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json
@@ -1,56 +1,56 @@
[
{
- "EventCode": "0x00",
- "EventName": "uncore_hisi_ddrc.flux_wr",
+ "ConfigCode": "0x00",
+ "EventName": "flux_wr",
"BriefDescription": "DDRC total write operations",
"PublicDescription": "DDRC total write operations",
"Unit": "hisi_sccl,ddrc"
},
{
- "EventCode": "0x01",
- "EventName": "uncore_hisi_ddrc.flux_rd",
+ "ConfigCode": "0x01",
+ "EventName": "flux_rd",
"BriefDescription": "DDRC total read operations",
"PublicDescription": "DDRC total read operations",
"Unit": "hisi_sccl,ddrc"
},
{
- "EventCode": "0x02",
- "EventName": "uncore_hisi_ddrc.flux_wcmd",
+ "ConfigCode": "0x02",
+ "EventName": "flux_wcmd",
"BriefDescription": "DDRC write commands",
"PublicDescription": "DDRC write commands",
"Unit": "hisi_sccl,ddrc"
},
{
- "EventCode": "0x03",
- "EventName": "uncore_hisi_ddrc.flux_rcmd",
+ "ConfigCode": "0x03",
+ "EventName": "flux_rcmd",
"BriefDescription": "DDRC read commands",
"PublicDescription": "DDRC read commands",
"Unit": "hisi_sccl,ddrc"
},
{
- "EventCode": "0x04",
- "EventName": "uncore_hisi_ddrc.pre_cmd",
+ "ConfigCode": "0x04",
+ "EventName": "pre_cmd",
"BriefDescription": "DDRC precharge commands",
"PublicDescription": "DDRC precharge commands",
"Unit": "hisi_sccl,ddrc"
},
{
- "EventCode": "0x05",
- "EventName": "uncore_hisi_ddrc.act_cmd",
+ "ConfigCode": "0x05",
+ "EventName": "act_cmd",
"BriefDescription": "DDRC active commands",
"PublicDescription": "DDRC active commands",
"Unit": "hisi_sccl,ddrc"
},
{
- "EventCode": "0x06",
- "EventName": "uncore_hisi_ddrc.rnk_chg",
+ "ConfigCode": "0x06",
+ "EventName": "rnk_chg",
"BriefDescription": "DDRC rank commands",
"PublicDescription": "DDRC rank commands",
"Unit": "hisi_sccl,ddrc"
},
{
- "EventCode": "0x07",
- "EventName": "uncore_hisi_ddrc.rw_chg",
+ "ConfigCode": "0x07",
+ "EventName": "rw_chg",
"BriefDescription": "DDRC read and write changes",
"PublicDescription": "DDRC read and write changes",
"Unit": "hisi_sccl,ddrc"
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json
index ada86782933f..9a7ec7af2060 100644
--- a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json
@@ -1,72 +1,152 @@
[
{
- "EventCode": "0x00",
- "EventName": "uncore_hisi_hha.rx_ops_num",
+ "ConfigCode": "0x00",
+ "EventName": "rx_ops_num",
"BriefDescription": "The number of all operations received by the HHA",
"PublicDescription": "The number of all operations received by the HHA",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x01",
- "EventName": "uncore_hisi_hha.rx_outer",
+ "ConfigCode": "0x01",
+ "EventName": "rx_outer",
"BriefDescription": "The number of all operations received by the HHA from another socket",
"PublicDescription": "The number of all operations received by the HHA from another socket",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x02",
- "EventName": "uncore_hisi_hha.rx_sccl",
+ "ConfigCode": "0x02",
+ "EventName": "rx_sccl",
"BriefDescription": "The number of all operations received by the HHA from another SCCL in this socket",
"PublicDescription": "The number of all operations received by the HHA from another SCCL in this socket",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x03",
- "EventName": "uncore_hisi_hha.rx_ccix",
+ "ConfigCode": "0x03",
+ "EventName": "rx_ccix",
"BriefDescription": "Count of the number of operations that HHA has received from CCIX",
"PublicDescription": "Count of the number of operations that HHA has received from CCIX",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x1c",
- "EventName": "uncore_hisi_hha.rd_ddr_64b",
+ "ConfigCode": "0x4",
+ "EventName": "rx_wbi",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x5",
+ "EventName": "rx_wbip",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x11",
+ "EventName": "rx_wtistash",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x1c",
+ "EventName": "rd_ddr_64b",
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 64 bytes",
"PublicDescription": "The number of read operations sent by HHA to DDRC which size is 64bytes",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x1d",
- "EventName": "uncore_hisi_hha.wr_ddr_64b",
+ "ConfigCode": "0x1d",
+ "EventName": "wr_ddr_64b",
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 64 bytes",
"PublicDescription": "The number of write operations sent by HHA to DDRC which size is 64 bytes",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x1e",
- "EventName": "uncore_hisi_hha.rd_ddr_128b",
+ "ConfigCode": "0x1e",
+ "EventName": "rd_ddr_128b",
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 128 bytes",
"PublicDescription": "The number of read operations sent by HHA to DDRC which size is 128 bytes",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x1f",
- "EventName": "uncore_hisi_hha.wr_ddr_128b",
+ "ConfigCode": "0x1f",
+ "EventName": "wr_ddr_128b",
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 128 bytes",
"PublicDescription": "The number of write operations sent by HHA to DDRC which size is 128 bytes",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x20",
- "EventName": "uncore_hisi_hha.spill_num",
+ "ConfigCode": "0x20",
+ "EventName": "spill_num",
"BriefDescription": "Count of the number of spill operations that the HHA has sent",
"PublicDescription": "Count of the number of spill operations that the HHA has sent",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x21",
- "EventName": "uncore_hisi_hha.spill_success",
+ "ConfigCode": "0x21",
+ "EventName": "spill_success",
"BriefDescription": "Count of the number of successful spill operations that the HHA has sent",
"PublicDescription": "Count of the number of successful spill operations that the HHA has sent",
"Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x23",
+ "EventName": "bi_num",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x32",
+ "EventName": "mediated_num",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x33",
+ "EventName": "tx_snp_num",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x34",
+ "EventName": "tx_snp_outer",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x35",
+ "EventName": "tx_snp_ccix",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x38",
+ "EventName": "rx_snprspdata",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x3c",
+ "EventName": "rx_snprsp_outer",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x40",
+ "EventName": "sdir-lookup",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x41",
+ "EventName": "edir-lookup",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x42",
+ "EventName": "sdir-hit",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x43",
+ "EventName": "edir-hit",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x4c",
+ "EventName": "sdir-home-migrate",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x4d",
+ "EventName": "edir-home-migrate",
+ "Unit": "hisi_sccl,hha"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json
index 67ab19e8cf3a..e3479b65be9a 100644
--- a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json
@@ -1,91 +1,91 @@
[
{
- "EventCode": "0x00",
- "EventName": "uncore_hisi_l3c.rd_cpipe",
+ "ConfigCode": "0x00",
+ "EventName": "rd_cpipe",
"BriefDescription": "Total read accesses",
"PublicDescription": "Total read accesses",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x01",
- "EventName": "uncore_hisi_l3c.wr_cpipe",
+ "ConfigCode": "0x01",
+ "EventName": "wr_cpipe",
"BriefDescription": "Total write accesses",
"PublicDescription": "Total write accesses",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x02",
- "EventName": "uncore_hisi_l3c.rd_hit_cpipe",
+ "ConfigCode": "0x02",
+ "EventName": "rd_hit_cpipe",
"BriefDescription": "Total read hits",
"PublicDescription": "Total read hits",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x03",
- "EventName": "uncore_hisi_l3c.wr_hit_cpipe",
+ "ConfigCode": "0x03",
+ "EventName": "wr_hit_cpipe",
"BriefDescription": "Total write hits",
"PublicDescription": "Total write hits",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x04",
- "EventName": "uncore_hisi_l3c.victim_num",
+ "ConfigCode": "0x04",
+ "EventName": "victim_num",
"BriefDescription": "l3c precharge commands",
"PublicDescription": "l3c precharge commands",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x20",
- "EventName": "uncore_hisi_l3c.rd_spipe",
+ "ConfigCode": "0x20",
+ "EventName": "rd_spipe",
"BriefDescription": "Count of the number of read lines that come from this cluster of CPU core in spipe",
"PublicDescription": "Count of the number of read lines that come from this cluster of CPU core in spipe",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x21",
- "EventName": "uncore_hisi_l3c.wr_spipe",
+ "ConfigCode": "0x21",
+ "EventName": "wr_spipe",
"BriefDescription": "Count of the number of write lines that come from this cluster of CPU core in spipe",
"PublicDescription": "Count of the number of write lines that come from this cluster of CPU core in spipe",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x22",
- "EventName": "uncore_hisi_l3c.rd_hit_spipe",
+ "ConfigCode": "0x22",
+ "EventName": "rd_hit_spipe",
"BriefDescription": "Count of the number of read lines that hits in spipe of this L3C",
"PublicDescription": "Count of the number of read lines that hits in spipe of this L3C",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x23",
- "EventName": "uncore_hisi_l3c.wr_hit_spipe",
+ "ConfigCode": "0x23",
+ "EventName": "wr_hit_spipe",
"BriefDescription": "Count of the number of write lines that hits in spipe of this L3C",
"PublicDescription": "Count of the number of write lines that hits in spipe of this L3C",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x29",
- "EventName": "uncore_hisi_l3c.back_invalid",
+ "ConfigCode": "0x29",
+ "EventName": "back_invalid",
"BriefDescription": "Count of the number of L3C back invalid operations",
"PublicDescription": "Count of the number of L3C back invalid operations",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x40",
- "EventName": "uncore_hisi_l3c.retry_cpu",
+ "ConfigCode": "0x40",
+ "EventName": "retry_cpu",
"BriefDescription": "Count of the number of retry that L3C suppresses the CPU operations",
"PublicDescription": "Count of the number of retry that L3C suppresses the CPU operations",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x41",
- "EventName": "uncore_hisi_l3c.retry_ring",
+ "ConfigCode": "0x41",
+ "EventName": "retry_ring",
"BriefDescription": "Count of the number of retry that L3C suppresses the ring operations",
"PublicDescription": "Count of the number of retry that L3C suppresses the ring operations",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x42",
- "EventName": "uncore_hisi_l3c.prefetch_drop",
+ "ConfigCode": "0x42",
+ "EventName": "prefetch_drop",
"BriefDescription": "Count of the number of prefetch drops from this L3C",
"PublicDescription": "Count of the number of prefetch drops from this L3C",
"Unit": "hisi_sccl,l3c"
diff --git a/tools/perf/pmu-events/arch/arm64/mapfile.csv b/tools/perf/pmu-events/arch/arm64/mapfile.csv
index c43591d831b8..31d8b57ca9bb 100644
--- a/tools/perf/pmu-events/arch/arm64/mapfile.csv
+++ b/tools/perf/pmu-events/arch/arm64/mapfile.csv
@@ -18,6 +18,7 @@
0x00000000410fd080,v1,arm/cortex-a57-a72,core
0x00000000410fd0b0,v1,arm/cortex-a76-n1,core
0x00000000410fd0c0,v1,arm/cortex-a76-n1,core
+0x00000000410fd400,v1,arm/neoverse-v1,core
0x00000000420f5160,v1,cavium/thunderx2,core
0x00000000430f0af0,v1,cavium/thunderx2,core
0x00000000460f0010,v1,fujitsu/a64fx,core
diff --git a/tools/perf/pmu-events/arch/nds32/n13/atcpmu.json b/tools/perf/pmu-events/arch/nds32/n13/atcpmu.json
index 5347350c360c..3e7ac409d894 100644
--- a/tools/perf/pmu-events/arch/nds32/n13/atcpmu.json
+++ b/tools/perf/pmu-events/arch/nds32/n13/atcpmu.json
@@ -286,5 +286,5 @@
"EventCode": "0x21e",
"EventName": "pop25_inst",
"BriefDescription": "V3 POP25 instructions"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/basic.json b/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
index 2dd8dafff2ef..783de7f1aeaa 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
@@ -82,5 +82,5 @@
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1D Penalty Cycles",
"PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
index db286f19e7b6..3f28007d3892 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
@@ -110,5 +110,5 @@
"EventName": "AES_BLOCKED_CYCLES",
"BriefDescription": "AES Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/extended.json b/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
index b6b7f29ca831..86bd8ba9391d 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
@@ -124,5 +124,5 @@
"EventName": "L2C_STORES_SENT",
"BriefDescription": "L2C Stores Sent",
"PublicDescription": "Incremented by one for every store sent to Level-2 (L1.5) cache"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/basic.json b/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
index 2dd8dafff2ef..783de7f1aeaa 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
@@ -82,5 +82,5 @@
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1D Penalty Cycles",
"PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
index db286f19e7b6..3f28007d3892 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
@@ -110,5 +110,5 @@
"EventName": "AES_BLOCKED_CYCLES",
"BriefDescription": "AES Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
index 5da8296b667e..1a5e4f89c57e 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
@@ -390,5 +390,5 @@
"EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
"BriefDescription": "Cycle count with two threads active",
"PublicDescription": "Cycle count with two threads active"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/basic.json b/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
index 17fb5241928b..fc762e9f1d6e 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
@@ -54,5 +54,5 @@
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
"BriefDescription": "Problem-State Instructions",
"PublicDescription": "Problem-State Instruction Count"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
index db286f19e7b6..3f28007d3892 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
@@ -110,5 +110,5 @@
"EventName": "AES_BLOCKED_CYCLES",
"BriefDescription": "AES Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
index 89e070727e1b..4942b20a1ea1 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
@@ -369,5 +369,5 @@
"EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
"BriefDescription": "Cycle count with two threads active",
"PublicDescription": "Cycle count with two threads active"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/basic.json b/tools/perf/pmu-events/arch/s390/cf_z15/basic.json
index 17fb5241928b..fc762e9f1d6e 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z15/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/basic.json
@@ -54,5 +54,5 @@
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
"BriefDescription": "Problem-State Instructions",
"PublicDescription": "Problem-State Instruction Count"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json
index db286f19e7b6..3f28007d3892 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json
@@ -110,5 +110,5 @@
"EventName": "AES_BLOCKED_CYCLES",
"BriefDescription": "AES Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json b/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json
index c998e4f1d1d2..ad79189050a0 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json
@@ -26,5 +26,5 @@
"EventName": "ECC_BLOCKED_CYCLES_COUNT",
"BriefDescription": "ECC Blocked Cycles Count",
"PublicDescription": "This counter counts the total number of CPU cycles blocked for the elliptic-curve cryptography (ECC) functions issued by the CPU because the ECC coprocessor is busy performing a function issued by another CPU."
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/extended.json b/tools/perf/pmu-events/arch/s390/cf_z15/extended.json
index 24c4ba2a9ae5..8ac61f8f286b 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z15/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/extended.json
@@ -397,5 +397,5 @@
"EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
"BriefDescription": "Cycle count with two threads active",
"PublicDescription": "Cycle count with two threads active"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/basic.json b/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
index 2dd8dafff2ef..783de7f1aeaa 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
@@ -82,5 +82,5 @@
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1D Penalty Cycles",
"PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
index db286f19e7b6..3f28007d3892 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
@@ -110,5 +110,5 @@
"EventName": "AES_BLOCKED_CYCLES",
"BriefDescription": "AES Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/extended.json b/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
index b7b42a870bb0..86b29fd181cf 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
@@ -166,5 +166,5 @@
"EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json b/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
index 2dd8dafff2ef..783de7f1aeaa 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
@@ -82,5 +82,5 @@
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1D Penalty Cycles",
"PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json b/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
index db286f19e7b6..3f28007d3892 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
@@ -110,5 +110,5 @@
"EventName": "AES_BLOCKED_CYCLES",
"BriefDescription": "AES Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json b/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
index 162251037219..f40cbed89418 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
@@ -243,5 +243,5 @@
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
"PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/test/test_soc/cpu/uncore.json b/tools/perf/pmu-events/arch/test/test_soc/cpu/uncore.json
index 788766f45dbc..73089c682f80 100644
--- a/tools/perf/pmu-events/arch/test/test_soc/cpu/uncore.json
+++ b/tools/perf/pmu-events/arch/test/test_soc/cpu/uncore.json
@@ -38,5 +38,5 @@
"BriefDescription": "Total cache hits",
"PublicDescription": "Total cache hits",
"Unit": "imc"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json b/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json
index 0f681a6e10ea..c7e7528db315 100644
--- a/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json
+++ b/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json
@@ -6,4 +6,11 @@
"Unit": "sys_ddr_pmu",
"Compat": "v8"
},
+ {
+ "BriefDescription": "ccn read-cycles event",
+ "ConfigCode": "0x2c",
+ "EventName": "sys_ccn_pmu.read_cycles",
+ "Unit": "sys_ccn_pmu",
+ "Compat": "0x01"
+ }
]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
index 57ddbb9f9b31..14b9a8ab15b9 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
@@ -311,5 +311,5 @@
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency"
- },
+ }
]
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 7c887d37b893..2e7c4153875b 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -45,6 +45,7 @@
#include <sys/resource.h> /* getrlimit */
#include <ftw.h>
#include <sys/stat.h>
+#include <linux/compiler.h>
#include <linux/list.h>
#include "jsmn.h"
#include "json.h"
@@ -70,7 +71,7 @@ struct json_event {
char *metric_constraint;
};
-enum aggr_mode_class convert(const char *aggr_mode)
+static enum aggr_mode_class convert(const char *aggr_mode)
{
if (!strcmp(aggr_mode, "PerCore"))
return PerCore;
@@ -81,8 +82,6 @@ enum aggr_mode_class convert(const char *aggr_mode)
return -1;
}
-typedef int (*func)(void *data, struct json_event *je);
-
static LIST_HEAD(sys_event_tables);
struct sys_event_table {
@@ -361,7 +360,7 @@ static int close_table;
static void print_events_table_prefix(FILE *fp, const char *tblname)
{
- fprintf(fp, "struct pmu_event %s[] = {\n", tblname);
+ fprintf(fp, "static const struct pmu_event %s[] = {\n", tblname);
close_table = 1;
}
@@ -369,7 +368,7 @@ static int print_events_table_entry(void *data, struct json_event *je)
{
struct perf_entry_data *pd = data;
FILE *outfp = pd->outfp;
- char *topic = pd->topic;
+ char *topic_local = pd->topic;
/*
* TODO: Remove formatting chars after debugging to reduce
@@ -384,7 +383,7 @@ static int print_events_table_entry(void *data, struct json_event *je)
fprintf(outfp, "\t.desc = \"%s\",\n", je->desc);
if (je->compat)
fprintf(outfp, "\t.compat = \"%s\",\n", je->compat);
- fprintf(outfp, "\t.topic = \"%s\",\n", topic);
+ fprintf(outfp, "\t.topic = \"%s\",\n", topic_local);
if (je->long_desc && je->long_desc[0])
fprintf(outfp, "\t.long_desc = \"%s\",\n", je->long_desc);
if (je->pmu)
@@ -470,7 +469,7 @@ static void free_arch_std_events(void)
}
}
-static int save_arch_std_events(void *data, struct json_event *je)
+static int save_arch_std_events(void *data __maybe_unused, struct json_event *je)
{
struct event_struct *es;
@@ -575,10 +574,12 @@ static int json_events(const char *fn,
struct json_event je = {};
char *arch_std = NULL;
unsigned long long eventcode = 0;
+ unsigned long long configcode = 0;
struct msrmap *msr = NULL;
jsmntok_t *msrval = NULL;
jsmntok_t *precise = NULL;
jsmntok_t *obj = tok++;
+ bool configcode_present = false;
EXPECT(obj->type == JSMN_OBJECT, obj, "expected object");
for (j = 0; j < obj->size; j += 2) {
@@ -601,6 +602,12 @@ static int json_events(const char *fn,
addfield(map, &code, "", "", val);
eventcode |= strtoul(code, NULL, 0);
free(code);
+ } else if (json_streq(map, field, "ConfigCode")) {
+ char *code = NULL;
+ addfield(map, &code, "", "", val);
+ configcode |= strtoul(code, NULL, 0);
+ free(code);
+ configcode_present = true;
} else if (json_streq(map, field, "ExtSel")) {
char *code = NULL;
addfield(map, &code, "", "", val);
@@ -682,7 +689,10 @@ static int json_events(const char *fn,
addfield(map, &extra_desc, " ",
"(Precise event)", NULL);
}
- snprintf(buf, sizeof buf, "event=%#llx", eventcode);
+ if (configcode_present)
+ snprintf(buf, sizeof buf, "config=%#llx", configcode);
+ else
+ snprintf(buf, sizeof buf, "event=%#llx", eventcode);
addfield(map, &event, ",", buf, NULL);
if (je.desc && extra_desc)
addfield(map, &je.desc, " ", extra_desc, NULL);
@@ -786,7 +796,7 @@ static bool is_sys_dir(char *fname)
static void print_mapping_table_prefix(FILE *outfp)
{
- fprintf(outfp, "struct pmu_events_map pmu_events_map[] = {\n");
+ fprintf(outfp, "const struct pmu_events_map pmu_events_map[] = {\n");
}
static void print_mapping_table_suffix(FILE *outfp)
@@ -820,7 +830,7 @@ static void print_mapping_test_table(FILE *outfp)
static void print_system_event_mapping_table_prefix(FILE *outfp)
{
- fprintf(outfp, "\nstruct pmu_sys_events pmu_sys_event_tables[] = {");
+ fprintf(outfp, "\nconst struct pmu_sys_events pmu_sys_event_tables[] = {");
}
static void print_system_event_mapping_table_suffix(FILE *outfp)
@@ -1196,7 +1206,7 @@ int main(int argc, char *argv[])
const char *arch;
const char *output_file;
const char *start_dirname;
- char *err_string_ext = "";
+ const char *err_string_ext = "";
struct stat stbuf;
prog = basename(argv[0]);
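The ConfigCode handling added to json_events() above reduces to one selection between a raw "config=" term and the usual "event=" term, as used by the reworked hisi_sccl uncore JSONs and the test_soc sys PMU entry earlier in this diff. A minimal standalone sketch of that rule, assuming only standard C; the wrapper function and main() below are illustrative and not part of jevents.c:

#include <stdbool.h>
#include <stdio.h>

/* configcode_present mirrors the flag json_events() sets when a
 * "ConfigCode" field is seen; eventcode is the accumulated event number. */
static void format_perf_term(char *buf, size_t sz, bool configcode_present,
			     unsigned long long eventcode,
			     unsigned long long configcode)
{
	if (configcode_present)
		snprintf(buf, sz, "config=%#llx", configcode);
	else
		snprintf(buf, sz, "event=%#llx", eventcode);
}

int main(void)
{
	char buf[64];

	/* e.g. the hisi_sccl,hha "rd_ddr_64b" entry with ConfigCode 0x1c */
	format_perf_term(buf, sizeof(buf), true, 0, 0x1c);
	printf("%s\n", buf);	/* prints: config=0x1c */
	return 0;
}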
diff --git a/tools/perf/pmu-events/jsmn.c b/tools/perf/pmu-events/jsmn.c
index 11d1fa18bfa5..831dc44c4558 100644
--- a/tools/perf/pmu-events/jsmn.c
+++ b/tools/perf/pmu-events/jsmn.c
@@ -24,6 +24,7 @@
#include <stdlib.h>
#include "jsmn.h"
+#define JSMN_STRICT
/*
* Allocates a fresh unused token from the token pool.
@@ -176,6 +177,14 @@ jsmnerr_t jsmn_parse(jsmn_parser *parser, const char *js, size_t len,
jsmnerr_t r;
int i;
jsmntok_t *token;
+#ifdef JSMN_STRICT
+ /*
+ * Keeps track of whether a new object/list/primitive is expected. New items are only
+ * allowed after an opening brace, comma or colon. A closing brace after a comma is not
+ * valid JSON.
+ */
+ int expecting_item = 1;
+#endif
for (; parser->pos < len; parser->pos++) {
char c;
@@ -185,6 +194,10 @@ jsmnerr_t jsmn_parse(jsmn_parser *parser, const char *js, size_t len,
switch (c) {
case '{':
case '[':
+#ifdef JSMN_STRICT
+ if (!expecting_item)
+ return JSMN_ERROR_INVAL;
+#endif
token = jsmn_alloc_token(parser, tokens, num_tokens);
if (token == NULL)
return JSMN_ERROR_NOMEM;
@@ -196,6 +209,10 @@ jsmnerr_t jsmn_parse(jsmn_parser *parser, const char *js, size_t len,
break;
case '}':
case ']':
+#ifdef JSMN_STRICT
+ if (expecting_item)
+ return JSMN_ERROR_INVAL;
+#endif
type = (c == '}' ? JSMN_OBJECT : JSMN_ARRAY);
for (i = parser->toknext - 1; i >= 0; i--) {
token = &tokens[i];
@@ -219,6 +236,11 @@ jsmnerr_t jsmn_parse(jsmn_parser *parser, const char *js, size_t len,
}
break;
case '\"':
+#ifdef JSMN_STRICT
+ if (!expecting_item)
+ return JSMN_ERROR_INVAL;
+ expecting_item = 0;
+#endif
r = jsmn_parse_string(parser, js, len, tokens,
num_tokens);
if (r < 0)
@@ -229,11 +251,15 @@ jsmnerr_t jsmn_parse(jsmn_parser *parser, const char *js, size_t len,
case '\t':
case '\r':
case '\n':
- case ':':
- case ',':
case ' ':
break;
#ifdef JSMN_STRICT
+ case ':':
+ case ',':
+ if (expecting_item)
+ return JSMN_ERROR_INVAL;
+ expecting_item = 1;
+ break;
/*
* In strict mode primitives are:
* numbers and booleans.
@@ -253,6 +279,9 @@ jsmnerr_t jsmn_parse(jsmn_parser *parser, const char *js, size_t len,
case 'f':
case 'n':
#else
+ case ':':
+ case ',':
+ break;
/*
* In non-strict mode every unquoted value
* is a primitive.
@@ -260,6 +289,12 @@ jsmnerr_t jsmn_parse(jsmn_parser *parser, const char *js, size_t len,
/*FALL THROUGH */
default:
#endif
+
+#ifdef JSMN_STRICT
+ if (!expecting_item)
+ return JSMN_ERROR_INVAL;
+ expecting_item = 0;
+#endif
r = jsmn_parse_primitive(parser, js, len, tokens,
num_tokens);
if (r < 0)
@@ -282,7 +317,11 @@ jsmnerr_t jsmn_parse(jsmn_parser *parser, const char *js, size_t len,
return JSMN_ERROR_PART;
}
+#ifdef JSMN_STRICT
+ return expecting_item ? JSMN_ERROR_INVAL : JSMN_SUCCESS;
+#else
return JSMN_SUCCESS;
+#endif
}
/*
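With JSMN_STRICT now defined at the top of jsmn.c, the expecting_item tracking above makes the parser reject the stray "{," patterns that the Arm JSON updates earlier in this diff had to clean up by hand. A short standalone check of that behaviour, assuming the usual jsmn_init()/jsmn_parse() entry points declared in jsmn.h:

#include <stdio.h>
#include <string.h>
#include "jsmn.h"

int main(void)
{
	jsmn_parser parser;
	jsmntok_t tokens[8];
	const char *bad = "[ {, \"ArchStdEvent\": \"SW_INCR\" } ]";

	jsmn_init(&parser);
	/* The comma directly after '{' arrives while expecting_item is set,
	 * so strict parsing returns an error instead of JSMN_SUCCESS. */
	if (jsmn_parse(&parser, bad, strlen(bad), tokens, 8) != JSMN_SUCCESS)
		printf("rejected, as expected\n");
	return 0;
}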
diff --git a/tools/perf/pmu-events/pmu-events.h b/tools/perf/pmu-events/pmu-events.h
index 5c2bf7275c1c..6efe73976440 100644
--- a/tools/perf/pmu-events/pmu-events.h
+++ b/tools/perf/pmu-events/pmu-events.h
@@ -41,19 +41,19 @@ struct pmu_events_map {
const char *cpuid;
const char *version;
const char *type; /* core, uncore etc */
- struct pmu_event *table;
+ const struct pmu_event *table;
};
struct pmu_sys_events {
const char *name;
- struct pmu_event *table;
+ const struct pmu_event *table;
};
/*
* Global table mapping each known CPU for the architecture to its
* table of PMU events.
*/
-extern struct pmu_events_map pmu_events_map[];
-extern struct pmu_sys_events pmu_sys_event_tables[];
+extern const struct pmu_events_map pmu_events_map[];
+extern const struct pmu_sys_events pmu_sys_event_tables[];
#endif
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 9b4a765e4b73..f439bd49da19 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -606,7 +606,8 @@ static int do_test_code_reading(bool try_kcore)
}
ret = perf_event__synthesize_thread_map(NULL, threads,
- perf_event__process, machine, false);
+ perf_event__process, machine,
+ true, false);
if (ret < 0) {
pr_debug("perf_event__synthesize_thread_map failed\n");
goto out_err;
diff --git a/tools/perf/tests/expand-cgroup.c b/tools/perf/tests/expand-cgroup.c
index 0e46aeb843ce..aaad51aba12f 100644
--- a/tools/perf/tests/expand-cgroup.c
+++ b/tools/perf/tests/expand-cgroup.c
@@ -193,7 +193,7 @@ static int expand_metric_events(void)
.metric_name = NULL,
},
};
- struct pmu_events_map ev_map = {
+ const struct pmu_events_map ev_map = {
.cpuid = "test",
.version = "1",
.type = "core",
diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
index 4d01051951cd..077783223ce0 100644
--- a/tools/perf/tests/expr.c
+++ b/tools/perf/tests/expr.c
@@ -1,16 +1,62 @@
// SPDX-License-Identifier: GPL-2.0
#include "util/debug.h"
#include "util/expr.h"
+#include "util/smt.h"
#include "tests.h"
#include <stdlib.h>
#include <string.h>
#include <linux/zalloc.h>
+static int test_ids_union(void)
+{
+ struct hashmap *ids1, *ids2;
+
+ /* Empty union. */
+ ids1 = ids__new();
+ TEST_ASSERT_VAL("ids__new", ids1);
+ ids2 = ids__new();
+ TEST_ASSERT_VAL("ids__new", ids2);
+
+ ids1 = ids__union(ids1, ids2);
+ TEST_ASSERT_EQUAL("union", (int)hashmap__size(ids1), 0);
+
+ /* Union {foo, bar} against {}. */
+ ids2 = ids__new();
+ TEST_ASSERT_VAL("ids__new", ids2);
+
+ TEST_ASSERT_EQUAL("ids__insert", ids__insert(ids1, strdup("foo")), 0);
+ TEST_ASSERT_EQUAL("ids__insert", ids__insert(ids1, strdup("bar")), 0);
+
+ ids1 = ids__union(ids1, ids2);
+ TEST_ASSERT_EQUAL("union", (int)hashmap__size(ids1), 2);
+
+ /* Union {foo, bar} against {foo}. */
+ ids2 = ids__new();
+ TEST_ASSERT_VAL("ids__new", ids2);
+ TEST_ASSERT_EQUAL("ids__insert", ids__insert(ids2, strdup("foo")), 0);
+
+ ids1 = ids__union(ids1, ids2);
+ TEST_ASSERT_EQUAL("union", (int)hashmap__size(ids1), 2);
+
+ /* Union {foo, bar} against {bar,baz}. */
+ ids2 = ids__new();
+ TEST_ASSERT_VAL("ids__new", ids2);
+ TEST_ASSERT_EQUAL("ids__insert", ids__insert(ids2, strdup("bar")), 0);
+ TEST_ASSERT_EQUAL("ids__insert", ids__insert(ids2, strdup("baz")), 0);
+
+ ids1 = ids__union(ids1, ids2);
+ TEST_ASSERT_EQUAL("union", (int)hashmap__size(ids1), 3);
+
+ ids__free(ids1);
+
+ return 0;
+}
+
static int test(struct expr_parse_ctx *ctx, const char *e, double val2)
{
double val;
- if (expr__parse(&val, ctx, e, 1))
+ if (expr__parse(&val, ctx, e))
TEST_ASSERT_VAL("parse test failed", 0);
TEST_ASSERT_VAL("unexpected value", val == val2);
return 0;
@@ -22,67 +68,90 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
const char *p;
double val;
int ret;
- struct expr_parse_ctx ctx;
-
- expr__ctx_init(&ctx);
- expr__add_id_val(&ctx, strdup("FOO"), 1);
- expr__add_id_val(&ctx, strdup("BAR"), 2);
-
- ret = test(&ctx, "1+1", 2);
- ret |= test(&ctx, "FOO+BAR", 3);
- ret |= test(&ctx, "(BAR/2)%2", 1);
- ret |= test(&ctx, "1 - -4", 5);
- ret |= test(&ctx, "(FOO-1)*2 + (BAR/2)%2 - -4", 5);
- ret |= test(&ctx, "1-1 | 1", 1);
- ret |= test(&ctx, "1-1 & 1", 0);
- ret |= test(&ctx, "min(1,2) + 1", 2);
- ret |= test(&ctx, "max(1,2) + 1", 3);
- ret |= test(&ctx, "1+1 if 3*4 else 0", 2);
- ret |= test(&ctx, "1.1 + 2.1", 3.2);
- ret |= test(&ctx, ".1 + 2.", 2.1);
- ret |= test(&ctx, "d_ratio(1, 2)", 0.5);
- ret |= test(&ctx, "d_ratio(2.5, 0)", 0);
- ret |= test(&ctx, "1.1 < 2.2", 1);
- ret |= test(&ctx, "2.2 > 1.1", 1);
- ret |= test(&ctx, "1.1 < 1.1", 0);
- ret |= test(&ctx, "2.2 > 2.2", 0);
- ret |= test(&ctx, "2.2 < 1.1", 0);
- ret |= test(&ctx, "1.1 > 2.2", 0);
-
- if (ret)
+ struct expr_parse_ctx *ctx;
+
+ TEST_ASSERT_EQUAL("ids_union", test_ids_union(), 0);
+
+ ctx = expr__ctx_new();
+ TEST_ASSERT_VAL("expr__ctx_new", ctx);
+ expr__add_id_val(ctx, strdup("FOO"), 1);
+ expr__add_id_val(ctx, strdup("BAR"), 2);
+
+ ret = test(ctx, "1+1", 2);
+ ret |= test(ctx, "FOO+BAR", 3);
+ ret |= test(ctx, "(BAR/2)%2", 1);
+ ret |= test(ctx, "1 - -4", 5);
+ ret |= test(ctx, "(FOO-1)*2 + (BAR/2)%2 - -4", 5);
+ ret |= test(ctx, "1-1 | 1", 1);
+ ret |= test(ctx, "1-1 & 1", 0);
+ ret |= test(ctx, "min(1,2) + 1", 2);
+ ret |= test(ctx, "max(1,2) + 1", 3);
+ ret |= test(ctx, "1+1 if 3*4 else 0", 2);
+ ret |= test(ctx, "1.1 + 2.1", 3.2);
+ ret |= test(ctx, ".1 + 2.", 2.1);
+ ret |= test(ctx, "d_ratio(1, 2)", 0.5);
+ ret |= test(ctx, "d_ratio(2.5, 0)", 0);
+ ret |= test(ctx, "1.1 < 2.2", 1);
+ ret |= test(ctx, "2.2 > 1.1", 1);
+ ret |= test(ctx, "1.1 < 1.1", 0);
+ ret |= test(ctx, "2.2 > 2.2", 0);
+ ret |= test(ctx, "2.2 < 1.1", 0);
+ ret |= test(ctx, "1.1 > 2.2", 0);
+
+ if (ret) {
+ expr__ctx_free(ctx);
return ret;
+ }
p = "FOO/0";
- ret = expr__parse(&val, &ctx, p, 1);
+ ret = expr__parse(&val, ctx, p);
TEST_ASSERT_VAL("division by zero", ret == -1);
p = "BAR/";
- ret = expr__parse(&val, &ctx, p, 1);
+ ret = expr__parse(&val, ctx, p);
TEST_ASSERT_VAL("missing operand", ret == -1);
- expr__ctx_clear(&ctx);
- TEST_ASSERT_VAL("find other",
- expr__find_other("FOO + BAR + BAZ + BOZO", "FOO",
- &ctx, 1) == 0);
- TEST_ASSERT_VAL("find other", hashmap__size(&ctx.ids) == 3);
- TEST_ASSERT_VAL("find other", hashmap__find(&ctx.ids, "BAR",
+ expr__ctx_clear(ctx);
+ TEST_ASSERT_VAL("find ids",
+ expr__find_ids("FOO + BAR + BAZ + BOZO", "FOO",
+ ctx) == 0);
+ TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 3);
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BAR",
(void **)&val_ptr));
- TEST_ASSERT_VAL("find other", hashmap__find(&ctx.ids, "BAZ",
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BAZ",
(void **)&val_ptr));
- TEST_ASSERT_VAL("find other", hashmap__find(&ctx.ids, "BOZO",
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BOZO",
(void **)&val_ptr));
- expr__ctx_clear(&ctx);
- TEST_ASSERT_VAL("find other",
- expr__find_other("EVENT1\\,param\\=?@ + EVENT2\\,param\\=?@",
- NULL, &ctx, 3) == 0);
- TEST_ASSERT_VAL("find other", hashmap__size(&ctx.ids) == 2);
- TEST_ASSERT_VAL("find other", hashmap__find(&ctx.ids, "EVENT1,param=3/",
+ expr__ctx_clear(ctx);
+ ctx->runtime = 3;
+ TEST_ASSERT_VAL("find ids",
+ expr__find_ids("EVENT1\\,param\\=?@ + EVENT2\\,param\\=?@",
+ NULL, ctx) == 0);
+ TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 2);
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "EVENT1,param=3@",
(void **)&val_ptr));
- TEST_ASSERT_VAL("find other", hashmap__find(&ctx.ids, "EVENT2,param=3/",
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "EVENT2,param=3@",
(void **)&val_ptr));
- expr__ctx_clear(&ctx);
+ /* Only EVENT1 or EVENT2 need be measured depending on the value of smt_on. */
+ expr__ctx_clear(ctx);
+ TEST_ASSERT_VAL("find ids",
+ expr__find_ids("EVENT1 if #smt_on else EVENT2",
+ NULL, ctx) == 0);
+ TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 1);
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids,
+ smt_on() ? "EVENT1" : "EVENT2",
+ (void **)&val_ptr));
+
+ /* The expression is a constant 1.0 without needing to evaluate EVENT1. */
+ expr__ctx_clear(ctx);
+ TEST_ASSERT_VAL("find ids",
+ expr__find_ids("1.0 if EVENT1 > 100.0 else 1.0",
+ NULL, ctx) == 0);
+ TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 0);
+
+ expr__ctx_free(ctx);
return 0;
}
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index 8d9d4cbff76d..6f2da7a72f67 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -135,7 +135,7 @@ static int synth_all(struct machine *machine)
{
return perf_event__synthesize_threads(NULL,
perf_event__process,
- machine, 0, 1);
+ machine, 1, 0, 1);
}
static int synth_process(struct machine *machine)
@@ -147,7 +147,7 @@ static int synth_process(struct machine *machine)
err = perf_event__synthesize_thread_map(NULL, map,
perf_event__process,
- machine, 0);
+ machine, 1, 0);
perf_thread_map__put(map);
return err;
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index fd3556cc9ad4..8875e388563e 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -605,7 +605,7 @@ static int test__checkterms_simple(struct list_head *terms)
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 10);
- TEST_ASSERT_VAL("wrong config", !term->config);
+ TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config"));
/* config1 */
term = list_entry(term->list.next, struct parse_events_term, list);
@@ -614,7 +614,7 @@ static int test__checkterms_simple(struct list_head *terms)
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 1);
- TEST_ASSERT_VAL("wrong config", !term->config);
+ TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config1"));
/* config2=3 */
term = list_entry(term->list.next, struct parse_events_term, list);
@@ -623,7 +623,7 @@ static int test__checkterms_simple(struct list_head *terms)
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 3);
- TEST_ASSERT_VAL("wrong config", !term->config);
+ TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config2"));
/* umask=1*/
term = list_entry(term->list.next, struct parse_events_term, list);
@@ -661,7 +661,7 @@ static int test__checkterms_simple(struct list_head *terms)
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 0xead);
- TEST_ASSERT_VAL("wrong config", !term->config);
+ TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config"));
return 0;
}
diff --git a/tools/perf/tests/parse-metric.c b/tools/perf/tests/parse-metric.c
index 4f6f4904e852..dfc797ecc750 100644
--- a/tools/perf/tests/parse-metric.c
+++ b/tools/perf/tests/parse-metric.c
@@ -79,7 +79,7 @@ static struct pmu_event pme_test[] = {
}
};
-static struct pmu_events_map map = {
+static const struct pmu_events_map map = {
.cpuid = "test",
.version = "1",
.type = "core",
diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
index 43743cf719ef..50b1299fe643 100644
--- a/tools/perf/tests/pmu-events.c
+++ b/tools/perf/tests/pmu-events.c
@@ -67,7 +67,7 @@ static const struct perf_pmu_test_event segment_reg_loads_any = {
.desc = "Number of segment register loads",
.topic = "other",
},
- .alias_str = "umask=0x80,(null)=0x30d40,event=0x6",
+ .alias_str = "umask=0x80,period=0x30d40,event=0x6",
.alias_long_desc = "Number of segment register loads",
};
@@ -78,7 +78,7 @@ static const struct perf_pmu_test_event dispatch_blocked_any = {
.desc = "Memory cluster signals to block micro-op dispatch for any reason",
.topic = "other",
},
- .alias_str = "umask=0x20,(null)=0x30d40,event=0x9",
+ .alias_str = "umask=0x20,period=0x30d40,event=0x9",
.alias_long_desc = "Memory cluster signals to block micro-op dispatch for any reason",
};
@@ -89,7 +89,7 @@ static const struct perf_pmu_test_event eist_trans = {
.desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
.topic = "other",
},
- .alias_str = "umask=0,(null)=0x30d40,event=0x3a",
+ .alias_str = "umask=0,period=0x30d40,event=0x3a",
.alias_long_desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
};
@@ -146,7 +146,7 @@ static const struct perf_pmu_test_event unc_cbo_xsnp_response_miss_eviction = {
static const struct perf_pmu_test_event uncore_hisi_l3c_rd_hit_cpipe = {
.event = {
.name = "uncore_hisi_l3c.rd_hit_cpipe",
- .event = "event=0x2",
+ .event = "event=0x7",
.desc = "Total read hits. Unit: hisi_sccl,l3c ",
.topic = "uncore",
.long_desc = "Total read hits",
@@ -208,8 +208,23 @@ static const struct perf_pmu_test_event sys_ddr_pmu_write_cycles = {
.matching_pmu = "uncore_sys_ddr_pmu",
};
+static const struct perf_pmu_test_event sys_ccn_pmu_read_cycles = {
+ .event = {
+ .name = "sys_ccn_pmu.read_cycles",
+ .event = "config=0x2c",
+ .desc = "ccn read-cycles event. Unit: uncore_sys_ccn_pmu ",
+ .topic = "uncore",
+ .pmu = "uncore_sys_ccn_pmu",
+ .compat = "0x01",
+ },
+ .alias_str = "config=0x2c",
+ .alias_long_desc = "ccn read-cycles event. Unit: uncore_sys_ccn_pmu ",
+ .matching_pmu = "uncore_sys_ccn_pmu",
+};
+
static const struct perf_pmu_test_event *sys_events[] = {
&sys_ddr_pmu_write_cycles,
+ &sys_ccn_pmu_read_cycles,
NULL
};
@@ -227,9 +242,9 @@ static bool is_same(const char *reference, const char *test)
return !strcmp(reference, test);
}
-static struct pmu_events_map *__test_pmu_get_events_map(void)
+static const struct pmu_events_map *__test_pmu_get_events_map(void)
{
- struct pmu_events_map *map;
+ const struct pmu_events_map *map;
for (map = &pmu_events_map[0]; map->cpuid; map++) {
if (!strcmp(map->cpuid, "testcpu"))
@@ -241,9 +256,9 @@ static struct pmu_events_map *__test_pmu_get_events_map(void)
return NULL;
}
-static struct pmu_event *__test_pmu_get_sys_events_table(void)
+static const struct pmu_event *__test_pmu_get_sys_events_table(void)
{
- struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
+ const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
for ( ; tables->name; tables++) {
if (!strcmp("pme_test_soc_sys", tables->name))
@@ -253,8 +268,26 @@ static struct pmu_event *__test_pmu_get_sys_events_table(void)
return NULL;
}
-static int compare_pmu_events(struct pmu_event *e1, const struct pmu_event *e2)
+static int compare_pmu_events(const struct pmu_event *e1, const struct pmu_event *e2)
{
+ if (!is_same(e1->name, e2->name)) {
+ pr_debug2("testing event e1 %s: mismatched name string, %s vs %s\n",
+ e1->name, e1->name, e2->name);
+ return -1;
+ }
+
+ if (!is_same(e1->compat, e2->compat)) {
+ pr_debug2("testing event e1 %s: mismatched compat string, %s vs %s\n",
+ e1->name, e1->compat, e2->compat);
+ return -1;
+ }
+
+ if (!is_same(e1->event, e2->event)) {
+ pr_debug2("testing event e1 %s: mismatched event, %s vs %s\n",
+ e1->name, e1->event, e2->event);
+ return -1;
+ }
+
if (!is_same(e1->desc, e2->desc)) {
pr_debug2("testing event e1 %s: mismatched desc, %s vs %s\n",
e1->name, e1->desc, e2->desc);
@@ -273,6 +306,12 @@ static int compare_pmu_events(struct pmu_event *e1, const struct pmu_event *e2)
return -1;
}
+ if (!is_same(e1->pmu, e2->pmu)) {
+ pr_debug2("testing event e1 %s: mismatched pmu string, %s vs %s\n",
+ e1->name, e1->pmu, e2->pmu);
+ return -1;
+ }
+
if (!is_same(e1->unit, e2->unit)) {
pr_debug2("testing event e1 %s: mismatched unit, %s vs %s\n",
e1->name, e1->unit, e2->unit);
@@ -285,6 +324,12 @@ static int compare_pmu_events(struct pmu_event *e1, const struct pmu_event *e2)
return -1;
}
+ if (!is_same(e1->aggr_mode, e2->aggr_mode)) {
+ pr_debug2("testing event e1 %s: mismatched aggr_mode, %s vs %s\n",
+ e1->name, e1->aggr_mode, e2->aggr_mode);
+ return -1;
+ }
+
if (!is_same(e1->metric_expr, e2->metric_expr)) {
pr_debug2("testing event e1 %s: mismatched metric_expr, %s vs %s\n",
e1->name, e1->metric_expr, e2->metric_expr);
@@ -297,21 +342,21 @@ static int compare_pmu_events(struct pmu_event *e1, const struct pmu_event *e2)
return -1;
}
- if (!is_same(e1->deprecated, e2->deprecated)) {
- pr_debug2("testing event e1 %s: mismatched deprecated, %s vs %s\n",
- e1->name, e1->deprecated, e2->deprecated);
+ if (!is_same(e1->metric_group, e2->metric_group)) {
+ pr_debug2("testing event e1 %s: mismatched metric_group, %s vs %s\n",
+ e1->name, e1->metric_group, e2->metric_group);
return -1;
}
- if (!is_same(e1->pmu, e2->pmu)) {
- pr_debug2("testing event e1 %s: mismatched pmu string, %s vs %s\n",
- e1->name, e1->pmu, e2->pmu);
+ if (!is_same(e1->deprecated, e2->deprecated)) {
+ pr_debug2("testing event e1 %s: mismatched deprecated, %s vs %s\n",
+ e1->name, e1->deprecated, e2->deprecated);
return -1;
}
- if (!is_same(e1->compat, e2->compat)) {
- pr_debug2("testing event e1 %s: mismatched compat string, %s vs %s\n",
- e1->name, e1->compat, e2->compat);
+ if (!is_same(e1->metric_constraint, e2->metric_constraint)) {
+ pr_debug2("testing event e1 %s: mismatched metric_constant, %s vs %s\n",
+ e1->name, e1->metric_constraint, e2->metric_constraint);
return -1;
}
@@ -375,9 +420,9 @@ static int compare_alias_to_test_event(struct perf_pmu_alias *alias,
/* Verify generated events from pmu-events.c are as expected */
static int test_pmu_event_table(void)
{
- struct pmu_event *sys_event_tables = __test_pmu_get_sys_events_table();
- struct pmu_events_map *map = __test_pmu_get_events_map();
- struct pmu_event *table;
+ const struct pmu_event *sys_event_tables = __test_pmu_get_sys_events_table();
+ const struct pmu_events_map *map = __test_pmu_get_events_map();
+ const struct pmu_event *table;
int map_events = 0, expected_events;
/* ignore 3x sentinels */
@@ -473,7 +518,7 @@ static int __test_core_pmu_event_aliases(char *pmu_name, int *count)
struct perf_pmu *pmu;
LIST_HEAD(aliases);
int res = 0;
- struct pmu_events_map *map = __test_pmu_get_events_map();
+ const struct pmu_events_map *map = __test_pmu_get_events_map();
struct perf_pmu_alias *a, *tmp;
if (!map)
@@ -526,7 +571,7 @@ static int __test_uncore_pmu_event_aliases(struct perf_pmu_test_pmu *test_pmu)
struct perf_pmu *pmu = &test_pmu->pmu;
const char *pmu_name = pmu->name;
struct perf_pmu_alias *a, *tmp, *alias;
- struct pmu_events_map *map;
+ const struct pmu_events_map *map;
LIST_HEAD(aliases);
int res = 0;
@@ -647,6 +692,16 @@ static struct perf_pmu_test_pmu test_pmus[] = {
&sys_ddr_pmu_write_cycles,
},
},
+ {
+ .pmu = {
+ .name = (char *)"uncore_sys_ccn_pmu4",
+ .is_uncore = 1,
+ .id = (char *)"0x01",
+ },
+ .aliases = {
+ &sys_ccn_pmu_read_cycles,
+ },
+ },
};
/* Test that aliases generated are as expected */
@@ -706,6 +761,7 @@ static int check_parse_id(const char *id, struct parse_events_error *error,
{
struct evlist *evlist;
int ret;
+ char *dup, *cur;
/* Numbers are always valid. */
if (is_number(id))
@@ -714,12 +770,22 @@ static int check_parse_id(const char *id, struct parse_events_error *error,
evlist = evlist__new();
if (!evlist)
return -ENOMEM;
- ret = __parse_events(evlist, id, error, fake_pmu);
+
+ dup = strdup(id);
+ if (!dup)
+ return -ENOMEM;
+
+ for (cur = strchr(dup, '@') ; cur; cur = strchr(++cur, '@'))
+ *cur = '/';
+
+ ret = __parse_events(evlist, dup, error, fake_pmu);
+ free(dup);
+
evlist__delete(evlist);
return ret;
}
-static int check_parse_cpu(const char *id, bool same_cpu, struct pmu_event *pe)
+static int check_parse_cpu(const char *id, bool same_cpu, const struct pmu_event *pe)
{
struct parse_events_error error = { .idx = 0, };
@@ -770,7 +836,7 @@ struct metric {
static int resolve_metric_simple(struct expr_parse_ctx *pctx,
struct list_head *compound_list,
- struct pmu_events_map *map,
+ const struct pmu_events_map *map,
const char *metric_name)
{
struct hashmap_entry *cur, *cur_tmp;
@@ -781,9 +847,9 @@ static int resolve_metric_simple(struct expr_parse_ctx *pctx,
do {
all = true;
- hashmap__for_each_entry_safe((&pctx->ids), cur, cur_tmp, bkt) {
+ hashmap__for_each_entry_safe(pctx->ids, cur, cur_tmp, bkt) {
struct metric_ref *ref;
- struct pmu_event *pe;
+ const struct pmu_event *pe;
pe = metricgroup__find_metric(cur->key, map);
if (!pe)
@@ -811,7 +877,7 @@ static int resolve_metric_simple(struct expr_parse_ctx *pctx,
ref->metric_expr = pe->metric_expr;
list_add_tail(&metric->list, compound_list);
- rc = expr__find_other(pe->metric_expr, NULL, pctx, 0);
+ rc = expr__find_ids(pe->metric_expr, NULL, pctx);
if (rc)
goto out_err;
break; /* The hashmap has been modified, so restart */
@@ -830,14 +896,19 @@ out_err:
static int test_parsing(void)
{
- struct pmu_events_map *cpus_map = pmu_events_map__find();
- struct pmu_events_map *map;
- struct pmu_event *pe;
+ const struct pmu_events_map *cpus_map = pmu_events_map__find();
+ const struct pmu_events_map *map;
+ const struct pmu_event *pe;
int i, j, k;
int ret = 0;
- struct expr_parse_ctx ctx;
+ struct expr_parse_ctx *ctx;
double result;
+ ctx = expr__ctx_new();
+ if (!ctx) {
+ pr_debug("expr__ctx_new failed");
+ return TEST_FAIL;
+ }
i = 0;
for (;;) {
map = &pmu_events_map[i++];
@@ -855,15 +926,14 @@ static int test_parsing(void)
break;
if (!pe->metric_expr)
continue;
- expr__ctx_init(&ctx);
- if (expr__find_other(pe->metric_expr, NULL, &ctx, 0)
- < 0) {
- expr_failure("Parse other failed", map, pe);
+ expr__ctx_clear(ctx);
+ if (expr__find_ids(pe->metric_expr, NULL, ctx) < 0) {
+ expr_failure("Parse find ids failed", map, pe);
ret++;
continue;
}
- if (resolve_metric_simple(&ctx, &compound_list, map,
+ if (resolve_metric_simple(ctx, &compound_list, map,
pe->metric_name)) {
expr_failure("Could not resolve metrics", map, pe);
ret++;
@@ -876,27 +946,27 @@ static int test_parsing(void)
* make them unique.
*/
k = 1;
- hashmap__for_each_entry((&ctx.ids), cur, bkt)
- expr__add_id_val(&ctx, strdup(cur->key), k++);
+ hashmap__for_each_entry(ctx->ids, cur, bkt)
+ expr__add_id_val(ctx, strdup(cur->key), k++);
- hashmap__for_each_entry((&ctx.ids), cur, bkt) {
+ hashmap__for_each_entry(ctx->ids, cur, bkt) {
if (check_parse_cpu(cur->key, map == cpus_map,
pe))
ret++;
}
list_for_each_entry_safe(metric, tmp, &compound_list, list) {
- expr__add_ref(&ctx, &metric->metric_ref);
+ expr__add_ref(ctx, &metric->metric_ref);
free(metric);
}
- if (expr__parse(&result, &ctx, pe->metric_expr, 0)) {
+ if (expr__parse(&result, ctx, pe->metric_expr)) {
expr_failure("Parse failed", map, pe);
ret++;
}
- expr__ctx_clear(&ctx);
}
}
+ expr__ctx_free(ctx);
/* TODO: fail when not ok */
exit:
return ret == 0 ? TEST_OK : TEST_SKIP;
@@ -916,7 +986,7 @@ static struct test_metric metrics[] = {
static int metric_parse_fake(const char *str)
{
- struct expr_parse_ctx ctx;
+ struct expr_parse_ctx *ctx;
struct hashmap_entry *cur;
double result;
int ret = -1;
@@ -925,9 +995,13 @@ static int metric_parse_fake(const char *str)
pr_debug("parsing '%s'\n", str);
- expr__ctx_init(&ctx);
- if (expr__find_other(str, NULL, &ctx, 0) < 0) {
- pr_err("expr__find_other failed\n");
+ ctx = expr__ctx_new();
+ if (!ctx) {
+ pr_debug("expr__ctx_new failed");
+ return TEST_FAIL;
+ }
+ if (expr__find_ids(str, NULL, ctx) < 0) {
+ pr_err("expr__find_ids failed\n");
return -1;
}
@@ -937,23 +1011,23 @@ static int metric_parse_fake(const char *str)
* make them unique.
*/
i = 1;
- hashmap__for_each_entry((&ctx.ids), cur, bkt)
- expr__add_id_val(&ctx, strdup(cur->key), i++);
+ hashmap__for_each_entry(ctx->ids, cur, bkt)
+ expr__add_id_val(ctx, strdup(cur->key), i++);
- hashmap__for_each_entry((&ctx.ids), cur, bkt) {
+ hashmap__for_each_entry(ctx->ids, cur, bkt) {
if (check_parse_fake(cur->key)) {
pr_err("check_parse_fake failed\n");
goto out;
}
}
- if (expr__parse(&result, &ctx, str, 0))
+ if (expr__parse(&result, ctx, str))
pr_err("expr__parse failed\n");
else
ret = 0;
out:
- expr__ctx_clear(&ctx);
+ expr__ctx_free(ctx);
return ret;
}
@@ -964,8 +1038,8 @@ out:
*/
static int test_parsing_fake(void)
{
- struct pmu_events_map *map;
- struct pmu_event *pe;
+ const struct pmu_events_map *map;
+ const struct pmu_event *pe;
unsigned int i, j;
int err = 0;
diff --git a/tools/perf/tests/shell/stat_all_metricgroups.sh b/tools/perf/tests/shell/stat_all_metricgroups.sh
new file mode 100755
index 000000000000..de24d374ce24
--- /dev/null
+++ b/tools/perf/tests/shell/stat_all_metricgroups.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+# perf all metricgroups test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+for m in $(perf list --raw-dump metricgroups); do
+ echo "Testing $m"
+ perf stat -M "$m" true
+done
+
+exit 0
diff --git a/tools/perf/tests/shell/stat_all_metrics.sh b/tools/perf/tests/shell/stat_all_metrics.sh
new file mode 100755
index 000000000000..7f4ba3cad632
--- /dev/null
+++ b/tools/perf/tests/shell/stat_all_metrics.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+# perf all metrics test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+for m in $(perf list --raw-dump metrics); do
+ echo "Testing $m"
+ result=$(perf stat -M "$m" true 2>&1)
+ if [[ ! "$result" =~ "$m" ]] && [[ ! "$result" =~ "<not supported>" ]]; then
+    # We failed to see the metric and the events are supported. Possibly the
+    # workload was too small, so retry with something longer.
+ result=$(perf stat -M "$m" perf bench internals synthesize 2>&1)
+ if [[ ! "$result" =~ "$m" ]]; then
+ echo "Metric '$m' not printed in:"
+ echo "$result"
+ exit 1
+ fi
+ fi
+done
+
+exit 0
diff --git a/tools/perf/tests/shell/stat_all_pmu.sh b/tools/perf/tests/shell/stat_all_pmu.sh
new file mode 100755
index 000000000000..2de7fd0394fd
--- /dev/null
+++ b/tools/perf/tests/shell/stat_all_pmu.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+# perf all PMU test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+for p in $(perf list --raw-dump pmu); do
+ echo "Testing $p"
+ result=$(perf stat -e "$p" true 2>&1)
+ if [[ ! "$result" =~ "$p" ]] && [[ ! "$result" =~ "<not supported>" ]]; then
+ # We failed to see the event and it is supported. Possibly the workload was
+ # too small so retry with something longer.
+ result=$(perf stat -e "$p" perf bench internals synthesize 2>&1)
+ if [[ ! "$result" =~ "$p" ]]; then
+ echo "Event '$p' not printed in:"
+ echo "$result"
+ exit 1
+ fi
+ fi
+done
+
+exit 0
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 193b7c91b4e2..4f884aabc7f4 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -3,6 +3,7 @@
#include <linux/rbtree.h>
#include <inttypes.h>
#include <string.h>
+#include <ctype.h>
#include <stdlib.h>
#include "dso.h"
#include "map.h"
@@ -14,6 +15,102 @@
#define UM(x) kallsyms_map->unmap_ip(kallsyms_map, (x))
+static bool is_ignored_symbol(const char *name, char type)
+{
+	/* Symbol names that exactly match the following are ignored. */
+ static const char * const ignored_symbols[] = {
+ /*
+ * Symbols which vary between passes. Passes 1 and 2 must have
+ * identical symbol lists. The kallsyms_* symbols below are
+ * only added after pass 1, they would be included in pass 2
+ * when --all-symbols is specified so exclude them to get a
+ * stable symbol list.
+ */
+ "kallsyms_addresses",
+ "kallsyms_offsets",
+ "kallsyms_relative_base",
+ "kallsyms_num_syms",
+ "kallsyms_names",
+ "kallsyms_markers",
+ "kallsyms_token_table",
+ "kallsyms_token_index",
+ /* Exclude linker generated symbols which vary between passes */
+ "_SDA_BASE_", /* ppc */
+ "_SDA2_BASE_", /* ppc */
+ NULL
+ };
+
+	/* Symbol names that begin with the following are ignored. */
+ static const char * const ignored_prefixes[] = {
+ "$", /* local symbols for ARM, MIPS, etc. */
+ ".LASANPC", /* s390 kasan local symbols */
+ "__crc_", /* modversions */
+ "__efistub_", /* arm64 EFI stub namespace */
+ "__kvm_nvhe_", /* arm64 non-VHE KVM namespace */
+ "__AArch64ADRPThunk_", /* arm64 lld */
+ "__ARMV5PILongThunk_", /* arm lld */
+ "__ARMV7PILongThunk_",
+ "__ThumbV7PILongThunk_",
+ "__LA25Thunk_", /* mips lld */
+ "__microLA25Thunk_",
+ NULL
+ };
+
+	/* Symbol names that end with the following are ignored. */
+ static const char * const ignored_suffixes[] = {
+ "_from_arm", /* arm */
+ "_from_thumb", /* arm */
+ "_veneer", /* arm */
+ NULL
+ };
+
+	/* Symbol names that contain the following are ignored. */
+ static const char * const ignored_matches[] = {
+ ".long_branch.", /* ppc stub */
+ ".plt_branch.", /* ppc stub */
+ NULL
+ };
+
+ const char * const *p;
+
+ for (p = ignored_symbols; *p; p++)
+ if (!strcmp(name, *p))
+ return true;
+
+ for (p = ignored_prefixes; *p; p++)
+ if (!strncmp(name, *p, strlen(*p)))
+ return true;
+
+ for (p = ignored_suffixes; *p; p++) {
+ int l = strlen(name) - strlen(*p);
+
+ if (l >= 0 && !strcmp(name + l, *p))
+ return true;
+ }
+
+ for (p = ignored_matches; *p; p++) {
+ if (strstr(name, *p))
+ return true;
+ }
+
+ if (type == 'U' || type == 'u')
+ return true;
+ /* exclude debugging symbols */
+ if (type == 'N' || type == 'n')
+ return true;
+
+ if (toupper(type) == 'A') {
+ /* Keep these useful absolute symbols */
+ if (strcmp(name, "__kernel_syscall_via_break") &&
+ strcmp(name, "__kernel_syscall_via_epc") &&
+ strcmp(name, "__kernel_sigtramp") &&
+ strcmp(name, "__gp"))
+ return true;
+ }
+
+ return false;
+}
+
int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest __maybe_unused)
{
int err = -1;
@@ -169,6 +266,11 @@ next_pair:
* such as __indirect_thunk_end.
*/
continue;
+ } else if (is_ignored_symbol(sym->name, sym->type)) {
+ /*
+ * Ignore hidden symbols, see scripts/kallsyms.c for the details
+ */
+ continue;
} else {
pr_debug("ERR : %#" PRIx64 ": %s not on kallsyms\n",
mem_start, sym->name);
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index f2914d5bed6e..15b2366ad384 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -138,6 +138,7 @@ perf-y += expr.o
perf-y += branch.o
perf-y += mem2node.o
perf-y += clockid.o
+perf-y += list_sort.o
perf-$(CONFIG_LIBBPF) += bpf-loader.o
perf-$(CONFIG_LIBBPF) += bpf_map.o
@@ -315,3 +316,7 @@ $(OUTPUT)util/hweight.o: ../lib/hweight.c FORCE
$(OUTPUT)util/vsprintf.o: ../lib/vsprintf.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
+
+$(OUTPUT)util/list_sort.o: ../lib/list_sort.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_o_c)
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 0bae061b2d6d..4bab2273303a 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -151,6 +151,7 @@ static int arch__associate_ins_ops(struct arch* arch, const char *name, struct i
#include "arch/mips/annotate/instructions.c"
#include "arch/x86/annotate/instructions.c"
#include "arch/powerpc/annotate/instructions.c"
+#include "arch/riscv64/annotate/instructions.c"
#include "arch/s390/annotate/instructions.c"
#include "arch/sparc/annotate/instructions.c"
@@ -183,7 +184,6 @@ static struct arch architectures[] = {
.init = x86__annotate_init,
.instructions = x86__instructions,
.nr_instructions = ARRAY_SIZE(x86__instructions),
- .ins_is_fused = x86__ins_is_fused,
.objdump = {
.comment_char = '#',
},
@@ -193,6 +193,10 @@ static struct arch architectures[] = {
.init = powerpc__annotate_init,
},
{
+ .name = "riscv64",
+ .init = riscv64__annotate_init,
+ },
+ {
.name = "s390",
.init = s390__annotate_init,
.objdump = {
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index ba0f20853651..ced2dac31dcf 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -127,9 +127,9 @@ static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
skel->rodata->num_cpu = evsel__nr_cpus(evsel);
- bpf_map__resize(skel->maps.events, evsel__nr_cpus(evsel));
- bpf_map__resize(skel->maps.fentry_readings, 1);
- bpf_map__resize(skel->maps.accum_readings, 1);
+ bpf_map__set_max_entries(skel->maps.events, evsel__nr_cpus(evsel));
+ bpf_map__set_max_entries(skel->maps.fentry_readings, 1);
+ bpf_map__set_max_entries(skel->maps.accum_readings, 1);
prog_name = bpf_target_prog_name(prog_fd);
if (!prog_name) {
@@ -399,7 +399,7 @@ static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
return -1;
}
- bpf_map__resize(skel->maps.events, libbpf_num_possible_cpus());
+ bpf_map__set_max_entries(skel->maps.events, libbpf_num_possible_cpus());
err = bperf_leader_bpf__load(skel);
if (err) {
pr_err("Failed to load leader skeleton\n");
diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
index 89aa5e71db1a..cbc6c2bca488 100644
--- a/tools/perf/util/bpf_counter_cgroup.c
+++ b/tools/perf/util/bpf_counter_cgroup.c
@@ -65,14 +65,14 @@ static int bperf_load_program(struct evlist *evlist)
/* we need one copy of events per cpu for reading */
map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
- bpf_map__resize(skel->maps.events, map_size);
- bpf_map__resize(skel->maps.cgrp_idx, nr_cgroups);
+ bpf_map__set_max_entries(skel->maps.events, map_size);
+ bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);
/* previous result is saved in a per-cpu array */
map_size = evlist->core.nr_entries / nr_cgroups;
- bpf_map__resize(skel->maps.prev_readings, map_size);
+ bpf_map__set_max_entries(skel->maps.prev_readings, map_size);
/* cgroup result needs all events (per-cpu) */
map_size = evlist->core.nr_entries;
- bpf_map__resize(skel->maps.cgrp_readings, map_size);
+ bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);
set_max_rlimit();
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 2c06abf6dcd2..c7a9fa0ffae9 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -24,6 +24,16 @@
#include "util/parse-sublevel-options.h"
#include <linux/ctype.h>
+#include <traceevent/event-parse.h>
+
+#define MAKE_LIBTRACEEVENT_VERSION(a, b, c) ((a)*255*255+(b)*255+(c))
+#ifndef LIBTRACEEVENT_VERSION
+/*
+ * If LIBTRACEEVENT_VERSION wasn't computed then set to version 1.1.0 that ships
+ * with the Linux kernel tools.
+ */
+#define LIBTRACEEVENT_VERSION MAKE_LIBTRACEEVENT_VERSION(1, 1, 0)
+#endif
int verbose;
int debug_peo_args;
@@ -228,6 +238,15 @@ int perf_debug_option(const char *str)
/* Allow only verbose value in range (0, 10), otherwise set 0. */
verbose = (verbose < 0) || (verbose > 10) ? 0 : verbose;
+#if MAKE_LIBTRACEEVENT_VERSION(1, 3, 0) <= LIBTRACEEVENT_VERSION
+ if (verbose == 1)
+ tep_set_loglevel(TEP_LOG_INFO);
+ else if (verbose == 2)
+ tep_set_loglevel(TEP_LOG_DEBUG);
+ else if (verbose >= 3)
+ tep_set_loglevel(TEP_LOG_ALL);
+#endif
+
return 0;
}
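A quick worked check of the version arithmetic used above (illustrative only, not part of the patch):

/* MAKE_LIBTRACEEVENT_VERSION(1, 3, 0) = 1*255*255 + 3*255 + 0 = 65790, so the
 * tep_set_loglevel() calls above are only compiled in when the detected
 * LIBTRACEEVENT_VERSION evaluates to at least 65790, i.e. libtraceevent 1.3.0
 * or newer. */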
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 83723ba11dc8..011da3924fc1 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -193,7 +193,7 @@ struct dso {
int fd;
int status;
u32 status_seen;
- size_t file_size;
+ u64 file_size;
struct list_head open_entry;
u64 debug_frame_offset;
u64 eh_frame_hdr_offset;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index ac706304afe9..fe24801f8e9f 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -57,6 +57,7 @@ static const char *perf_event__names[] = {
[PERF_RECORD_BPF_EVENT] = "BPF_EVENT",
[PERF_RECORD_CGROUP] = "CGROUP",
[PERF_RECORD_TEXT_POKE] = "TEXT_POKE",
+ [PERF_RECORD_AUX_OUTPUT_HW_ID] = "AUX_OUTPUT_HW_ID",
[PERF_RECORD_HEADER_ATTR] = "ATTR",
[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
@@ -237,6 +238,14 @@ int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
return machine__process_itrace_start_event(machine, event);
}
+int perf_event__process_aux_output_hw_id(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine)
+{
+ return machine__process_aux_output_hw_id_event(machine, event);
+}
+
int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
@@ -407,6 +416,12 @@ size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
event->itrace_start.pid, event->itrace_start.tid);
}
+size_t perf_event__fprintf_aux_output_hw_id(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " hw_id: %#"PRI_lx64"\n",
+ event->aux_output_hw_id.hw_id);
+}
+
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
@@ -534,6 +549,9 @@ size_t perf_event__fprintf(union perf_event *event, struct machine *machine, FIL
case PERF_RECORD_TEXT_POKE:
ret += perf_event__fprintf_text_poke(event, machine, fp);
break;
+ case PERF_RECORD_AUX_OUTPUT_HW_ID:
+ ret += perf_event__fprintf_aux_output_hw_id(event, fp);
+ break;
default:
ret += fprintf(fp, "\n");
}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 19ad64f2bd83..95ffed66369c 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -330,6 +330,10 @@ int perf_event__process_itrace_start(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
+int perf_event__process_aux_output_hw_id(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
int perf_event__process_switch(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -397,6 +401,7 @@ size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_aux_output_hw_id(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index dbfeceb2546c..96ef6a4a7c14 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -410,6 +410,11 @@ struct evsel *evsel__clone(struct evsel *orig)
if (evsel->filter == NULL)
goto out_err;
}
+ if (orig->metric_id) {
+ evsel->metric_id = strdup(orig->metric_id);
+ if (evsel->metric_id == NULL)
+ goto out_err;
+ }
evsel->cgrp = cgroup__get(orig->cgrp);
evsel->tp_format = orig->tp_format;
evsel->handler = orig->handler;
@@ -779,6 +784,17 @@ out_unknown:
return "unknown";
}
+const char *evsel__metric_id(const struct evsel *evsel)
+{
+ if (evsel->metric_id)
+ return evsel->metric_id;
+
+ if (evsel->core.attr.type == PERF_TYPE_SOFTWARE && evsel->tool_event)
+ return "duration_time";
+
+ return "unknown";
+}
+
const char *evsel__group_name(struct evsel *evsel)
{
return evsel->group_name ?: "anon group";
@@ -1423,6 +1439,7 @@ void evsel__exit(struct evsel *evsel)
zfree(&evsel->group_name);
zfree(&evsel->name);
zfree(&evsel->pmu_name);
+ zfree(&evsel->metric_id);
evsel__zero_per_pkg(evsel);
hashmap__free(evsel->per_pkg_mask);
evsel->per_pkg_mask = NULL;
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 1f7edfa8568a..45476a888942 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -68,6 +68,7 @@ struct evsel {
double scale;
const char *unit;
struct cgroup *cgrp;
+ const char *metric_id;
enum perf_tool_event tool_event;
/* parse modifier helper */
int exclude_GH;
@@ -261,6 +262,7 @@ bool evsel__match_bpf_counter_events(const char *name);
int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size);
const char *evsel__name(struct evsel *evsel);
+const char *evsel__metric_id(const struct evsel *evsel);
const char *evsel__group_name(struct evsel *evsel);
int evsel__group_desc(struct evsel *evsel, char *buf, size_t size);
diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
index a850fd0be3ee..77c6ad81a923 100644
--- a/tools/perf/util/expr.c
+++ b/tools/perf/util/expr.c
@@ -25,7 +25,6 @@ struct expr_id_data {
const char *metric_name;
const char *metric_expr;
} ref;
- struct expr_id *parent;
};
enum {
@@ -35,8 +34,6 @@ struct expr_id_data {
EXPR_ID_DATA__REF,
/* A reference but the value has been computed. */
EXPR_ID_DATA__REF_VALUE,
- /* A parent is remembered for the recursion check. */
- EXPR_ID_DATA__PARENT,
} kind;
};
@@ -59,21 +56,34 @@ static bool key_equal(const void *key1, const void *key2,
return !strcmp((const char *)key1, (const char *)key2);
}
-/* Caller must make sure id is allocated */
-int expr__add_id(struct expr_parse_ctx *ctx, const char *id)
+struct hashmap *ids__new(void)
+{
+ return hashmap__new(key_hash, key_equal, NULL);
+}
+
+void ids__free(struct hashmap *ids)
+{
+ struct hashmap_entry *cur;
+ size_t bkt;
+
+ if (ids == NULL)
+ return;
+
+ hashmap__for_each_entry(ids, cur, bkt) {
+ free((char *)cur->key);
+ free(cur->value);
+ }
+
+ hashmap__free(ids);
+}
+
+int ids__insert(struct hashmap *ids, const char *id)
{
struct expr_id_data *data_ptr = NULL, *old_data = NULL;
char *old_key = NULL;
int ret;
- data_ptr = malloc(sizeof(*data_ptr));
- if (!data_ptr)
- return -ENOMEM;
-
- data_ptr->parent = ctx->parent;
- data_ptr->kind = EXPR_ID_DATA__PARENT;
-
- ret = hashmap__set(&ctx->ids, id, data_ptr,
+ ret = hashmap__set(ids, id, data_ptr,
(const void **)&old_key, (void **)&old_data);
if (ret)
free(data_ptr);
@@ -82,6 +92,48 @@ int expr__add_id(struct expr_parse_ctx *ctx, const char *id)
return ret;
}
+struct hashmap *ids__union(struct hashmap *ids1, struct hashmap *ids2)
+{
+ size_t bkt;
+ struct hashmap_entry *cur;
+ int ret;
+ struct expr_id_data *old_data = NULL;
+ char *old_key = NULL;
+
+ if (!ids1)
+ return ids2;
+
+ if (!ids2)
+ return ids1;
+
+ if (hashmap__size(ids1) < hashmap__size(ids2)) {
+ struct hashmap *tmp = ids1;
+
+ ids1 = ids2;
+ ids2 = tmp;
+ }
+ hashmap__for_each_entry(ids2, cur, bkt) {
+ ret = hashmap__set(ids1, cur->key, cur->value,
+ (const void **)&old_key, (void **)&old_data);
+ free(old_key);
+ free(old_data);
+
+ if (ret) {
+ hashmap__free(ids1);
+ hashmap__free(ids2);
+ return NULL;
+ }
+ }
+ hashmap__free(ids2);
+ return ids1;
+}
+
+/* Caller must make sure id is allocated */
+int expr__add_id(struct expr_parse_ctx *ctx, const char *id)
+{
+ return ids__insert(ctx->ids, id);
+}
+
/* Caller must make sure id is allocated */
int expr__add_id_val(struct expr_parse_ctx *ctx, const char *id, double val)
{
@@ -95,7 +147,7 @@ int expr__add_id_val(struct expr_parse_ctx *ctx, const char *id, double val)
data_ptr->val = val;
data_ptr->kind = EXPR_ID_DATA__VALUE;
- ret = hashmap__set(&ctx->ids, id, data_ptr,
+ ret = hashmap__set(ctx->ids, id, data_ptr,
(const void **)&old_key, (void **)&old_data);
if (ret)
free(data_ptr);
@@ -140,7 +192,7 @@ int expr__add_ref(struct expr_parse_ctx *ctx, struct metric_ref *ref)
data_ptr->ref.metric_expr = ref->metric_expr;
data_ptr->kind = EXPR_ID_DATA__REF;
- ret = hashmap__set(&ctx->ids, name, data_ptr,
+ ret = hashmap__set(ctx->ids, name, data_ptr,
(const void **)&old_key, (void **)&old_data);
if (ret)
free(data_ptr);
@@ -156,9 +208,24 @@ int expr__add_ref(struct expr_parse_ctx *ctx, struct metric_ref *ref)
int expr__get_id(struct expr_parse_ctx *ctx, const char *id,
struct expr_id_data **data)
{
- return hashmap__find(&ctx->ids, id, (void **)data) ? 0 : -1;
+ return hashmap__find(ctx->ids, id, (void **)data) ? 0 : -1;
+}
+
+bool expr__subset_of_ids(struct expr_parse_ctx *haystack,
+ struct expr_parse_ctx *needles)
+{
+ struct hashmap_entry *cur;
+ size_t bkt;
+ struct expr_id_data *data;
+
+ hashmap__for_each_entry(needles->ids, cur, bkt) {
+ if (expr__get_id(haystack, cur->key, &data))
+ return false;
+ }
+ return true;
}
+
int expr__resolve_id(struct expr_parse_ctx *ctx, const char *id,
struct expr_id_data **datap)
{
@@ -175,15 +242,12 @@ int expr__resolve_id(struct expr_parse_ctx *ctx, const char *id,
case EXPR_ID_DATA__VALUE:
pr_debug2("lookup(%s): val %f\n", id, data->val);
break;
- case EXPR_ID_DATA__PARENT:
- pr_debug2("lookup(%s): parent %s\n", id, data->parent->id);
- break;
case EXPR_ID_DATA__REF:
pr_debug2("lookup(%s): ref metric name %s\n", id,
data->ref.metric_name);
pr_debug("processing metric: %s ENTRY\n", id);
data->kind = EXPR_ID_DATA__REF_VALUE;
- if (expr__parse(&data->ref.val, ctx, data->ref.metric_expr, 1)) {
+ if (expr__parse(&data->ref.val, ctx, data->ref.metric_expr)) {
pr_debug("%s failed to count\n", id);
return -1;
}
@@ -205,15 +269,24 @@ void expr__del_id(struct expr_parse_ctx *ctx, const char *id)
struct expr_id_data *old_val = NULL;
char *old_key = NULL;
- hashmap__delete(&ctx->ids, id,
+ hashmap__delete(ctx->ids, id,
(const void **)&old_key, (void **)&old_val);
free(old_key);
free(old_val);
}
-void expr__ctx_init(struct expr_parse_ctx *ctx)
+struct expr_parse_ctx *expr__ctx_new(void)
{
- hashmap__init(&ctx->ids, key_hash, key_equal, NULL);
+ struct expr_parse_ctx *ctx;
+
+ ctx = malloc(sizeof(struct expr_parse_ctx));
+ if (!ctx)
+ return NULL;
+
+ ctx->ids = hashmap__new(key_hash, key_equal, NULL);
+ ctx->runtime = 0;
+
+ return ctx;
}
void expr__ctx_clear(struct expr_parse_ctx *ctx)
@@ -221,20 +294,32 @@ void expr__ctx_clear(struct expr_parse_ctx *ctx)
struct hashmap_entry *cur;
size_t bkt;
- hashmap__for_each_entry((&ctx->ids), cur, bkt) {
+ hashmap__for_each_entry(ctx->ids, cur, bkt) {
+ free((char *)cur->key);
+ free(cur->value);
+ }
+ hashmap__clear(ctx->ids);
+}
+
+void expr__ctx_free(struct expr_parse_ctx *ctx)
+{
+ struct hashmap_entry *cur;
+ size_t bkt;
+
+ hashmap__for_each_entry(ctx->ids, cur, bkt) {
free((char *)cur->key);
free(cur->value);
}
- hashmap__clear(&ctx->ids);
+ hashmap__free(ctx->ids);
+ free(ctx);
}
static int
__expr__parse(double *val, struct expr_parse_ctx *ctx, const char *expr,
- int start, int runtime)
+ bool compute_ids)
{
struct expr_scanner_ctx scanner_ctx = {
- .start_token = start,
- .runtime = runtime,
+ .runtime = ctx->runtime,
};
YY_BUFFER_STATE buffer;
void *scanner;
@@ -253,7 +338,7 @@ __expr__parse(double *val, struct expr_parse_ctx *ctx, const char *expr,
expr_set_debug(1, scanner);
#endif
- ret = expr_parse(val, ctx, scanner);
+ ret = expr_parse(val, ctx, compute_ids, scanner);
expr__flush_buffer(buffer, scanner);
expr__delete_buffer(buffer, scanner);
@@ -262,15 +347,15 @@ __expr__parse(double *val, struct expr_parse_ctx *ctx, const char *expr,
}
int expr__parse(double *final_val, struct expr_parse_ctx *ctx,
- const char *expr, int runtime)
+ const char *expr)
{
- return __expr__parse(final_val, ctx, expr, EXPR_PARSE, runtime) ? -1 : 0;
+ return __expr__parse(final_val, ctx, expr, /*compute_ids=*/false) ? -1 : 0;
}
-int expr__find_other(const char *expr, const char *one,
- struct expr_parse_ctx *ctx, int runtime)
+int expr__find_ids(const char *expr, const char *one,
+ struct expr_parse_ctx *ctx)
{
- int ret = __expr__parse(NULL, ctx, expr, EXPR_OTHER, runtime);
+ int ret = __expr__parse(NULL, ctx, expr, /*compute_ids=*/true);
if (one)
expr__del_id(ctx, one);
@@ -285,9 +370,3 @@ double expr_id_data__value(const struct expr_id_data *data)
assert(data->kind == EXPR_ID_DATA__REF_VALUE);
return data->ref.val;
}
-
-struct expr_id *expr_id_data__parent(struct expr_id_data *data)
-{
- assert(data->kind == EXPR_ID_DATA__PARENT);
- return data->parent;
-}
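A minimal usage sketch of the reworked expr API (not part of the patch), mirroring the updated test at the top of this change; the "FOO"/"BAR" ids, their values and the helper name are illustrative, and the perf-internal headers are assumed:

/* Illustrative only: evaluate "FOO+BAR" with FOO=1 and BAR=2 using the new API. */
static int example_expr_eval(void)
{
	struct expr_parse_ctx *ctx = expr__ctx_new(); /* heap-allocated, replaces on-stack expr__ctx_init() */
	double result;
	int err = -1;

	if (!ctx)
		return -1;

	expr__add_id_val(ctx, strdup("FOO"), 1);
	expr__add_id_val(ctx, strdup("BAR"), 2);

	ctx->runtime = 0; /* runtime now lives in the ctx rather than being an expr__parse() argument */
	if (expr__parse(&result, ctx, "FOO+BAR") == 0 && result == 3)
		err = 0;

	expr__ctx_free(ctx); /* frees the id strings, the ids hashmap and the ctx itself */
	return err;
}

Note that ids__union() above consumes both of its arguments: after the call only the returned hashmap may be used, so callers never free ids1 or ids2 themselves.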
diff --git a/tools/perf/util/expr.h b/tools/perf/util/expr.h
index 85df3e4771e4..cf81f9166dbb 100644
--- a/tools/perf/util/expr.h
+++ b/tools/perf/util/expr.h
@@ -13,39 +13,47 @@
struct metric_ref;
-struct expr_id {
- char *id;
- struct expr_id *parent;
-};
-
struct expr_parse_ctx {
- struct hashmap ids;
- struct expr_id *parent;
+ struct hashmap *ids;
+ int runtime;
};
struct expr_id_data;
struct expr_scanner_ctx {
- int start_token;
int runtime;
};
-void expr__ctx_init(struct expr_parse_ctx *ctx);
+struct hashmap *ids__new(void);
+void ids__free(struct hashmap *ids);
+int ids__insert(struct hashmap *ids, const char *id);
+/*
+ * Union two sets of ids (hashmaps) and construct a third, freeing ids1 and
+ * ids2.
+ */
+struct hashmap *ids__union(struct hashmap *ids1, struct hashmap *ids2);
+
+struct expr_parse_ctx *expr__ctx_new(void);
void expr__ctx_clear(struct expr_parse_ctx *ctx);
+void expr__ctx_free(struct expr_parse_ctx *ctx);
+
void expr__del_id(struct expr_parse_ctx *ctx, const char *id);
int expr__add_id(struct expr_parse_ctx *ctx, const char *id);
int expr__add_id_val(struct expr_parse_ctx *ctx, const char *id, double val);
int expr__add_ref(struct expr_parse_ctx *ctx, struct metric_ref *ref);
int expr__get_id(struct expr_parse_ctx *ctx, const char *id,
struct expr_id_data **data);
+bool expr__subset_of_ids(struct expr_parse_ctx *haystack,
+ struct expr_parse_ctx *needles);
int expr__resolve_id(struct expr_parse_ctx *ctx, const char *id,
struct expr_id_data **datap);
+
int expr__parse(double *final_val, struct expr_parse_ctx *ctx,
- const char *expr, int runtime);
-int expr__find_other(const char *expr, const char *one,
- struct expr_parse_ctx *ids, int runtime);
+ const char *expr);
+
+int expr__find_ids(const char *expr, const char *one,
+ struct expr_parse_ctx *ids);
double expr_id_data__value(const struct expr_id_data *data);
-struct expr_id *expr_id_data__parent(struct expr_id_data *data);
#endif
diff --git a/tools/perf/util/expr.l b/tools/perf/util/expr.l
index 13e5e3c75f56..bd20f33418ba 100644
--- a/tools/perf/util/expr.l
+++ b/tools/perf/util/expr.l
@@ -41,11 +41,9 @@ static char *normalize(char *str, int runtime)
char *dst = str;
while (*str) {
- if (*str == '@')
- *dst++ = '/';
- else if (*str == '\\')
+ if (*str == '\\')
*dst++ = *++str;
- else if (*str == '?') {
+ else if (*str == '?') {
char *paramval;
int i = 0;
int size = asprintf(&paramval, "%d", runtime);
@@ -91,15 +89,6 @@ symbol ({spec}|{sym})+
%%
struct expr_scanner_ctx *sctx = expr_get_extra(yyscanner);
- {
- int start_token = sctx->start_token;
-
- if (sctx->start_token) {
- sctx->start_token = 0;
- return start_token;
- }
- }
-
d_ratio { return D_RATIO; }
max { return MAX; }
min { return MIN; }
diff --git a/tools/perf/util/expr.y b/tools/perf/util/expr.y
index b2ada8f8309a..f969dfa525bd 100644
--- a/tools/perf/util/expr.y
+++ b/tools/perf/util/expr.y
@@ -1,42 +1,43 @@
/* Simple expression parser */
%{
#define YYDEBUG 1
-#include <stdio.h>
-#include "util.h"
+#include <assert.h>
+#include <math.h>
#include "util/debug.h"
-#include <stdlib.h> // strtod()
+#include "smt.h"
#define IN_EXPR_Y 1
#include "expr.h"
-#include "smt.h"
-#include <string.h>
-
-static double d_ratio(double val0, double val1)
-{
- if (val1 == 0) {
- return 0;
- }
- return val0 / val1;
-}
-
%}
%define api.pure full
%parse-param { double *final_val }
%parse-param { struct expr_parse_ctx *ctx }
+%parse-param { bool compute_ids }
%parse-param {void *scanner}
%lex-param {void* scanner}
%union {
double num;
char *str;
+ struct ids {
+ /*
+ * When creating ids, holds the working set of event ids. NULL
+ * implies the set is empty.
+ */
+ struct hashmap *ids;
+ /*
+ * The metric value. When not creating ids this is the value
+ * read from a counter, a constant or some computed value. When
+ * creating ids the value is either a constant or BOTTOM. NAN is
+ * used as the special BOTTOM value, representing a "set of all
+ * values" case.
+ */
+ double val;
+ } ids;
}
-%token EXPR_PARSE EXPR_OTHER EXPR_ERROR
-%token <num> NUMBER
-%token <str> ID
-%destructor { free ($$); } <str>
-%token MIN MAX IF ELSE SMT_ON D_RATIO
+%token ID NUMBER MIN MAX IF ELSE SMT_ON D_RATIO EXPR_ERROR
%left MIN MAX IF
%left '|'
%left '^'
@@ -45,83 +46,245 @@ static double d_ratio(double val0, double val1)
%left '-' '+'
%left '*' '/' '%'
%left NEG NOT
-%type <num> expr if_expr
+%type <num> NUMBER
+%type <str> ID
+%destructor { free ($$); } <str>
+%type <ids> expr if_expr
+%destructor { ids__free($$.ids); } <ids>
%{
static void expr_error(double *final_val __maybe_unused,
struct expr_parse_ctx *ctx __maybe_unused,
+ bool compute_ids __maybe_unused,
void *scanner,
const char *s)
{
pr_debug("%s\n", s);
}
+/*
+ * During compute ids, the special "bottom" value uses NAN to represent the set
+ * of all values. NAN is selected as it isn't a useful constant value.
+ */
+#define BOTTOM NAN
+
+/* During computing ids, does val represent a constant (non-BOTTOM) value? */
+static bool is_const(double val)
+{
+ return isfinite(val);
+}
+
+static struct ids union_expr(struct ids ids1, struct ids ids2)
+{
+ struct ids result = {
+ .val = BOTTOM,
+ .ids = ids__union(ids1.ids, ids2.ids),
+ };
+ return result;
+}
+
+/*
+ * If we're not computing ids or $1 and $3 are constants, compute the new
+ * constant value using OP; it's an invariant that there are no ids then. If
+ * computing ids for non-constants, union the sets of IDs that must be computed.
+ */
+#define BINARY_LONG_OP(RESULT, OP, LHS, RHS) \
+ if (!compute_ids || (is_const(LHS.val) && is_const(RHS.val))) { \
+ assert(LHS.ids == NULL); \
+ assert(RHS.ids == NULL); \
+ RESULT.val = (long)LHS.val OP (long)RHS.val; \
+ RESULT.ids = NULL; \
+ } else { \
+ RESULT = union_expr(LHS, RHS); \
+ }
+
+#define BINARY_OP(RESULT, OP, LHS, RHS) \
+ if (!compute_ids || (is_const(LHS.val) && is_const(RHS.val))) { \
+ assert(LHS.ids == NULL); \
+ assert(RHS.ids == NULL); \
+ RESULT.val = LHS.val OP RHS.val; \
+ RESULT.ids = NULL; \
+ } else { \
+ RESULT = union_expr(LHS, RHS); \
+ }
+
%}
%%
-start:
-EXPR_PARSE all_expr
-|
-EXPR_OTHER all_other
+start: if_expr
+{
+ if (compute_ids)
+ ctx->ids = ids__union($1.ids, ctx->ids);
-all_other: all_other other
-|
+ if (final_val)
+ *final_val = $1.val;
+}
+;
+
+if_expr: expr IF expr ELSE expr
+{
+ if (fpclassify($3.val) == FP_ZERO) {
+ /*
+ * The IF expression evaluated to 0 so treat as false, take the
+ * ELSE and discard everything else.
+ */
+ $$.val = $5.val;
+ $$.ids = $5.ids;
+ ids__free($1.ids);
+ ids__free($3.ids);
+ } else if (!compute_ids || is_const($3.val)) {
+ /*
+ * If ids aren't computed then treat the expression as true. If
+ * ids are being computed and the IF expr is a non-zero
+ * constant, then also evaluate the true case.
+ */
+ $$.val = $1.val;
+ $$.ids = $1.ids;
+ ids__free($3.ids);
+ ids__free($5.ids);
+ } else if ($1.val == $5.val) {
+ /*
+ * LHS == RHS, so both are an identical constant. No need to
+ * evaluate any events.
+ */
+ $$.val = $1.val;
+ $$.ids = NULL;
+ ids__free($1.ids);
+ ids__free($3.ids);
+ ids__free($5.ids);
+ } else {
+ /*
+ * Value is either the LHS or RHS and we need the IF expression
+ * to compute it.
+ */
+ $$ = union_expr($1, union_expr($3, $5));
+ }
+}
+| expr
+;
-other: ID
+expr: NUMBER
{
- expr__add_id(ctx, $1);
-}
-|
-MIN | MAX | IF | ELSE | SMT_ON | NUMBER | '|' | '^' | '&' | '-' | '+' | '*' | '/' | '%' | '(' | ')' | ','
-|
-'<' | '>' | D_RATIO
-
-all_expr: if_expr { *final_val = $1; }
- ;
-
-if_expr:
- expr IF expr ELSE expr { $$ = $3 ? $1 : $5; }
- | expr
- ;
-
-expr: NUMBER
- | ID {
- struct expr_id_data *data;
-
- if (expr__resolve_id(ctx, $1, &data)) {
- free($1);
- YYABORT;
- }
-
- $$ = expr_id_data__value(data);
- free($1);
- }
- | expr '|' expr { $$ = (long)$1 | (long)$3; }
- | expr '&' expr { $$ = (long)$1 & (long)$3; }
- | expr '^' expr { $$ = (long)$1 ^ (long)$3; }
- | expr '<' expr { $$ = $1 < $3; }
- | expr '>' expr { $$ = $1 > $3; }
- | expr '+' expr { $$ = $1 + $3; }
- | expr '-' expr { $$ = $1 - $3; }
- | expr '*' expr { $$ = $1 * $3; }
- | expr '/' expr { if ($3 == 0) {
- pr_debug("division by zero\n");
- YYABORT;
- }
- $$ = $1 / $3;
- }
- | expr '%' expr { if ((long)$3 == 0) {
- pr_debug("division by zero\n");
- YYABORT;
- }
- $$ = (long)$1 % (long)$3;
- }
- | '-' expr %prec NEG { $$ = -$2; }
- | '(' if_expr ')' { $$ = $2; }
- | MIN '(' expr ',' expr ')' { $$ = $3 < $5 ? $3 : $5; }
- | MAX '(' expr ',' expr ')' { $$ = $3 > $5 ? $3 : $5; }
- | SMT_ON { $$ = smt_on() > 0; }
- | D_RATIO '(' expr ',' expr ')' { $$ = d_ratio($3,$5); }
- ;
+ $$.val = $1;
+ $$.ids = NULL;
+}
+| ID
+{
+ if (!compute_ids) {
+ /*
+ * Compute the event's value from ID. If the ID isn't known then
+ * it isn't used to compute the formula so set to NAN.
+ */
+ struct expr_id_data *data;
+
+ $$.val = NAN;
+ if (expr__resolve_id(ctx, $1, &data) == 0)
+ $$.val = expr_id_data__value(data);
+
+ $$.ids = NULL;
+ free($1);
+ } else {
+ /*
+ * Set the value to BOTTOM to show that any value is possible
+ * when the event is computed. Create a set of just the ID.
+ */
+ $$.val = BOTTOM;
+ $$.ids = ids__new();
+ if (!$$.ids || ids__insert($$.ids, $1))
+ YYABORT;
+ }
+}
+| expr '|' expr { BINARY_LONG_OP($$, |, $1, $3); }
+| expr '&' expr { BINARY_LONG_OP($$, &, $1, $3); }
+| expr '^' expr { BINARY_LONG_OP($$, ^, $1, $3); }
+| expr '<' expr { BINARY_OP($$, <, $1, $3); }
+| expr '>' expr { BINARY_OP($$, >, $1, $3); }
+| expr '+' expr { BINARY_OP($$, +, $1, $3); }
+| expr '-' expr { BINARY_OP($$, -, $1, $3); }
+| expr '*' expr { BINARY_OP($$, *, $1, $3); }
+| expr '/' expr
+{
+ if (fpclassify($3.val) == FP_ZERO) {
+ pr_debug("division by zero\n");
+ YYABORT;
+ } else if (!compute_ids || (is_const($1.val) && is_const($3.val))) {
+ assert($1.ids == NULL);
+ assert($3.ids == NULL);
+ $$.val = $1.val / $3.val;
+ $$.ids = NULL;
+ } else {
+ /* LHS and/or RHS need computing from event IDs so union. */
+ $$ = union_expr($1, $3);
+ }
+}
+| expr '%' expr
+{
+ if (fpclassify($3.val) == FP_ZERO) {
+ pr_debug("division by zero\n");
+ YYABORT;
+ } else if (!compute_ids || (is_const($1.val) && is_const($3.val))) {
+ assert($1.ids == NULL);
+ assert($3.ids == NULL);
+ $$.val = (long)$1.val % (long)$3.val;
+ $$.ids = NULL;
+ } else {
+ /* LHS and/or RHS need computing from event IDs so union. */
+ $$ = union_expr($1, $3);
+ }
+}
+| D_RATIO '(' expr ',' expr ')'
+{
+ if (fpclassify($5.val) == FP_ZERO) {
+ /*
+ * Division by constant zero always yields zero and no events
+ * are necessary.
+ */
+ assert($5.ids == NULL);
+ $$.val = 0.0;
+ $$.ids = NULL;
+ ids__free($3.ids);
+ } else if (!compute_ids || (is_const($3.val) && is_const($5.val))) {
+ assert($3.ids == NULL);
+ assert($5.ids == NULL);
+ $$.val = $3.val / $5.val;
+ $$.ids = NULL;
+ } else {
+ /* LHS and/or RHS need computing from event IDs so union. */
+ $$ = union_expr($3, $5);
+ }
+}
+| '-' expr %prec NEG
+{
+ $$.val = -$2.val;
+ $$.ids = $2.ids;
+}
+| '(' if_expr ')'
+{
+ $$ = $2;
+}
+| MIN '(' expr ',' expr ')'
+{
+ if (!compute_ids) {
+ $$.val = $3.val < $5.val ? $3.val : $5.val;
+ $$.ids = NULL;
+ } else {
+ $$ = union_expr($3, $5);
+ }
+}
+| MAX '(' expr ',' expr ')'
+{
+ if (!compute_ids) {
+ $$.val = $3.val > $5.val ? $3.val : $5.val;
+ $$.ids = NULL;
+ } else {
+ $$ = union_expr($3, $5);
+ }
+}
+| SMT_ON
+{
+ $$.val = smt_on() > 0 ? 1.0 : 0.0;
+ $$.ids = NULL;
+}
+;
%%
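A minimal sketch (not part of the patch) of the id-set computation the rewritten grammar enables, reusing the expressions from the test cases added above; EVENT1/EVENT2 are illustrative names and <assert.h> plus the perf-internal headers are assumed:

/* Illustrative only: which counters does a metric expression actually require? */
static void example_find_ids(struct expr_parse_ctx *ctx)
{
	/* Both branches are the constant 1.0, so the whole expression folds to a
	 * constant and no event ids are needed. */
	expr__ctx_clear(ctx);
	expr__find_ids("1.0 if EVENT1 > 100.0 else 1.0", NULL, ctx);
	assert(hashmap__size(ctx->ids) == 0);

	/* #smt_on is a constant at parse time, so only the taken branch's event
	 * ends up in the id set. */
	expr__ctx_clear(ctx);
	expr__find_ids("EVENT1 if #smt_on else EVENT2", NULL, ctx);
	assert(hashmap__size(ctx->ids) == 1);
}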
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 6f852b305e92..c9542fada8fb 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -111,6 +111,7 @@ struct intel_pt {
u64 cbr_id;
u64 psb_id;
+ bool single_pebs;
bool sample_pebs;
struct evsel *pebs_evsel;
@@ -148,6 +149,14 @@ enum switch_state {
INTEL_PT_SS_EXPECTING_SWITCH_IP,
};
+/* applicable_counters is 64-bits */
+#define INTEL_PT_MAX_PEBS 64
+
+struct intel_pt_pebs_event {
+ struct evsel *evsel;
+ u64 id;
+};
+
struct intel_pt_queue {
struct intel_pt *pt;
unsigned int queue_nr;
@@ -189,6 +198,7 @@ struct intel_pt_queue {
u64 last_br_cyc_cnt;
unsigned int cbr_seen;
char insn[INTEL_PT_INSN_BUF_SZ];
+ struct intel_pt_pebs_event pebs[INTEL_PT_MAX_PEBS];
};
static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
@@ -1978,15 +1988,13 @@ static void intel_pt_add_lbrs(struct branch_stack *br_stack,
}
}
-static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
+static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel, u64 id)
{
const struct intel_pt_blk_items *items = &ptq->state->items;
struct perf_sample sample = { .ip = 0, };
union perf_event *event = ptq->event_buf;
struct intel_pt *pt = ptq->pt;
- struct evsel *evsel = pt->pebs_evsel;
u64 sample_type = evsel->core.attr.sample_type;
- u64 id = evsel->core.id[0];
u8 cpumode;
u64 regs[8 * sizeof(sample.intr_regs.mask)];
@@ -2112,6 +2120,45 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
return intel_pt_deliver_synth_event(pt, event, &sample, sample_type);
}
+static int intel_pt_synth_single_pebs_sample(struct intel_pt_queue *ptq)
+{
+ struct intel_pt *pt = ptq->pt;
+ struct evsel *evsel = pt->pebs_evsel;
+ u64 id = evsel->core.id[0];
+
+ return intel_pt_do_synth_pebs_sample(ptq, evsel, id);
+}
+
+static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
+{
+ const struct intel_pt_blk_items *items = &ptq->state->items;
+ struct intel_pt_pebs_event *pe;
+ struct intel_pt *pt = ptq->pt;
+ int err = -EINVAL;
+ int hw_id;
+
+ if (!items->has_applicable_counters || !items->applicable_counters) {
+ if (!pt->single_pebs)
+ pr_err("PEBS-via-PT record with no applicable_counters\n");
+ return intel_pt_synth_single_pebs_sample(ptq);
+ }
+
+ for_each_set_bit(hw_id, (unsigned long *)&items->applicable_counters, INTEL_PT_MAX_PEBS) {
+ pe = &ptq->pebs[hw_id];
+ if (!pe->evsel) {
+ if (!pt->single_pebs)
+ pr_err("PEBS-via-PT record with no matching event, hw_id %d\n",
+ hw_id);
+ return intel_pt_synth_single_pebs_sample(ptq);
+ }
+ err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
pid_t pid, pid_t tid, u64 ip, u64 timestamp)
{
@@ -2882,6 +2929,30 @@ static int intel_pt_process_itrace_start(struct intel_pt *pt,
event->itrace_start.tid);
}
+static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt,
+ union perf_event *event,
+ struct perf_sample *sample)
+{
+ u64 hw_id = event->aux_output_hw_id.hw_id;
+ struct auxtrace_queue *queue;
+ struct intel_pt_queue *ptq;
+ struct evsel *evsel;
+
+ queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
+ evsel = evlist__id2evsel_strict(pt->session->evlist, sample->id);
+	if (!queue || !queue->priv || !evsel || hw_id >= INTEL_PT_MAX_PEBS) {
+ pr_err("Bad AUX output hardware ID\n");
+ return -EINVAL;
+ }
+
+ ptq = queue->priv;
+
+ ptq->pebs[hw_id].evsel = evsel;
+ ptq->pebs[hw_id].id = sample->id;
+
+ return 0;
+}
+
static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr,
struct addr_location *al)
{
@@ -3009,6 +3080,8 @@ static int intel_pt_process_event(struct perf_session *session,
err = intel_pt_process_switch(pt, sample);
else if (event->header.type == PERF_RECORD_ITRACE_START)
err = intel_pt_process_itrace_start(pt, event, sample);
+ else if (event->header.type == PERF_RECORD_AUX_OUTPUT_HW_ID)
+ err = intel_pt_process_aux_output_hw_id(pt, event, sample);
else if (event->header.type == PERF_RECORD_SWITCH ||
event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
err = intel_pt_context_switch(pt, event, sample);
@@ -3393,9 +3466,13 @@ static void intel_pt_setup_pebs_events(struct intel_pt *pt)
evlist__for_each_entry(pt->session->evlist, evsel) {
if (evsel->core.attr.aux_output && evsel->core.id) {
+ if (pt->single_pebs) {
+ pt->single_pebs = false;
+ return;
+ }
+ pt->single_pebs = true;
pt->sample_pebs = true;
pt->pebs_evsel = evsel;
- return;
}
}
}
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 44e40bad0e33..fb8496df8432 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -755,6 +755,14 @@ int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
return 0;
}
+int machine__process_aux_output_hw_id_event(struct machine *machine __maybe_unused,
+ union perf_event *event)
+{
+ if (dump_trace)
+ perf_event__fprintf_aux_output_hw_id(event, stdout);
+ return 0;
+}
+
int machine__process_switch_event(struct machine *machine __maybe_unused,
union perf_event *event)
{
@@ -2028,6 +2036,8 @@ int machine__process_event(struct machine *machine, union perf_event *event,
ret = machine__process_bpf(machine, event, sample); break;
case PERF_RECORD_TEXT_POKE:
ret = machine__process_text_poke(machine, event, sample); break;
+ case PERF_RECORD_AUX_OUTPUT_HW_ID:
+ ret = machine__process_aux_output_hw_id_event(machine, event); break;
default:
ret = -1;
break;
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 7377ed6efdf1..a143087eeb47 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -124,6 +124,8 @@ int machine__process_aux_event(struct machine *machine,
union perf_event *event);
int machine__process_itrace_start_event(struct machine *machine,
union perf_event *event);
+int machine__process_aux_output_hw_id_event(struct machine *machine,
+ union perf_event *event);
int machine__process_switch_event(struct machine *machine,
union perf_event *event);
int machine__process_namespaces_event(struct machine *machine,
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 29b747ac31c1..4917e9704765 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -11,6 +11,7 @@
#include "evsel.h"
#include "strbuf.h"
#include "pmu.h"
+#include "pmu-hybrid.h"
#include "expr.h"
#include "rblist.h"
#include <string.h>
@@ -18,6 +19,7 @@
#include "strlist.h"
#include <assert.h>
#include <linux/ctype.h>
+#include <linux/list_sort.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <subcmd/parse-options.h>
@@ -84,6 +86,7 @@ static void metric_event_delete(struct rblist *rblist __maybe_unused,
struct metric_expr *expr, *tmp;
list_for_each_entry_safe(expr, tmp, &me->head, nd) {
+ free((char *)expr->metric_name);
free(expr->metric_refs);
free(expr->metric_events);
free(expr);
@@ -116,289 +119,206 @@ struct metric_ref_node {
struct list_head list;
};
+/**
+ * The metric under construction. The data held here will be placed in a
+ * metric_expr.
+ */
struct metric {
struct list_head nd;
- struct expr_parse_ctx pctx;
+ /**
+ * The expression parse context importantly holding the IDs contained
+ * within the expression.
+ */
+ struct expr_parse_ctx *pctx;
+ /** The name of the metric such as "IPC". */
const char *metric_name;
+ /** Modifier on the metric such as "u" or NULL for none. */
+ const char *modifier;
+ /** The expression to parse, for example, "instructions/cycles". */
const char *metric_expr;
+ /**
+ * The "ScaleUnit" that scales and adds a unit to the metric during
+ * output.
+ */
const char *metric_unit;
- struct list_head metric_refs;
- int metric_refs_cnt;
- int runtime;
+ /** Optional null terminated array of referenced metrics. */
+ struct metric_ref *metric_refs;
+ /**
+ * Is there a constraint on the group of events? In which case the
+ * events won't be grouped.
+ */
bool has_constraint;
+ /**
+ * Parsed events for the metric. Optional as events may be taken from a
+ * different metric whose group contains all the IDs necessary for this
+ * one.
+ */
+ struct evlist *evlist;
};
-#define RECURSION_ID_MAX 1000
+static void metricgroup___watchdog_constraint_hint(const char *name, bool foot)
+{
+ static bool violate_nmi_constraint;
-struct expr_ids {
- struct expr_id id[RECURSION_ID_MAX];
- int cnt;
-};
+ if (!foot) {
+ pr_warning("Splitting metric group %s into standalone metrics.\n", name);
+ violate_nmi_constraint = true;
+ return;
+ }
-static struct expr_id *expr_ids__alloc(struct expr_ids *ids)
+ if (!violate_nmi_constraint)
+ return;
+
+ pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
+ " echo 0 > /proc/sys/kernel/nmi_watchdog\n"
+ " perf stat ...\n"
+ " echo 1 > /proc/sys/kernel/nmi_watchdog\n");
+}
+
+static bool metricgroup__has_constraint(const struct pmu_event *pe)
{
- if (ids->cnt >= RECURSION_ID_MAX)
- return NULL;
- return &ids->id[ids->cnt++];
+ if (!pe->metric_constraint)
+ return false;
+
+ if (!strcmp(pe->metric_constraint, "NO_NMI_WATCHDOG") &&
+ sysctl__nmi_watchdog_enabled()) {
+ metricgroup___watchdog_constraint_hint(pe->metric_name, false);
+ return true;
+ }
+
+ return false;
}
-static void expr_ids__exit(struct expr_ids *ids)
+static struct metric *metric__new(const struct pmu_event *pe,
+ const char *modifier,
+ bool metric_no_group,
+ int runtime)
{
- int i;
+ struct metric *m;
+
+ m = zalloc(sizeof(*m));
+ if (!m)
+ return NULL;
+
+ m->pctx = expr__ctx_new();
+ if (!m->pctx) {
+ free(m);
+ return NULL;
+ }
- for (i = 0; i < ids->cnt; i++)
- free(ids->id[i].id);
+ m->metric_name = pe->metric_name;
+ m->modifier = modifier ? strdup(modifier) : NULL;
+ if (modifier && !m->modifier) {
+ free(m);
+ expr__ctx_free(m->pctx);
+ return NULL;
+ }
+ m->metric_expr = pe->metric_expr;
+ m->metric_unit = pe->unit;
+ m->pctx->runtime = runtime;
+ m->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
+ m->metric_refs = NULL;
+ m->evlist = NULL;
+
+ return m;
}
-static bool contains_event(struct evsel **metric_events, int num_events,
- const char *event_name)
+static void metric__free(struct metric *m)
+{
+ free(m->metric_refs);
+ expr__ctx_free(m->pctx);
+ free((char *)m->modifier);
+ free(m);
+}
+
+static bool contains_metric_id(struct evsel **metric_events, int num_events,
+ const char *metric_id)
{
int i;
for (i = 0; i < num_events; i++) {
- if (!strcmp(metric_events[i]->name, event_name))
+ if (!strcmp(evsel__metric_id(metric_events[i]), metric_id))
return true;
}
return false;
}
-static bool evsel_same_pmu_or_none(struct evsel *ev1, struct evsel *ev2)
-{
- if (!ev1->pmu_name || !ev2->pmu_name)
- return true;
-
- return !strcmp(ev1->pmu_name, ev2->pmu_name);
-}
-
/**
- * Find a group of events in perf_evlist that correspond to those from a parsed
- * metric expression. Note, as find_evsel_group is called in the same order as
- * perf_evlist was constructed, metric_no_merge doesn't need to test for
- * underfilling a group.
- * @perf_evlist: a list of events something like: {metric1 leader, metric1
- * sibling, metric1 sibling}:W,duration_time,{metric2 leader, metric2 sibling,
- * metric2 sibling}:W,duration_time
- * @pctx: the parse context for the metric expression.
- * @metric_no_merge: don't attempt to share events for the metric with other
- * metrics.
- * @has_constraint: is there a constraint on the group of events? In which case
- * the events won't be grouped.
- * @metric_events: out argument, null terminated array of evsel's associated
- * with the metric.
- * @evlist_used: in/out argument, bitmap tracking which evlist events are used.
- * @return the first metric event or NULL on failure.
+ * setup_metric_events - Find a group of events in metric_evlist that correspond
+ * to the IDs from a parsed metric expression.
+ * @ids: the metric IDs to match.
+ * @metric_evlist: the list of perf events.
+ * @out_metric_events: holds the created metric events array.
*/
-static struct evsel *find_evsel_group(struct evlist *perf_evlist,
- struct expr_parse_ctx *pctx,
- bool metric_no_merge,
- bool has_constraint,
- struct evsel **metric_events,
- unsigned long *evlist_used)
+static int setup_metric_events(struct hashmap *ids,
+ struct evlist *metric_evlist,
+ struct evsel ***out_metric_events)
{
- struct evsel *ev, *current_leader = NULL;
- struct expr_id_data *val_ptr;
- int i = 0, matched_events = 0, events_to_match;
- const int idnum = (int)hashmap__size(&pctx->ids);
+ struct evsel **metric_events;
+ const char *metric_id;
+ struct evsel *ev;
+ size_t ids_size, matched_events, i;
- /*
- * duration_time is always grouped separately, when events are grouped
- * (ie has_constraint is false) then ignore it in the matching loop and
- * add it to metric_events at the end.
- */
- if (!has_constraint &&
- hashmap__find(&pctx->ids, "duration_time", (void **)&val_ptr))
- events_to_match = idnum - 1;
- else
- events_to_match = idnum;
+ *out_metric_events = NULL;
+ ids_size = hashmap__size(ids);
+
+ metric_events = calloc(sizeof(void *), ids_size + 1);
+ if (!metric_events)
+ return -ENOMEM;
+
+ matched_events = 0;
+ evlist__for_each_entry(metric_evlist, ev) {
+ struct expr_id_data *val_ptr;
- evlist__for_each_entry (perf_evlist, ev) {
/*
- * Events with a constraint aren't grouped and match the first
- * events available.
+ * Check for duplicate events with the same name. For
+ * example, uncore_imc/cas_count_read/ will turn into 6
+ * events per socket on skylakex. Only the first such
+ * event is placed in metric_events.
*/
- if (has_constraint && ev->weak_group)
- continue;
- /* Ignore event if already used and merging is disabled. */
- if (metric_no_merge && test_bit(ev->core.idx, evlist_used))
+ metric_id = evsel__metric_id(ev);
+ if (contains_metric_id(metric_events, matched_events, metric_id))
continue;
- if (!has_constraint && !evsel__has_leader(ev, current_leader)) {
- /*
- * Start of a new group, discard the whole match and
- * start again.
- */
- matched_events = 0;
- memset(metric_events, 0,
- sizeof(struct evsel *) * idnum);
- current_leader = evsel__leader(ev);
- }
/*
- * Check for duplicate events with the same name. For example,
- * uncore_imc/cas_count_read/ will turn into 6 events per socket
- * on skylakex. Only the first such event is placed in
- * metric_events. If events aren't grouped then this also
- * ensures that the same event in different sibling groups
- * aren't both added to metric_events.
+ * Does this event belong to the parse context? For
+ * combined or shared groups, this metric may not care
+ * about this event.
*/
- if (contains_event(metric_events, matched_events, ev->name))
- continue;
- /* Does this event belong to the parse context? */
- if (hashmap__find(&pctx->ids, ev->name, (void **)&val_ptr))
+ if (hashmap__find(ids, metric_id, (void **)&val_ptr)) {
metric_events[matched_events++] = ev;
- if (matched_events == events_to_match)
- break;
- }
-
- if (events_to_match != idnum) {
- /* Add the first duration_time. */
- evlist__for_each_entry(perf_evlist, ev) {
- if (!strcmp(ev->name, "duration_time")) {
- metric_events[matched_events++] = ev;
+ if (matched_events >= ids_size)
break;
- }
}
}
-
- if (matched_events != idnum) {
- /* Not a whole match */
- return NULL;
+ if (matched_events < ids_size) {
+ free(metric_events);
+ return -EINVAL;
}
-
- metric_events[idnum] = NULL;
-
- for (i = 0; i < idnum; i++) {
+ for (i = 0; i < ids_size; i++) {
ev = metric_events[i];
- /* Don't free the used events. */
- set_bit(ev->core.idx, evlist_used);
+ ev->collect_stat = true;
+
/*
- * The metric leader points to the identically named event in
- * metric_events.
+ * The metric leader points to the identically named
+ * event in metric_events.
*/
ev->metric_leader = ev;
/*
- * Mark two events with identical names in the same group (or
- * globally) as being in use as uncore events may be duplicated
- * for each pmu. Set the metric leader of such events to be the
- * event that appears in metric_events.
+ * Mark two events with identical names in the same
+ * group (or globally) as being in use as uncore events
+ * may be duplicated for each pmu. Set the metric leader
+ * of such events to be the event that appears in
+ * metric_events.
*/
- evlist__for_each_entry_continue(perf_evlist, ev) {
- /*
- * If events are grouped then the search can terminate
- * when then group is left.
- */
- if (!has_constraint &&
- ev->core.leader != metric_events[i]->core.leader &&
- evsel_same_pmu_or_none(evsel__leader(ev), evsel__leader(metric_events[i])))
- break;
- if (!strcmp(metric_events[i]->name, ev->name)) {
- set_bit(ev->core.idx, evlist_used);
+ metric_id = evsel__metric_id(ev);
+ evlist__for_each_entry_continue(metric_evlist, ev) {
+ if (!strcmp(evsel__metric_id(metric_events[i]), metric_id))
ev->metric_leader = metric_events[i];
- }
- }
- }
-
- return metric_events[0];
-}
-
-static int metricgroup__setup_events(struct list_head *groups,
- bool metric_no_merge,
- struct evlist *perf_evlist,
- struct rblist *metric_events_list)
-{
- struct metric_event *me;
- struct metric_expr *expr;
- int i = 0;
- int ret = 0;
- struct metric *m;
- struct evsel *evsel, *tmp;
- unsigned long *evlist_used;
-
- evlist_used = bitmap_zalloc(perf_evlist->core.nr_entries);
- if (!evlist_used)
- return -ENOMEM;
-
- list_for_each_entry (m, groups, nd) {
- struct evsel **metric_events;
- struct metric_ref *metric_refs = NULL;
-
- metric_events = calloc(sizeof(void *),
- hashmap__size(&m->pctx.ids) + 1);
- if (!metric_events) {
- ret = -ENOMEM;
- break;
- }
- evsel = find_evsel_group(perf_evlist, &m->pctx,
- metric_no_merge,
- m->has_constraint, metric_events,
- evlist_used);
- if (!evsel) {
- pr_debug("Cannot resolve %s: %s\n",
- m->metric_name, m->metric_expr);
- free(metric_events);
- continue;
- }
- for (i = 0; metric_events[i]; i++)
- metric_events[i]->collect_stat = true;
- me = metricgroup__lookup(metric_events_list, evsel, true);
- if (!me) {
- ret = -ENOMEM;
- free(metric_events);
- break;
- }
- expr = malloc(sizeof(struct metric_expr));
- if (!expr) {
- ret = -ENOMEM;
- free(metric_events);
- break;
- }
-
- /*
- * Collect and store collected nested expressions
- * for metric processing.
- */
- if (m->metric_refs_cnt) {
- struct metric_ref_node *ref;
-
- metric_refs = zalloc(sizeof(struct metric_ref) * (m->metric_refs_cnt + 1));
- if (!metric_refs) {
- ret = -ENOMEM;
- free(metric_events);
- free(expr);
- break;
- }
-
- i = 0;
- list_for_each_entry(ref, &m->metric_refs, list) {
- /*
- * Intentionally passing just const char pointers,
- * originally from 'struct pmu_event' object.
- * We don't need to change them, so there's no
- * need to create our own copy.
- */
- metric_refs[i].metric_name = ref->metric_name;
- metric_refs[i].metric_expr = ref->metric_expr;
- i++;
- }
- }
-
- expr->metric_refs = metric_refs;
- expr->metric_expr = m->metric_expr;
- expr->metric_name = m->metric_name;
- expr->metric_unit = m->metric_unit;
- expr->metric_events = metric_events;
- expr->runtime = m->runtime;
- list_add(&expr->nd, &me->head);
- }
-
- evlist__for_each_entry_safe(perf_evlist, tmp, evsel) {
- if (!test_bit(evsel->core.idx, evlist_used)) {
- evlist__remove(perf_evlist, evsel);
- evsel__delete(evsel);
}
}
- bitmap_free(evlist_used);
-
- return ret;
+ *out_metric_events = metric_events;
+ return 0;
}
static bool match_metric(const char *n, const char *list)
@@ -422,7 +342,7 @@ static bool match_metric(const char *n, const char *list)
return false;
}
-static bool match_pe_metric(struct pmu_event *pe, const char *metric)
+static bool match_pe_metric(const struct pmu_event *pe, const char *metric)
{
return match_metric(pe->metric_group, metric) ||
match_metric(pe->metric_name, metric);
@@ -506,7 +426,7 @@ static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
putchar('\n');
}
-static int metricgroup__print_pmu_event(struct pmu_event *pe,
+static int metricgroup__print_pmu_event(const struct pmu_event *pe,
bool metricgroups, char *filter,
bool raw, bool details,
struct rblist *groups,
@@ -581,14 +501,14 @@ struct metricgroup_print_sys_idata {
bool details;
};
-typedef int (*metricgroup_sys_event_iter_fn)(struct pmu_event *pe, void *);
+typedef int (*metricgroup_sys_event_iter_fn)(const struct pmu_event *pe, void *);
struct metricgroup_iter_data {
metricgroup_sys_event_iter_fn fn;
void *data;
};
-static int metricgroup__sys_event_iter(struct pmu_event *pe, void *data)
+static int metricgroup__sys_event_iter(const struct pmu_event *pe, void *data)
{
struct metricgroup_iter_data *d = data;
struct perf_pmu *pmu = NULL;
@@ -607,7 +527,7 @@ static int metricgroup__sys_event_iter(struct pmu_event *pe, void *data)
return 0;
}
-static int metricgroup__print_sys_event_iter(struct pmu_event *pe, void *data)
+static int metricgroup__print_sys_event_iter(const struct pmu_event *pe, void *data)
{
struct metricgroup_print_sys_idata *d = data;
@@ -616,10 +536,10 @@ static int metricgroup__print_sys_event_iter(struct pmu_event *pe, void *data)
}
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
- bool raw, bool details)
+ bool raw, bool details, const char *pmu_name)
{
- struct pmu_events_map *map = pmu_events_map__find();
- struct pmu_event *pe;
+ const struct pmu_events_map *map = pmu_events_map__find();
+ const struct pmu_event *pe;
int i;
struct rblist groups;
struct rb_node *node, *next;
@@ -642,6 +562,10 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
break;
if (!pe->metric_expr)
continue;
+ if (pmu_name && perf_pmu__is_hybrid(pe->pmu) &&
+ strcmp(pmu_name, pe->pmu)) {
+ continue;
+ }
if (metricgroup__print_pmu_event(pe, metricgroups, filter,
raw, details, &groups,
metriclist) < 0)
@@ -686,150 +610,391 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
strlist__delete(metriclist);
}
-static void metricgroup__add_metric_weak_group(struct strbuf *events,
- struct expr_parse_ctx *ctx)
+static const char *code_characters = ",-=@";
+
+static int encode_metric_id(struct strbuf *sb, const char *x)
{
- struct hashmap_entry *cur;
- size_t bkt;
- bool no_group = true, has_duration = false;
+ char *c;
+ int ret = 0;
- hashmap__for_each_entry((&ctx->ids), cur, bkt) {
- pr_debug("found event %s\n", (const char *)cur->key);
- /*
- * Duration time maps to a software event and can make
- * groups not count. Always use it outside a
- * group.
- */
- if (!strcmp(cur->key, "duration_time")) {
- has_duration = true;
- continue;
+ for (; *x; x++) {
+ c = strchr(code_characters, *x);
+ if (c) {
+ ret = strbuf_addch(sb, '!');
+ if (ret)
+ break;
+
+ ret = strbuf_addch(sb, '0' + (c - code_characters));
+ if (ret)
+ break;
+ } else {
+ ret = strbuf_addch(sb, *x);
+ if (ret)
+ break;
}
- strbuf_addf(events, "%s%s",
- no_group ? "{" : ",",
- (const char *)cur->key);
- no_group = false;
}
- if (!no_group) {
- strbuf_addf(events, "}:W");
- if (has_duration)
- strbuf_addf(events, ",duration_time");
- } else if (has_duration)
- strbuf_addf(events, "duration_time");
+ return ret;
}
-static void metricgroup__add_metric_non_group(struct strbuf *events,
- struct expr_parse_ctx *ctx)
+static int decode_metric_id(struct strbuf *sb, const char *x)
{
- struct hashmap_entry *cur;
- size_t bkt;
- bool first = true;
+ const char *orig = x;
+ size_t i;
+ char c;
+ int ret;
- hashmap__for_each_entry((&ctx->ids), cur, bkt) {
- if (!first)
- strbuf_addf(events, ",");
- strbuf_addf(events, "%s", (const char *)cur->key);
- first = false;
+ for (; *x; x++) {
+ c = *x;
+ if (*x == '!') {
+ x++;
+ i = *x - '0';
+ if (i > strlen(code_characters)) {
+ pr_err("Bad metric-id encoding in: '%s'", orig);
+ return -1;
+ }
+ c = code_characters[i];
+ }
+ ret = strbuf_addch(sb, c);
+ if (ret)
+ return ret;
}
+ return 0;
}
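
A standalone round trip of the escaping used above, illustrative only: the perf versions write into a strbuf and report errors, but the transformation is the same, with the characters parse-events cannot accept in a term value (",-=@") written as '!' plus their index:

#include <assert.h>
#include <stdio.h>
#include <string.h>

static const char *code_characters = ",-=@";

/* Escape: each special character becomes '!' followed by its index. */
static void encode(const char *in, char *out)
{
	for (; *in; in++) {
		const char *c = strchr(code_characters, *in);

		if (c) {
			*out++ = '!';
			*out++ = '0' + (c - code_characters);
		} else {
			*out++ = *in;
		}
	}
	*out = '\0';
}

/* Unescape: '!' pulls the next digit and maps it back. No bounds checks. */
static void decode(const char *in, char *out)
{
	for (; *in; in++) {
		char c = *in;

		if (c == '!')
			c = code_characters[*++in - '0'];
		*out++ = c;
	}
	*out = '\0';
}

int main(void)
{
	char enc[32], dec[32];

	encode("msr@tsc@", enc);	/* yields "msr!3tsc!3" */
	decode(enc, dec);
	assert(!strcmp(dec, "msr@tsc@"));
	printf("%s -> %s -> %s\n", "msr@tsc@", enc, dec);
	return 0;
}
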
-static void metricgroup___watchdog_constraint_hint(const char *name, bool foot)
+static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier)
{
- static bool violate_nmi_constraint;
+ struct evsel *ev;
+ struct strbuf sb = STRBUF_INIT;
+ char *cur;
+ int ret = 0;
- if (!foot) {
- pr_warning("Splitting metric group %s into standalone metrics.\n", name);
- violate_nmi_constraint = true;
- return;
- }
+ evlist__for_each_entry(perf_evlist, ev) {
+ if (!ev->metric_id)
+ continue;
- if (!violate_nmi_constraint)
- return;
+ ret = strbuf_setlen(&sb, 0);
+ if (ret)
+ break;
- pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
- " echo 0 > /proc/sys/kernel/nmi_watchdog\n"
- " perf stat ...\n"
- " echo 1 > /proc/sys/kernel/nmi_watchdog\n");
+ ret = decode_metric_id(&sb, ev->metric_id);
+ if (ret)
+ break;
+
+ free((char *)ev->metric_id);
+ ev->metric_id = strdup(sb.buf);
+ if (!ev->metric_id) {
+ ret = -ENOMEM;
+ break;
+ }
+ /*
+ * If the name is just the parsed event, use the metric-id to
+ * give a more friendly display version.
+ */
+ if (strstr(ev->name, "metric-id=")) {
+ bool has_slash = false;
+
+ free(ev->name);
+ for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) {
+ *cur = '/';
+ has_slash = true;
+ }
+
+ if (modifier) {
+ if (!has_slash && !strchr(sb.buf, ':')) {
+ ret = strbuf_addch(&sb, ':');
+ if (ret)
+ break;
+ }
+ ret = strbuf_addstr(&sb, modifier);
+ if (ret)
+ break;
+ }
+ ev->name = strdup(sb.buf);
+ if (!ev->name) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+ }
+ strbuf_release(&sb);
+ return ret;
}
-static bool metricgroup__has_constraint(struct pmu_event *pe)
+static int metricgroup__build_event_string(struct strbuf *events,
+ const struct expr_parse_ctx *ctx,
+ const char *modifier,
+ bool has_constraint)
{
- if (!pe->metric_constraint)
- return false;
+ struct hashmap_entry *cur;
+ size_t bkt;
+ bool no_group = true, has_duration = false;
+ int ret = 0;
- if (!strcmp(pe->metric_constraint, "NO_NMI_WATCHDOG") &&
- sysctl__nmi_watchdog_enabled()) {
- metricgroup___watchdog_constraint_hint(pe->metric_name, false);
- return true;
+#define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)
+
+ hashmap__for_each_entry(ctx->ids, cur, bkt) {
+ const char *sep, *rsep, *id = cur->key;
+
+ pr_debug("found event %s\n", id);
+ /*
+ * Duration time maps to a software event and can make
+ * groups not count. Always use it outside a
+ * group.
+ */
+ if (!strcmp(id, "duration_time")) {
+ has_duration = true;
+ continue;
+ }
+ /* Separate events with commas and open the group if necessary. */
+ if (no_group) {
+ if (!has_constraint) {
+ ret = strbuf_addch(events, '{');
+ RETURN_IF_NON_ZERO(ret);
+ }
+
+ no_group = false;
+ } else {
+ ret = strbuf_addch(events, ',');
+ RETURN_IF_NON_ZERO(ret);
+ }
+ /*
+ * Encode the ID as an event string. Add a qualifier for
+ * metric_id that is the original name except with characters
+ * that parse-events can't parse replaced. For example,
+ * 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/
+ */
+ sep = strchr(id, '@');
+ if (sep != NULL) {
+ ret = strbuf_add(events, id, sep - id);
+ RETURN_IF_NON_ZERO(ret);
+ ret = strbuf_addch(events, '/');
+ RETURN_IF_NON_ZERO(ret);
+ rsep = strrchr(sep, '@');
+ ret = strbuf_add(events, sep + 1, rsep - sep - 1);
+ RETURN_IF_NON_ZERO(ret);
+ ret = strbuf_addstr(events, ",metric-id=");
+ RETURN_IF_NON_ZERO(ret);
+ sep = rsep;
+ } else {
+ sep = strchr(id, ':');
+ if (sep != NULL) {
+ ret = strbuf_add(events, id, sep - id);
+ RETURN_IF_NON_ZERO(ret);
+ } else {
+ ret = strbuf_addstr(events, id);
+ RETURN_IF_NON_ZERO(ret);
+ }
+ ret = strbuf_addstr(events, "/metric-id=");
+ RETURN_IF_NON_ZERO(ret);
+ }
+ ret = encode_metric_id(events, id);
+ RETURN_IF_NON_ZERO(ret);
+ ret = strbuf_addstr(events, "/");
+ RETURN_IF_NON_ZERO(ret);
+
+ if (sep != NULL) {
+ ret = strbuf_addstr(events, sep + 1);
+ RETURN_IF_NON_ZERO(ret);
+ }
+ if (modifier) {
+ ret = strbuf_addstr(events, modifier);
+ RETURN_IF_NON_ZERO(ret);
+ }
}
+ if (has_duration) {
+ if (no_group) {
+ /* Strange case of a metric of just duration_time. */
+ ret = strbuf_addf(events, "duration_time");
+ } else if (!has_constraint)
+ ret = strbuf_addf(events, "}:W,duration_time");
+ else
+ ret = strbuf_addf(events, ",duration_time");
+ } else if (!no_group && !has_constraint)
+ ret = strbuf_addf(events, "}:W");
- return false;
+ return ret;
+#undef RETURN_IF_NON_ZERO
}
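
For concreteness, a hypothetical metric whose parse context holds only "instructions" and "cycles", with no constraint and no modifier, would come out of the builder above shaped roughly as follows (ID order follows hashmap iteration, so it can vary):

	/* Illustrative shape only, not produced by running the code here. */
	const char *example_events =
		"{instructions/metric-id=instructions/,cycles/metric-id=cycles/}:W";
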
-int __weak arch_get_runtimeparam(struct pmu_event *pe __maybe_unused)
+int __weak arch_get_runtimeparam(const struct pmu_event *pe __maybe_unused)
{
return 1;
}
+/*
+ * A singly linked list on the stack of the names of metrics being
+ * processed. Used to identify recursion.
+ */
+struct visited_metric {
+ const char *name;
+ const struct visited_metric *parent;
+};
+
struct metricgroup_add_iter_data {
struct list_head *metric_list;
- const char *metric;
- struct expr_ids *ids;
+ const char *metric_name;
+ const char *modifier;
int *ret;
bool *has_match;
bool metric_no_group;
+ struct metric *root_metric;
+ const struct visited_metric *visited;
+ const struct pmu_events_map *map;
};
+static int add_metric(struct list_head *metric_list,
+ const struct pmu_event *pe,
+ const char *modifier,
+ bool metric_no_group,
+ struct metric *root_metric,
+ const struct visited_metric *visited,
+ const struct pmu_events_map *map);
+
+/**
+ * resolve_metric - Locate metrics within the root metric and recursively add
+ * references to them.
+ * @metric_list: The list the metric is added to.
+ * @modifier: if non-null event modifiers like "u".
+ * @metric_no_group: Should events written to events be grouped "{}" or
+ * global. Grouping is the default but due to multiplexing the
+ * user may override.
+ * @root_metric: Metrics may reference other metrics to form a tree. In this
+ * case the root_metric holds all the IDs and a list of referenced
+ * metrics. When adding a root this argument is NULL.
+ * @visited: A singly linked list of metric names being added that is used to
+ * detect recursion.
+ * @map: The map that is searched for metrics, most commonly the table for the
+ * architecture perf is running upon.
+ */
+static int resolve_metric(struct list_head *metric_list,
+ const char *modifier,
+ bool metric_no_group,
+ struct metric *root_metric,
+ const struct visited_metric *visited,
+ const struct pmu_events_map *map)
+{
+ struct hashmap_entry *cur;
+ size_t bkt;
+ struct to_resolve {
+ /* The metric to resolve. */
+ const struct pmu_event *pe;
+ /*
+ * The key in the IDs map; it may differ (in case, etc.) from
+ * pe->metric_name.
+ */
+ const char *key;
+ } *pending = NULL;
+ int i, ret = 0, pending_cnt = 0;
+
+ /*
+ * Iterate all the parsed IDs and, if there's a matching metric, add it to
+ * the pending array.
+ */
+ hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
+ const struct pmu_event *pe;
+
+ pe = metricgroup__find_metric(cur->key, map);
+ if (pe) {
+ pending = realloc(pending,
+ (pending_cnt + 1) * sizeof(struct to_resolve));
+ if (!pending)
+ return -ENOMEM;
+
+ pending[pending_cnt].pe = pe;
+ pending[pending_cnt].key = cur->key;
+ pending_cnt++;
+ }
+ }
+
+ /* Remove the metric IDs from the context. */
+ for (i = 0; i < pending_cnt; i++)
+ expr__del_id(root_metric->pctx, pending[i].key);
+
+ /*
+ * Recursively add all the metrics, IDs are added to the root metric's
+ * context.
+ */
+ for (i = 0; i < pending_cnt; i++) {
+ ret = add_metric(metric_list, pending[i].pe, modifier, metric_no_group,
+ root_metric, visited, map);
+ if (ret)
+ break;
+ }
+
+ free(pending);
+ return ret;
+}
+
+/**
+ * __add_metric - Add a metric to metric_list.
+ * @metric_list: The list the metric is added to.
+ * @pe: The pmu_event containing the metric to be added.
+ * @modifier: if non-null event modifiers like "u".
+ * @metric_no_group: Should events written to events be grouped "{}" or
+ * global. Grouping is the default but due to multiplexing the
+ * user may override.
+ * @runtime: A special argument for the parser only known at runtime.
+ * @root_metric: Metrics may reference other metrics to form a tree. In this
+ * case the root_metric holds all the IDs and a list of referenced
+ * metrics. When adding a root this argument is NULL.
+ * @visited: A singly linked list of metric names being added that is used to
+ * detect recursion.
+ * @map: The map that is searched for metrics, most commonly the table for the
+ * architecture perf is running upon.
+ */
static int __add_metric(struct list_head *metric_list,
- struct pmu_event *pe,
+ const struct pmu_event *pe,
+ const char *modifier,
bool metric_no_group,
int runtime,
- struct metric **mp,
- struct expr_id *parent,
- struct expr_ids *ids)
+ struct metric *root_metric,
+ const struct visited_metric *visited,
+ const struct pmu_events_map *map)
{
- struct metric_ref_node *ref;
- struct metric *m;
+ const struct visited_metric *vm;
+ int ret;
+ bool is_root = !root_metric;
+ struct visited_metric visited_node = {
+ .name = pe->metric_name,
+ .parent = visited,
+ };
- if (*mp == NULL) {
+ for (vm = visited; vm; vm = vm->parent) {
+ if (!strcmp(pe->metric_name, vm->name)) {
+ pr_err("failed: recursion detected for %s\n", pe->metric_name);
+ return -1;
+ }
+ }
+
+ if (is_root) {
/*
- * We got in here for the parent group,
- * allocate it and put it on the list.
+ * This metric is the root of a tree and may reference other
+ * metrics that are added recursively.
*/
- m = zalloc(sizeof(*m));
- if (!m)
+ root_metric = metric__new(pe, modifier, metric_no_group, runtime);
+ if (!root_metric)
return -ENOMEM;
- expr__ctx_init(&m->pctx);
- m->metric_name = pe->metric_name;
- m->metric_expr = pe->metric_expr;
- m->metric_unit = pe->unit;
- m->runtime = runtime;
- m->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
- INIT_LIST_HEAD(&m->metric_refs);
- m->metric_refs_cnt = 0;
-
- parent = expr_ids__alloc(ids);
- if (!parent) {
- free(m);
- return -EINVAL;
- }
-
- parent->id = strdup(pe->metric_name);
- if (!parent->id) {
- free(m);
- return -ENOMEM;
- }
- *mp = m;
} else {
+ int cnt = 0;
+
/*
- * We got here for the referenced metric, via the
- * recursive metricgroup__add_metric call, add
- * it to the parent group.
+ * This metric was referenced in a metric higher in the
+ * tree. Check if the same metric is already resolved in the
+ * metric_refs list.
*/
- m = *mp;
+ if (root_metric->metric_refs) {
+ for (; root_metric->metric_refs[cnt].metric_name; cnt++) {
+ if (!strcmp(pe->metric_name,
+ root_metric->metric_refs[cnt].metric_name))
+ return 0;
+ }
+ }
- ref = malloc(sizeof(*ref));
- if (!ref)
+ /* Create reference. Need space for the entry and the terminator. */
+ root_metric->metric_refs = realloc(root_metric->metric_refs,
+ (cnt + 2) * sizeof(struct metric_ref));
+ if (!root_metric->metric_refs)
return -ENOMEM;
/*
@@ -838,54 +1003,35 @@ static int __add_metric(struct list_head *metric_list,
* need to change them, so there's no need to create
* our own copy.
*/
- ref->metric_name = pe->metric_name;
- ref->metric_expr = pe->metric_expr;
+ root_metric->metric_refs[cnt].metric_name = pe->metric_name;
+ root_metric->metric_refs[cnt].metric_expr = pe->metric_expr;
- list_add(&ref->list, &m->metric_refs);
- m->metric_refs_cnt++;
+ /* Null terminate array. */
+ root_metric->metric_refs[cnt+1].metric_name = NULL;
+ root_metric->metric_refs[cnt+1].metric_expr = NULL;
}
- /* Force all found IDs in metric to have us as parent ID. */
- WARN_ON_ONCE(!parent);
- m->pctx.parent = parent;
-
/*
* For both the parent and referenced metrics, we parse
- * all the metric's IDs and add it to the parent context.
+ * all the metric's IDs and add it to the root context.
*/
- if (expr__find_other(pe->metric_expr, NULL, &m->pctx, runtime) < 0) {
- if (m->metric_refs_cnt == 0) {
- expr__ctx_clear(&m->pctx);
- free(m);
- *mp = NULL;
- }
- return -EINVAL;
+ if (expr__find_ids(pe->metric_expr, NULL, root_metric->pctx) < 0) {
+ /* Broken metric. */
+ ret = -EINVAL;
+ } else {
+ /* Resolve referenced metrics. */
+ ret = resolve_metric(metric_list, modifier, metric_no_group, root_metric,
+ &visited_node, map);
}
- /*
- * We add new group only in the 'parent' call,
- * so bail out for referenced metric case.
- */
- if (m->metric_refs_cnt)
- return 0;
-
- if (list_empty(metric_list))
- list_add(&m->nd, metric_list);
- else {
- struct list_head *pos;
+ if (ret) {
+ if (is_root)
+ metric__free(root_metric);
- /* Place the largest groups at the front. */
- list_for_each_prev(pos, metric_list) {
- struct metric *old = list_entry(pos, struct metric, nd);
+ } else if (is_root)
+ list_add(&root_metric->nd, metric_list);
- if (hashmap__size(&m->pctx.ids) <=
- hashmap__size(&old->pctx.ids))
- break;
- }
- list_add(&m->nd, pos);
- }
-
- return 0;
+ return ret;
}
#define map_for_each_event(__pe, __idx, __map) \
@@ -900,10 +1046,10 @@ static int __add_metric(struct list_head *metric_list,
(match_metric(__pe->metric_group, __metric) || \
match_metric(__pe->metric_name, __metric)))
-struct pmu_event *metricgroup__find_metric(const char *metric,
- struct pmu_events_map *map)
+const struct pmu_event *metricgroup__find_metric(const char *metric,
+ const struct pmu_events_map *map)
{
- struct pmu_event *pe;
+ const struct pmu_event *pe;
int i;
map_for_each_event(pe, i, map) {
@@ -914,136 +1060,21 @@ struct pmu_event *metricgroup__find_metric(const char *metric,
return NULL;
}
-static int recursion_check(struct metric *m, const char *id, struct expr_id **parent,
- struct expr_ids *ids)
-{
- struct expr_id_data *data;
- struct expr_id *p;
- int ret;
-
- /*
- * We get the parent referenced by 'id' argument and
- * traverse through all the parent object IDs to check
- * if we already processed 'id', if we did, it's recursion
- * and we fail.
- */
- ret = expr__get_id(&m->pctx, id, &data);
- if (ret)
- return ret;
-
- p = expr_id_data__parent(data);
-
- while (p->parent) {
- if (!strcmp(p->id, id)) {
- pr_err("failed: recursion detected for %s\n", id);
- return -1;
- }
- p = p->parent;
- }
-
- /*
- * If we are over the limit of static entris, the metric
- * is too difficult/nested to process, fail as well.
- */
- p = expr_ids__alloc(ids);
- if (!p) {
- pr_err("failed: too many nested metrics\n");
- return -EINVAL;
- }
-
- p->id = strdup(id);
- p->parent = expr_id_data__parent(data);
- *parent = p;
-
- return p->id ? 0 : -ENOMEM;
-}
-
-static int add_metric(struct list_head *metric_list,
- struct pmu_event *pe,
- bool metric_no_group,
- struct metric **mp,
- struct expr_id *parent,
- struct expr_ids *ids);
-
-static int __resolve_metric(struct metric *m,
- bool metric_no_group,
- struct list_head *metric_list,
- struct pmu_events_map *map,
- struct expr_ids *ids)
-{
- struct hashmap_entry *cur;
- size_t bkt;
- bool all;
- int ret;
-
- /*
- * Iterate all the parsed IDs and if there's metric,
- * add it to the context.
- */
- do {
- all = true;
- hashmap__for_each_entry((&m->pctx.ids), cur, bkt) {
- struct expr_id *parent;
- struct pmu_event *pe;
-
- pe = metricgroup__find_metric(cur->key, map);
- if (!pe)
- continue;
-
- ret = recursion_check(m, cur->key, &parent, ids);
- if (ret)
- return ret;
-
- all = false;
- /* The metric key itself needs to go out.. */
- expr__del_id(&m->pctx, cur->key);
-
- /* ... and it gets resolved to the parent context. */
- ret = add_metric(metric_list, pe, metric_no_group, &m, parent, ids);
- if (ret)
- return ret;
-
- /*
- * We added new metric to hashmap, so we need
- * to break the iteration and start over.
- */
- break;
- }
- } while (!all);
-
- return 0;
-}
-
-static int resolve_metric(bool metric_no_group,
- struct list_head *metric_list,
- struct pmu_events_map *map,
- struct expr_ids *ids)
-{
- struct metric *m;
- int err;
-
- list_for_each_entry(m, metric_list, nd) {
- err = __resolve_metric(m, metric_no_group, metric_list, map, ids);
- if (err)
- return err;
- }
- return 0;
-}
-
static int add_metric(struct list_head *metric_list,
- struct pmu_event *pe,
+ const struct pmu_event *pe,
+ const char *modifier,
bool metric_no_group,
- struct metric **m,
- struct expr_id *parent,
- struct expr_ids *ids)
+ struct metric *root_metric,
+ const struct visited_metric *visited,
+ const struct pmu_events_map *map)
{
- struct metric *orig = *m;
int ret = 0;
pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);
if (!strstr(pe->metric_expr, "?")) {
- ret = __add_metric(metric_list, pe, metric_no_group, 1, m, parent, ids);
+ ret = __add_metric(metric_list, pe, modifier, metric_no_group, 0,
+ root_metric, visited, map);
} else {
int j, count;
@@ -1054,29 +1085,25 @@ static int add_metric(struct list_head *metric_list,
* those events to metric_list.
*/
- for (j = 0; j < count && !ret; j++, *m = orig)
- ret = __add_metric(metric_list, pe, metric_no_group, j, m, parent, ids);
+ for (j = 0; j < count && !ret; j++)
+ ret = __add_metric(metric_list, pe, modifier, metric_no_group, j,
+ root_metric, visited, map);
}
return ret;
}
-static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
+static int metricgroup__add_metric_sys_event_iter(const struct pmu_event *pe,
void *data)
{
struct metricgroup_add_iter_data *d = data;
- struct metric *m = NULL;
int ret;
- if (!match_pe_metric(pe, d->metric))
+ if (!match_pe_metric(pe, d->metric_name))
return 0;
- ret = add_metric(d->metric_list, pe, d->metric_no_group, &m, NULL, d->ids);
- if (ret)
- goto out;
-
- ret = resolve_metric(d->metric_no_group,
- d->metric_list, NULL, d->ids);
+ ret = add_metric(d->metric_list, pe, d->modifier, d->metric_no_group,
+ d->root_metric, d->visited, d->map);
if (ret)
goto out;
@@ -1087,32 +1114,47 @@ out:
return ret;
}
-static int metricgroup__add_metric(const char *metric, bool metric_no_group,
- struct strbuf *events,
+static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
+ const struct list_head *r)
+{
+ const struct metric *left = container_of(l, struct metric, nd);
+ const struct metric *right = container_of(r, struct metric, nd);
+
+ return hashmap__size(right->pctx->ids) - hashmap__size(left->pctx->ids);
+}
+
+/**
+ * metricgroup__add_metric - Find and add a metric, or a metric group.
+ * @metric_name: The name of the metric or metric group. For example, "IPC"
+ * could be the name of a metric and "TopDownL1" the name of a
+ * metric group.
+ * @modifier: if non-null event modifiers like "u".
+ * @metric_no_group: Should events written to events be grouped "{}" or
+ * global. Grouping is the default but due to multiplexing the
+ * user may override.
+ * @metric_list: The list that the metric or metric group are added to.
+ * @map: The map that is searched for metrics, most commonly the table for the
+ * architecture perf is running upon.
+ */
+static int metricgroup__add_metric(const char *metric_name, const char *modifier,
+ bool metric_no_group,
struct list_head *metric_list,
- struct pmu_events_map *map)
+ const struct pmu_events_map *map)
{
- struct expr_ids ids = { .cnt = 0, };
- struct pmu_event *pe;
- struct metric *m;
+ const struct pmu_event *pe;
LIST_HEAD(list);
int i, ret;
bool has_match = false;
- map_for_each_metric(pe, i, map, metric) {
+ /*
+ * Iterate over all metrics seeing if metric matches either the name or
+ * group. When it does, add the metric to the list.
+ */
+ map_for_each_metric(pe, i, map, metric_name) {
has_match = true;
- m = NULL;
-
- ret = add_metric(&list, pe, metric_no_group, &m, NULL, &ids);
- if (ret)
- goto out;
-
- /*
- * Process any possible referenced metrics
- * included in the expression.
- */
- ret = resolve_metric(metric_no_group,
- &list, map, &ids);
+ ret = add_metric(&list, pe, modifier, metric_no_group,
+ /*root_metric=*/NULL,
+ /*visited_metrics=*/NULL, map);
if (ret)
goto out;
}
@@ -1122,34 +1164,20 @@ static int metricgroup__add_metric(const char *metric, bool metric_no_group,
.fn = metricgroup__add_metric_sys_event_iter,
.data = (void *) &(struct metricgroup_add_iter_data) {
.metric_list = &list,
- .metric = metric,
+ .metric_name = metric_name,
+ .modifier = modifier,
.metric_no_group = metric_no_group,
- .ids = &ids,
.has_match = &has_match,
.ret = &ret,
+ .map = map,
},
};
pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
}
/* End of pmu events. */
- if (!has_match) {
+ if (!has_match)
ret = -EINVAL;
- goto out;
- }
-
- list_for_each_entry(m, &list, nd) {
- if (events->len > 0)
- strbuf_addf(events, ",");
-
- if (m->has_constraint) {
- metricgroup__add_metric_non_group(events,
- &m->pctx);
- } else {
- metricgroup__add_metric_weak_group(events,
- &m->pctx);
- }
- }
out:
/*
@@ -1157,95 +1185,312 @@ out:
* even if it's failed
*/
list_splice(&list, metric_list);
- expr_ids__exit(&ids);
return ret;
}
+/**
+ * metricgroup__add_metric_list - Find and add metrics, or metric groups,
+ * specified in a list.
+ * @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
+ * would match the IPC and CPI metrics, and TopDownL1 would match all
+ * the metrics in the TopDownL1 group.
+ * @metric_no_group: Should events written to events be grouped "{}" or
+ * global. Grouping is the default but due to multiplexing the
+ * user may override.
+ * @metric_list: The list that metrics are added to.
+ * @map: The map that is searched for metrics, most commonly the table for the
+ * architecture perf is running upon.
+ */
static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
- struct strbuf *events,
struct list_head *metric_list,
- struct pmu_events_map *map)
+ const struct pmu_events_map *map)
{
- char *llist, *nlist, *p;
- int ret = -EINVAL;
+ char *list_itr, *list_copy, *metric_name, *modifier;
+ int ret, count = 0;
- nlist = strdup(list);
- if (!nlist)
+ list_copy = strdup(list);
+ if (!list_copy)
return -ENOMEM;
- llist = nlist;
+ list_itr = list_copy;
+
+ while ((metric_name = strsep(&list_itr, ",")) != NULL) {
+ modifier = strchr(metric_name, ':');
+ if (modifier)
+ *modifier++ = '\0';
- strbuf_init(events, 100);
- strbuf_addf(events, "%s", "");
+ ret = metricgroup__add_metric(metric_name, modifier,
+ metric_no_group, metric_list,
+ map);
+ if (ret == -EINVAL)
+ pr_err("Cannot find metric or group `%s'\n", metric_name);
- while ((p = strsep(&llist, ",")) != NULL) {
- ret = metricgroup__add_metric(p, metric_no_group, events,
- metric_list, map);
- if (ret == -EINVAL) {
- fprintf(stderr, "Cannot find metric or group `%s'\n",
- p);
+ if (ret)
break;
- }
+
+ count++;
}
- free(nlist);
+ free(list_copy);
- if (!ret)
+ if (!ret) {
+ /*
+ * Warn about nmi_watchdog if any parsed metrics had the
+ * NO_NMI_WATCHDOG constraint.
+ */
metricgroup___watchdog_constraint_hint(NULL, true);
-
+ /* No metrics. */
+ if (count == 0)
+ return -EINVAL;
+ }
return ret;
}
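
The loop above splits each comma-separated entry into a metric name and an optional ':' modifier. A small self-contained illustration of that split, with a made-up input list:

#define _DEFAULT_SOURCE	/* for strsep() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *copy = strdup("IPC,CPI:u,TopDownL1");
	char *itr = copy, *name, *modifier;

	while ((name = strsep(&itr, ",")) != NULL) {
		modifier = strchr(name, ':');
		if (modifier)
			*modifier++ = '\0';
		printf("metric '%s' modifier '%s'\n",
		       name, modifier ? modifier : "(none)");
	}
	free(copy);
	return 0;
}
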
-static void metric__free_refs(struct metric *metric)
+static void metricgroup__free_metrics(struct list_head *metric_list)
{
- struct metric_ref_node *ref, *tmp;
+ struct metric *m, *tmp;
- list_for_each_entry_safe(ref, tmp, &metric->metric_refs, list) {
- list_del(&ref->list);
- free(ref);
+ list_for_each_entry_safe (m, tmp, metric_list, nd) {
+ list_del_init(&m->nd);
+ metric__free(m);
}
}
-static void metricgroup__free_metrics(struct list_head *metric_list)
+/**
+ * build_combined_expr_ctx - Make an expr_parse_ctx with all has_constraint
+ * metric IDs, as the IDs are held in a set,
+ * duplicates will be removed.
+ * @metric_list: List to take metrics from.
+ * @combined: Out argument for result.
+ */
+static int build_combined_expr_ctx(const struct list_head *metric_list,
+ struct expr_parse_ctx **combined)
{
- struct metric *m, *tmp;
+ struct hashmap_entry *cur;
+ size_t bkt;
+ struct metric *m;
+ char *dup;
+ int ret;
- list_for_each_entry_safe (m, tmp, metric_list, nd) {
- metric__free_refs(m);
- expr__ctx_clear(&m->pctx);
- list_del_init(&m->nd);
- free(m);
+ *combined = expr__ctx_new();
+ if (!*combined)
+ return -ENOMEM;
+
+ list_for_each_entry(m, metric_list, nd) {
+ if (m->has_constraint && !m->modifier) {
+ hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
+ dup = strdup(cur->key);
+ if (!dup) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+ ret = expr__add_id(*combined, dup);
+ if (ret)
+ goto err_out;
+ }
+ }
}
+ return 0;
+err_out:
+ expr__ctx_free(*combined);
+ *combined = NULL;
+ return ret;
+}
+
+/**
+ * parse_ids - Build the event string for the ids and parse them creating an
+ * evlist. The encoded metric_ids are decoded.
+ * @fake_pmu: used when testing metrics not supported by the current CPU.
+ * @ids: the event identifiers parsed from a metric.
+ * @modifier: any modifiers added to the events.
+ * @has_constraint: false if events should be placed in a weak group.
+ * @out_evlist: the created list of events.
+ */
+static int parse_ids(struct perf_pmu *fake_pmu, struct expr_parse_ctx *ids,
+ const char *modifier, bool has_constraint, struct evlist **out_evlist)
+{
+ struct parse_events_error parse_error;
+ struct evlist *parsed_evlist;
+ struct strbuf events = STRBUF_INIT;
+ int ret;
+
+ *out_evlist = NULL;
+ if (hashmap__size(ids->ids) == 0) {
+ char *tmp;
+ /*
+ * No ids/events in the expression parsing context. Events may
+ * have been removed because of constant evaluation, e.g.:
+ * event1 if #smt_on else 0
+ * Add a duration_time event to avoid a parse error on an empty
+ * string.
+ */
+ tmp = strdup("duration_time");
+ if (!tmp)
+ return -ENOMEM;
+
+ ids__insert(ids->ids, tmp);
+ }
+ ret = metricgroup__build_event_string(&events, ids, modifier,
+ has_constraint);
+ if (ret)
+ return ret;
+
+ parsed_evlist = evlist__new();
+ if (!parsed_evlist) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+ pr_debug("Parsing metric events '%s'\n", events.buf);
+ bzero(&parse_error, sizeof(parse_error));
+ ret = __parse_events(parsed_evlist, events.buf, &parse_error, fake_pmu);
+ if (ret) {
+ parse_events_print_error(&parse_error, events.buf);
+ goto err_out;
+ }
+ ret = decode_all_metric_ids(parsed_evlist, modifier);
+ if (ret)
+ goto err_out;
+
+ *out_evlist = parsed_evlist;
+ parsed_evlist = NULL;
+err_out:
+ evlist__delete(parsed_evlist);
+ strbuf_release(&events);
+ return ret;
}
static int parse_groups(struct evlist *perf_evlist, const char *str,
bool metric_no_group,
bool metric_no_merge,
struct perf_pmu *fake_pmu,
- struct rblist *metric_events,
- struct pmu_events_map *map)
+ struct rblist *metric_events_list,
+ const struct pmu_events_map *map)
{
- struct parse_events_error parse_error;
- struct strbuf extra_events;
+ struct evlist *combined_evlist = NULL;
LIST_HEAD(metric_list);
+ struct metric *m;
int ret;
- if (metric_events->nr_entries == 0)
- metricgroup__rblist_init(metric_events);
+ if (metric_events_list->nr_entries == 0)
+ metricgroup__rblist_init(metric_events_list);
ret = metricgroup__add_metric_list(str, metric_no_group,
- &extra_events, &metric_list, map);
+ &metric_list, map);
if (ret)
goto out;
- pr_debug("adding %s\n", extra_events.buf);
- bzero(&parse_error, sizeof(parse_error));
- ret = __parse_events(perf_evlist, extra_events.buf, &parse_error, fake_pmu);
- if (ret) {
- parse_events_print_error(&parse_error, extra_events.buf);
- goto out;
+
+ /* Sort metrics from largest to smallest. */
+ list_sort(NULL, &metric_list, metric_list_cmp);
+
+ if (!metric_no_merge) {
+ struct expr_parse_ctx *combined = NULL;
+
+ ret = build_combined_expr_ctx(&metric_list, &combined);
+
+ if (!ret && combined && hashmap__size(combined->ids)) {
+ ret = parse_ids(fake_pmu, combined, /*modifier=*/NULL,
+ /*has_constraint=*/true,
+ &combined_evlist);
+ }
+ if (combined)
+ expr__ctx_free(combined);
+
+ if (ret)
+ goto out;
}
- ret = metricgroup__setup_events(&metric_list, metric_no_merge,
- perf_evlist, metric_events);
+
+ list_for_each_entry(m, &metric_list, nd) {
+ struct metric_event *me;
+ struct evsel **metric_events;
+ struct evlist *metric_evlist = NULL;
+ struct metric *n;
+ struct metric_expr *expr;
+
+ if (combined_evlist && m->has_constraint) {
+ metric_evlist = combined_evlist;
+ } else if (!metric_no_merge) {
+ /*
+ * See if the IDs for this metric are a subset of an
+ * earlier metric.
+ */
+ list_for_each_entry(n, &metric_list, nd) {
+ if (m == n)
+ break;
+
+ if (n->evlist == NULL)
+ continue;
+
+ if ((!m->modifier && n->modifier) ||
+ (m->modifier && !n->modifier) ||
+ (m->modifier && n->modifier &&
+ strcmp(m->modifier, n->modifier)))
+ continue;
+
+ if (expr__subset_of_ids(n->pctx, m->pctx)) {
+ pr_debug("Events in '%s' fully contained within '%s'\n",
+ m->metric_name, n->metric_name);
+ metric_evlist = n->evlist;
+ break;
+ }
+
+ }
+ }
+ if (!metric_evlist) {
+ ret = parse_ids(fake_pmu, m->pctx, m->modifier,
+ m->has_constraint, &m->evlist);
+ if (ret)
+ goto out;
+
+ metric_evlist = m->evlist;
+ }
+ ret = setup_metric_events(m->pctx->ids, metric_evlist, &metric_events);
+ if (ret) {
+ pr_debug("Cannot resolve IDs for %s: %s\n",
+ m->metric_name, m->metric_expr);
+ goto out;
+ }
+
+ me = metricgroup__lookup(metric_events_list, metric_events[0], true);
+
+ expr = malloc(sizeof(struct metric_expr));
+ if (!expr) {
+ ret = -ENOMEM;
+ free(metric_events);
+ goto out;
+ }
+
+ expr->metric_refs = m->metric_refs;
+ m->metric_refs = NULL;
+ expr->metric_expr = m->metric_expr;
+ if (m->modifier) {
+ char *tmp;
+
+ if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0)
+ expr->metric_name = NULL;
+ else
+ expr->metric_name = tmp;
+ } else
+ expr->metric_name = strdup(m->metric_name);
+
+ if (!expr->metric_name) {
+ ret = -ENOMEM;
+ free(metric_events);
+ goto out;
+ }
+ expr->metric_unit = m->metric_unit;
+ expr->metric_events = metric_events;
+ expr->runtime = m->pctx->runtime;
+ list_add(&expr->nd, &me->head);
+ }
+
+
+ if (combined_evlist)
+ evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries);
+
+ list_for_each_entry(m, &metric_list, nd) {
+ if (m->evlist)
+ evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries);
+ }
+
out:
metricgroup__free_metrics(&metric_list);
- strbuf_release(&extra_events);
return ret;
}
@@ -1256,14 +1501,14 @@ int metricgroup__parse_groups(const struct option *opt,
struct rblist *metric_events)
{
struct evlist *perf_evlist = *(struct evlist **)opt->value;
- struct pmu_events_map *map = pmu_events_map__find();
+ const struct pmu_events_map *map = pmu_events_map__find();
return parse_groups(perf_evlist, str, metric_no_group,
metric_no_merge, NULL, metric_events, map);
}
int metricgroup__parse_groups_test(struct evlist *evlist,
- struct pmu_events_map *map,
+ const struct pmu_events_map *map,
const char *str,
bool metric_no_group,
bool metric_no_merge,
@@ -1275,8 +1520,8 @@ int metricgroup__parse_groups_test(struct evlist *evlist,
bool metricgroup__has_metric(const char *metric)
{
- struct pmu_events_map *map = pmu_events_map__find();
- struct pmu_event *pe;
+ const struct pmu_events_map *map = pmu_events_map__find();
+ const struct pmu_event *pe;
int i;
if (!map)
@@ -1328,7 +1573,10 @@ int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
return -ENOMEM;
new_expr->metric_expr = old_expr->metric_expr;
- new_expr->metric_name = old_expr->metric_name;
+ new_expr->metric_name = strdup(old_expr->metric_name);
+ if (!new_expr->metric_name)
+ return -ENOMEM;
+
new_expr->metric_unit = old_expr->metric_unit;
new_expr->runtime = old_expr->runtime;
diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h
index cc4a92492a61..2b42b778d1bf 100644
--- a/tools/perf/util/metricgroup.h
+++ b/tools/perf/util/metricgroup.h
@@ -14,24 +14,51 @@ struct rblist;
struct pmu_events_map;
struct cgroup;
+/**
+ * A node in a rblist keyed by the evsel. The global rblist of metric events
+ * generally exists in perf_stat_config. The evsel is looked up in the rblist
+ * yielding a list of metric_expr.
+ */
struct metric_event {
struct rb_node nd;
struct evsel *evsel;
struct list_head head; /* list of metric_expr */
};
+/**
+ * A metric referenced by a metric_expr. When parsing a metric expression IDs
+ * will be looked up, matching either a value (from metric_events) or a
+ * metric_ref. A metric_ref will then be parsed recursively. The metric_refs and
+ * metric_events need to be known before parsing so that their values may be
+ * placed in the parse context for lookup.
+ */
struct metric_ref {
const char *metric_name;
const char *metric_expr;
};
+/**
+ * One in a list of metric_expr associated with an evsel. The data is used to
+ * generate a metric value during stat output.
+ */
struct metric_expr {
struct list_head nd;
+ /** The expression to parse, for example, "instructions/cycles". */
const char *metric_expr;
+ /** The name of the metric such as "IPC". */
const char *metric_name;
+ /**
+ * The "ScaleUnit" that scales and adds a unit to the metric during
+ * output. For example, "6.4e-05MiB" means to scale the resulting metric
+ * by 6.4e-05 (typically converting a unit like cache lines to something
+ * more human intelligible) and then add "MiB" afterward when displayed.
+ */
const char *metric_unit;
+ /** Null terminated array of events used by the metric. */
struct evsel **metric_events;
+ /** Null terminated array of referenced metrics. */
struct metric_ref *metric_refs;
+ /** A value substituted for '?' during parsing. */
int runtime;
};
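
The ScaleUnit note above can be made concrete with a tiny standalone example; the string and the event count are invented, and perf's real parsing of the field lives elsewhere:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *scale_unit = "6.4e-05MiB";	/* hypothetical ScaleUnit */
	char *unit;
	double scale = strtod(scale_unit, &unit);

	/* 244 counted events scaled for display: prints "0.015616 MiB" */
	printf("%g %s\n", 244 * scale, unit);
	return 0;
}
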
@@ -43,19 +70,19 @@ int metricgroup__parse_groups(const struct option *opt,
bool metric_no_group,
bool metric_no_merge,
struct rblist *metric_events);
-struct pmu_event *metricgroup__find_metric(const char *metric,
- struct pmu_events_map *map);
+const struct pmu_event *metricgroup__find_metric(const char *metric,
+ const struct pmu_events_map *map);
int metricgroup__parse_groups_test(struct evlist *evlist,
- struct pmu_events_map *map,
+ const struct pmu_events_map *map,
const char *str,
bool metric_no_group,
bool metric_no_merge,
struct rblist *metric_events);
void metricgroup__print(bool metrics, bool groups, char *filter,
- bool raw, bool details);
+ bool raw, bool details, const char *pmu_name);
bool metricgroup__has_metric(const char *metric);
-int arch_get_runtimeparam(struct pmu_event *pe __maybe_unused);
+int arch_get_runtimeparam(const struct pmu_event *pe __maybe_unused);
void metricgroup__rblist_exit(struct rblist *metric_events);
int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 512dc8b9c168..23ecdba9e670 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -350,3 +350,14 @@ int perf_mmap__push(struct mmap *md, void *to,
out:
return rc;
}
+
+int mmap_cpu_mask__duplicate(struct mmap_cpu_mask *original, struct mmap_cpu_mask *clone)
+{
+ clone->nbits = original->nbits;
+ clone->bits = bitmap_zalloc(original->nbits);
+ if (!clone->bits)
+ return -ENOMEM;
+
+ memcpy(clone->bits, original->bits, MMAP_CPU_MASK_BYTES(original));
+ return 0;
+}
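
A hedged usage sketch for the new helper, assuming (as a caller in the record code would) that 'original' is an already-populated mask and that the clone's bits are released with bitmap_free() when no longer needed:

	struct mmap_cpu_mask clone = { 0 };

	if (!mmap_cpu_mask__duplicate(&original, &clone)) {
		mmap_cpu_mask__scnprintf(&clone, "clone");
		bitmap_free(clone.bits);	/* assumption: caller frees the bits */
	}
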
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index af33118354dd..8e259b9610f8 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -64,4 +64,7 @@ size_t mmap__mmap_len(struct mmap *map);
void mmap_cpu_mask__scnprintf(struct mmap_cpu_mask *mask, const char *tag);
+int mmap_cpu_mask__duplicate(struct mmap_cpu_mask *original,
+ struct mmap_cpu_mask *clone);
+
#endif /*__PERF_MMAP_H */
diff --git a/tools/perf/util/parse-events-hybrid.c b/tools/perf/util/parse-events-hybrid.c
index b234d95fb10a..9fc86971027b 100644
--- a/tools/perf/util/parse-events-hybrid.c
+++ b/tools/perf/util/parse-events-hybrid.c
@@ -38,7 +38,8 @@ static void config_hybrid_attr(struct perf_event_attr *attr,
static int create_event_hybrid(__u32 config_type, int *idx,
struct list_head *list,
- struct perf_event_attr *attr, char *name,
+ struct perf_event_attr *attr, const char *name,
+ const char *metric_id,
struct list_head *config_terms,
struct perf_pmu *pmu)
{
@@ -47,7 +48,7 @@ static int create_event_hybrid(__u32 config_type, int *idx,
__u64 config = attr->config;
config_hybrid_attr(attr, config_type, pmu->type);
- evsel = parse_events__add_event_hybrid(list, idx, attr, name,
+ evsel = parse_events__add_event_hybrid(list, idx, attr, name, metric_id,
pmu, config_terms);
if (evsel)
evsel->pmu_name = strdup(pmu->name);
@@ -70,7 +71,8 @@ static int pmu_cmp(struct parse_events_state *parse_state,
static int add_hw_hybrid(struct parse_events_state *parse_state,
struct list_head *list, struct perf_event_attr *attr,
- char *name, struct list_head *config_terms)
+ const char *name, const char *metric_id,
+ struct list_head *config_terms)
{
struct perf_pmu *pmu;
int ret;
@@ -84,7 +86,7 @@ static int add_hw_hybrid(struct parse_events_state *parse_state,
copy_config_terms(&terms, config_terms);
ret = create_event_hybrid(PERF_TYPE_HARDWARE,
&parse_state->idx, list, attr, name,
- &terms, pmu);
+ metric_id, &terms, pmu);
free_config_terms(&terms);
if (ret)
return ret;
@@ -94,14 +96,16 @@ static int add_hw_hybrid(struct parse_events_state *parse_state,
}
static int create_raw_event_hybrid(int *idx, struct list_head *list,
- struct perf_event_attr *attr, char *name,
+ struct perf_event_attr *attr,
+ const char *name,
+ const char *metric_id,
struct list_head *config_terms,
struct perf_pmu *pmu)
{
struct evsel *evsel;
attr->type = pmu->type;
- evsel = parse_events__add_event_hybrid(list, idx, attr, name,
+ evsel = parse_events__add_event_hybrid(list, idx, attr, name, metric_id,
pmu, config_terms);
if (evsel)
evsel->pmu_name = strdup(pmu->name);
@@ -113,7 +117,8 @@ static int create_raw_event_hybrid(int *idx, struct list_head *list,
static int add_raw_hybrid(struct parse_events_state *parse_state,
struct list_head *list, struct perf_event_attr *attr,
- char *name, struct list_head *config_terms)
+ const char *name, const char *metric_id,
+ struct list_head *config_terms)
{
struct perf_pmu *pmu;
int ret;
@@ -126,7 +131,7 @@ static int add_raw_hybrid(struct parse_events_state *parse_state,
copy_config_terms(&terms, config_terms);
ret = create_raw_event_hybrid(&parse_state->idx, list, attr,
- name, &terms, pmu);
+ name, metric_id, &terms, pmu);
free_config_terms(&terms);
if (ret)
return ret;
@@ -138,7 +143,8 @@ static int add_raw_hybrid(struct parse_events_state *parse_state,
int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state,
struct list_head *list,
struct perf_event_attr *attr,
- char *name, struct list_head *config_terms,
+ const char *name, const char *metric_id,
+ struct list_head *config_terms,
bool *hybrid)
{
*hybrid = false;
@@ -150,16 +156,18 @@ int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state,
*hybrid = true;
if (attr->type != PERF_TYPE_RAW) {
- return add_hw_hybrid(parse_state, list, attr, name,
+ return add_hw_hybrid(parse_state, list, attr, name, metric_id,
config_terms);
}
- return add_raw_hybrid(parse_state, list, attr, name,
+ return add_raw_hybrid(parse_state, list, attr, name, metric_id,
config_terms);
}
int parse_events__add_cache_hybrid(struct list_head *list, int *idx,
- struct perf_event_attr *attr, char *name,
+ struct perf_event_attr *attr,
+ const char *name,
+ const char *metric_id,
struct list_head *config_terms,
bool *hybrid,
struct parse_events_state *parse_state)
@@ -180,7 +188,7 @@ int parse_events__add_cache_hybrid(struct list_head *list, int *idx,
copy_config_terms(&terms, config_terms);
ret = create_event_hybrid(PERF_TYPE_HW_CACHE, idx, list,
- attr, name, &terms, pmu);
+ attr, name, metric_id, &terms, pmu);
free_config_terms(&terms);
if (ret)
return ret;
diff --git a/tools/perf/util/parse-events-hybrid.h b/tools/perf/util/parse-events-hybrid.h
index f33bd67aa851..cbc05fec02a2 100644
--- a/tools/perf/util/parse-events-hybrid.h
+++ b/tools/perf/util/parse-events-hybrid.h
@@ -11,11 +11,13 @@
int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state,
struct list_head *list,
struct perf_event_attr *attr,
- char *name, struct list_head *config_terms,
+ const char *name, const char *metric_id,
+ struct list_head *config_terms,
bool *hybrid);
int parse_events__add_cache_hybrid(struct list_head *list, int *idx,
- struct perf_event_attr *attr, char *name,
+ struct perf_event_attr *attr,
+ const char *name, const char *metric_id,
struct list_head *config_terms,
bool *hybrid,
struct parse_events_state *parse_state);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 51a2219df601..278199ed788b 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -19,8 +19,6 @@
#include <subcmd/exec-cmd.h>
#include "string2.h"
#include "strlist.h"
-#include "symbol.h"
-#include "header.h"
#include "bpf-loader.h"
#include "debug.h"
#include <api/fs/tracing_path.h>
@@ -334,12 +332,7 @@ const char *event_type(int type)
return "unknown";
}
-static int parse_events__is_name_term(struct parse_events_term *term)
-{
- return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
-}
-
-static char *get_config_name(struct list_head *head_terms)
+static char *get_config_str(struct list_head *head_terms, int type_term)
{
struct parse_events_term *term;
@@ -347,17 +340,27 @@ static char *get_config_name(struct list_head *head_terms)
return NULL;
list_for_each_entry(term, head_terms, list)
- if (parse_events__is_name_term(term))
+ if (term->type_term == type_term)
return term->val.str;
return NULL;
}
+static char *get_config_metric_id(struct list_head *head_terms)
+{
+ return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
+}
+
+static char *get_config_name(struct list_head *head_terms)
+{
+ return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
+}
+
static struct evsel *
__add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr,
bool init_attr,
- char *name, struct perf_pmu *pmu,
+ const char *name, const char *metric_id, struct perf_pmu *pmu,
struct list_head *config_terms, bool auto_merge_stats,
const char *cpu_list)
{
@@ -386,6 +389,9 @@ __add_event(struct list_head *list, int *idx,
if (name)
evsel->name = strdup(name);
+ if (metric_id)
+ evsel->metric_id = strdup(metric_id);
+
if (config_terms)
list_splice_init(config_terms, &evsel->config_terms);
@@ -396,18 +402,21 @@ __add_event(struct list_head *list, int *idx,
}
struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
- char *name, struct perf_pmu *pmu)
+ const char *name, const char *metric_id,
+ struct perf_pmu *pmu)
{
- return __add_event(NULL, &idx, attr, false, name, pmu, NULL, false,
- NULL);
+ return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
+ metric_id, pmu, /*config_terms=*/NULL,
+ /*auto_merge_stats=*/false, /*cpu_list=*/NULL);
}
static int add_event(struct list_head *list, int *idx,
- struct perf_event_attr *attr, char *name,
- struct list_head *config_terms)
+ struct perf_event_attr *attr, const char *name,
+ const char *metric_id, struct list_head *config_terms)
{
- return __add_event(list, idx, attr, true, name, NULL, config_terms,
- false, NULL) ? 0 : -ENOMEM;
+ return __add_event(list, idx, attr, /*init_attr=*/true, name, metric_id,
+ /*pmu=*/NULL, config_terms,
+ /*auto_merge_stats=*/false, /*cpu_list=*/NULL) ? 0 : -ENOMEM;
}
static int add_event_tool(struct list_head *list, int *idx,
@@ -419,8 +428,10 @@ static int add_event_tool(struct list_head *list, int *idx,
.config = PERF_COUNT_SW_DUMMY,
};
- evsel = __add_event(list, idx, &attr, true, NULL, NULL, NULL, false,
- "0");
+ evsel = __add_event(list, idx, &attr, /*init_attr=*/true, /*name=*/NULL,
+ /*metric_id=*/NULL, /*pmu=*/NULL,
+ /*config_terms=*/NULL, /*auto_merge_stats=*/false,
+ /*cpu_list=*/"0");
if (!evsel)
return -ENOMEM;
evsel->tool_event = tool_event;
@@ -466,7 +477,8 @@ int parse_events_add_cache(struct list_head *list, int *idx,
{
struct perf_event_attr attr;
LIST_HEAD(config_terms);
- char name[MAX_NAME_LEN], *config_name;
+ char name[MAX_NAME_LEN];
+ const char *config_name, *metric_id;
int cache_type = -1, cache_op = -1, cache_result = -1;
char *op_result[2] = { op_result1, op_result2 };
int i, n, ret;
@@ -531,13 +543,17 @@ int parse_events_add_cache(struct list_head *list, int *idx,
return -ENOMEM;
}
+ metric_id = get_config_metric_id(head_config);
ret = parse_events__add_cache_hybrid(list, idx, &attr,
- config_name ? : name, &config_terms,
+ config_name ? : name,
+ metric_id,
+ &config_terms,
&hybrid, parse_state);
if (hybrid)
goto out_free_terms;
- ret = add_event(list, idx, &attr, config_name ? : name, &config_terms);
+ ret = add_event(list, idx, &attr, config_name ? : name, metric_id,
+ &config_terms);
out_free_terms:
free_config_terms(&config_terms);
return ret;
@@ -1014,7 +1030,8 @@ int parse_events_add_breakpoint(struct list_head *list, int *idx,
attr.type = PERF_TYPE_BREAKPOINT;
attr.sample_period = 1;
- return add_event(list, idx, &attr, NULL, NULL);
+ return add_event(list, idx, &attr, /*name=*/NULL, /*metric_id=*/NULL,
+ /*config_terms=*/NULL);
}
static int check_type_val(struct parse_events_term *term,
@@ -1059,6 +1076,7 @@ static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
[PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore",
[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output",
[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size",
+ [PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id",
};
static bool config_term_shrinked;
@@ -1081,6 +1099,7 @@ config_term_avail(int term_type, struct parse_events_error *err)
case PARSE_EVENTS__TERM_TYPE_CONFIG1:
case PARSE_EVENTS__TERM_TYPE_CONFIG2:
case PARSE_EVENTS__TERM_TYPE_NAME:
+ case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
case PARSE_EVENTS__TERM_TYPE_PERCORE:
return true;
@@ -1171,6 +1190,9 @@ do { \
case PARSE_EVENTS__TERM_TYPE_NAME:
CHECK_TYPE_VAL(STR);
break;
+ case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
+ CHECK_TYPE_VAL(STR);
+ break;
case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
CHECK_TYPE_VAL(NUM);
break;
@@ -1440,6 +1462,7 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
{
struct perf_event_attr attr;
LIST_HEAD(config_terms);
+ const char *name, *metric_id;
bool hybrid;
int ret;
@@ -1456,14 +1479,16 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
return -ENOMEM;
}
+ name = get_config_name(head_config);
+ metric_id = get_config_metric_id(head_config);
ret = parse_events__add_numeric_hybrid(parse_state, list, &attr,
- get_config_name(head_config),
+ name, metric_id,
&config_terms, &hybrid);
if (hybrid)
goto out_free_terms;
- ret = add_event(list, &parse_state->idx, &attr,
- get_config_name(head_config), &config_terms);
+ ret = add_event(list, &parse_state->idx, &attr, name, metric_id,
+ &config_terms);
out_free_terms:
free_config_terms(&config_terms);
return ret;
@@ -1471,7 +1496,7 @@ out_free_terms:
int parse_events_add_tool(struct parse_events_state *parse_state,
struct list_head *list,
- enum perf_tool_event tool_event)
+ int tool_event)
{
return add_event_tool(list, &parse_state->idx, tool_event);
}
@@ -1564,8 +1589,11 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
if (!head_config) {
attr.type = pmu->type;
- evsel = __add_event(list, &parse_state->idx, &attr, true, NULL,
- pmu, NULL, auto_merge_stats, NULL);
+ evsel = __add_event(list, &parse_state->idx, &attr,
+ /*init_attr=*/true, /*name=*/NULL,
+ /*metric_id=*/NULL, pmu,
+ /*config_terms=*/NULL, auto_merge_stats,
+ /*cpu_list=*/NULL);
if (evsel) {
evsel->pmu_name = name ? strdup(name) : NULL;
evsel->use_uncore_alias = use_uncore_alias;
@@ -1618,9 +1646,10 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
return -EINVAL;
}
- evsel = __add_event(list, &parse_state->idx, &attr, true,
- get_config_name(head_config), pmu,
- &config_terms, auto_merge_stats, NULL);
+ evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
+ get_config_name(head_config),
+ get_config_metric_id(head_config), pmu,
+ &config_terms, auto_merge_stats, /*cpu_list=*/NULL);
if (!evsel)
return -ENOMEM;
@@ -1644,44 +1673,50 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
}
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
- char *str, struct list_head **listp)
+ char *str, struct list_head *head,
+ struct list_head **listp)
{
struct parse_events_term *term;
- struct list_head *list;
+ struct list_head *list = NULL;
struct perf_pmu *pmu = NULL;
int ok = 0;
+ char *config;
*listp = NULL;
+
+ if (!head) {
+ head = malloc(sizeof(struct list_head));
+ if (!head)
+ goto out_err;
+
+ INIT_LIST_HEAD(head);
+ }
+ config = strdup(str);
+ if (!config)
+ goto out_err;
+
+ if (parse_events_term__num(&term,
+ PARSE_EVENTS__TERM_TYPE_USER,
+ config, 1, false, &config,
+ NULL) < 0) {
+ free(config);
+ goto out_err;
+ }
+ list_add_tail(&term->list, head);
+
/* Add it for all PMUs that support the alias */
list = malloc(sizeof(struct list_head));
if (!list)
- return -1;
+ goto out_err;
+
INIT_LIST_HEAD(list);
+
while ((pmu = perf_pmu__scan(pmu)) != NULL) {
struct perf_pmu_alias *alias;
list_for_each_entry(alias, &pmu->aliases, list) {
if (!strcasecmp(alias->name, str)) {
- struct list_head *head;
- char *config;
-
- head = malloc(sizeof(struct list_head));
- if (!head)
- return -1;
- INIT_LIST_HEAD(head);
- config = strdup(str);
- if (!config)
- return -1;
- if (parse_events_term__num(&term,
- PARSE_EVENTS__TERM_TYPE_USER,
- config, 1, false, &config,
- NULL) < 0) {
- free(list);
- free(config);
- return -1;
- }
- list_add_tail(&term->list, head);
-
if (!parse_events_add_pmu(parse_state, list,
pmu->name, head,
true, true)) {
@@ -1689,17 +1724,17 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
pmu->name, alias->str);
ok++;
}
-
- parse_events_terms__delete(head);
}
}
}
- if (!ok) {
+out_err:
+ if (ok)
+ *listp = list;
+ else
free(list);
- return -1;
- }
- *listp = list;
- return 0;
+
+ parse_events_terms__delete(head);
+ return ok ? 0 : -1;
}
int parse_events__modifier_group(struct list_head *list,
@@ -2029,7 +2064,7 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add)
return 0;
}
-int parse_events_name(struct list_head *list, char *name)
+int parse_events_name(struct list_head *list, const char *name)
{
struct evsel *evsel;
@@ -2703,7 +2738,7 @@ next:
return 0;
}
-static bool is_event_supported(u8 type, unsigned config)
+static bool is_event_supported(u8 type, u64 config)
{
bool ret = true;
int open_return;
@@ -2823,10 +2858,18 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
int print_hwcache_events(const char *event_glob, bool name_only)
{
- unsigned int type, op, i, evt_i = 0, evt_num = 0;
- char name[64];
- char **evt_list = NULL;
+ unsigned int type, op, i, evt_i = 0, evt_num = 0, npmus = 0;
+ char name[64], new_name[128];
+ char **evt_list = NULL, **evt_pmus = NULL;
bool evt_num_known = false;
+ struct perf_pmu *pmu = NULL;
+
+ if (perf_pmu__has_hybrid()) {
+ npmus = perf_pmu__hybrid_pmu_num();
+ evt_pmus = zalloc(sizeof(char *) * npmus);
+ if (!evt_pmus)
+ goto out_enomem;
+ }
restart:
if (evt_num_known) {
@@ -2842,20 +2885,61 @@ restart:
continue;
for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
+ unsigned int hybrid_supported = 0, j;
+ bool supported;
+
__evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name));
if (event_glob != NULL && !strglobmatch(name, event_glob))
continue;
- if (!is_event_supported(PERF_TYPE_HW_CACHE,
- type | (op << 8) | (i << 16)))
- continue;
+ if (!perf_pmu__has_hybrid()) {
+ if (!is_event_supported(PERF_TYPE_HW_CACHE,
+ type | (op << 8) | (i << 16))) {
+ continue;
+ }
+ } else {
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ if (!evt_num_known) {
+ evt_num++;
+ continue;
+ }
+
+ supported = is_event_supported(
+ PERF_TYPE_HW_CACHE,
+ type | (op << 8) | (i << 16) |
+ ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT));
+ if (supported) {
+ snprintf(new_name, sizeof(new_name), "%s/%s/",
+ pmu->name, name);
+ evt_pmus[hybrid_supported] = strdup(new_name);
+ hybrid_supported++;
+ }
+ }
+
+ if (hybrid_supported == 0)
+ continue;
+ }
if (!evt_num_known) {
evt_num++;
continue;
}
- evt_list[evt_i] = strdup(name);
+ if ((hybrid_supported == 0) ||
+ (hybrid_supported == npmus)) {
+ evt_list[evt_i] = strdup(name);
+ if (npmus > 0) {
+ for (j = 0; j < npmus; j++)
+ zfree(&evt_pmus[j]);
+ }
+ } else {
+ for (j = 0; j < hybrid_supported; j++) {
+ evt_list[evt_i++] = evt_pmus[j];
+ evt_pmus[j] = NULL;
+ }
+ continue;
+ }
+
if (evt_list[evt_i] == NULL)
goto out_enomem;
evt_i++;
@@ -2867,6 +2951,13 @@ restart:
evt_num_known = true;
goto restart;
}
+
+ for (evt_i = 0; evt_i < evt_num; evt_i++) {
+ if (!evt_list[evt_i])
+ break;
+ }
+
+ evt_num = evt_i;
qsort(evt_list, evt_num, sizeof(char *), cmp_string);
evt_i = 0;
while (evt_i < evt_num) {
@@ -2885,6 +2976,10 @@ out_free:
for (evt_i = 0; evt_i < evt_num; evt_i++)
zfree(&evt_list[evt_i]);
zfree(&evt_list);
+
+ for (evt_i = 0; evt_i < npmus; evt_i++)
+ zfree(&evt_pmus[evt_i]);
+ zfree(&evt_pmus);
return evt_num;
out_enomem:
@@ -2994,7 +3089,8 @@ out_enomem:
* Print the help text for the event symbols:
*/
void print_events(const char *event_glob, bool name_only, bool quiet_flag,
- bool long_desc, bool details_flag, bool deprecated)
+ bool long_desc, bool details_flag, bool deprecated,
+ const char *pmu_name)
{
print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
event_symbols_hw, PERF_COUNT_HW_MAX, name_only);
@@ -3006,7 +3102,7 @@ void print_events(const char *event_glob, bool name_only, bool quiet_flag,
print_hwcache_events(event_glob, name_only);
print_pmu_events(event_glob, name_only, quiet_flag, long_desc,
- details_flag, deprecated);
+ details_flag, deprecated, pmu_name);
if (event_glob != NULL)
return;
@@ -3032,7 +3128,8 @@ void print_events(const char *event_glob, bool name_only, bool quiet_flag,
print_sdt_events(NULL, NULL, name_only);
- metricgroup__print(true, true, NULL, name_only, details_flag);
+ metricgroup__print(true, true, NULL, name_only, details_flag,
+ pmu_name);
print_libpfm_events(name_only, long_desc);
}
@@ -3083,7 +3180,7 @@ int parse_events_term__num(struct parse_events_term **term,
struct parse_events_term temp = {
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = type_term,
- .config = config,
+ .config = config ? : strdup(config_term_names[type_term]),
.no_value = no_value,
.err_term = loc_term ? loc_term->first_column : 0,
.err_val = loc_val ? loc_val->first_column : 0,
@@ -3286,9 +3383,12 @@ fail:
struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx,
struct perf_event_attr *attr,
- char *name, struct perf_pmu *pmu,
+ const char *name,
+ const char *metric_id,
+ struct perf_pmu *pmu,
struct list_head *config_terms)
{
- return __add_event(list, idx, attr, true, name, pmu,
- config_terms, false, NULL);
+ return __add_event(list, idx, attr, /*init_attr=*/true, name, metric_id,
+ pmu, config_terms, /*auto_merge_stats=*/false,
+ /*cpu_list=*/NULL);
}
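
For context on the parse-events.c changes above: the metric-id value is pulled out of the parsed term list with the same walk that already served the name term. Below is a minimal standalone sketch of that lookup-by-term-type pattern; the struct and the hand-rolled singly linked list are simplified stand-ins, not the real parse_events_term or the kernel list_head.

#include <stdio.h>

/* Simplified stand-ins for the real term types and term list. */
enum term_type { TERM_NAME, TERM_METRIC_ID, TERM_OTHER };

struct term {
	enum term_type type;
	const char *str;
	struct term *next;
};

/* Return the string of the first term of the requested type, or NULL if absent. */
static const char *get_str(const struct term *head, enum term_type type)
{
	for (const struct term *t = head; t; t = t->next)
		if (t->type == type)
			return t->str;
	return NULL;
}

int main(void)
{
	struct term metric = { TERM_METRIC_ID, "my_metric", NULL };
	struct term name = { TERM_NAME, "my_event", &metric };

	printf("name=%s metric-id=%s\n",
	       get_str(&name, TERM_NAME), get_str(&name, TERM_METRIC_ID));
	return 0;
}
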
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index bf6e41aa9b6a..f60a661a2247 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -87,6 +87,7 @@ enum {
PARSE_EVENTS__TERM_TYPE_PERCORE,
PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT,
PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE,
+ PARSE_EVENTS__TERM_TYPE_METRIC_ID,
__PARSE_EVENTS__TERM_TYPE_NR,
};
@@ -162,7 +163,7 @@ void parse_events_terms__purge(struct list_head *terms);
void parse_events__clear_array(struct parse_events_array *a);
int parse_events__modifier_event(struct list_head *list, char *str, bool add);
int parse_events__modifier_group(struct list_head *list, char *event_mod);
-int parse_events_name(struct list_head *list, char *name);
+int parse_events_name(struct list_head *list, const char *name);
int parse_events_add_tracepoint(struct list_head *list, int *idx,
const char *sys, const char *event,
struct parse_events_error *error,
@@ -182,10 +183,9 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
u32 type, u64 config,
struct list_head *head_config);
-enum perf_tool_event;
int parse_events_add_tool(struct parse_events_state *parse_state,
struct list_head *list,
- enum perf_tool_event tool_event);
+ int tool_event);
int parse_events_add_cache(struct list_head *list, int *idx,
char *type, char *op_result1, char *op_result2,
struct parse_events_error *error,
@@ -200,10 +200,12 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
bool use_alias);
struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
- char *name, struct perf_pmu *pmu);
+ const char *name, const char *metric_id,
+ struct perf_pmu *pmu);
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
char *str,
+ struct list_head *head_config,
struct list_head **listp);
int parse_events_copy_term_list(struct list_head *old,
@@ -219,7 +221,8 @@ void parse_events_evlist_error(struct parse_events_state *parse_state,
int idx, const char *str);
void print_events(const char *event_glob, bool name_only, bool quiet,
- bool long_desc, bool details_flag, bool deprecated);
+ bool long_desc, bool details_flag, bool deprecated,
+ const char *pmu_name);
struct event_symbol {
const char *symbol;
@@ -267,7 +270,9 @@ int perf_pmu__test_parse_init(void);
struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx,
struct perf_event_attr *attr,
- char *name, struct perf_pmu *pmu,
+ const char *name,
+ const char *metric_id,
+ struct perf_pmu *pmu,
struct list_head *config_terms);
#endif /* __PERF_PARSE_EVENTS_H */
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 923849024b15..4efe9872c667 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -12,7 +12,6 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
-#include "../perf.h"
#include "parse-events.h"
#include "parse-events-bison.h"
#include "evsel.h"
@@ -139,18 +138,23 @@ static int pmu_str_check(yyscan_t scanner, struct parse_events_state *parse_stat
yylval->str = strdup(text);
- if (parse_state->fake_pmu)
- return PE_PMU_EVENT_FAKE;
-
+ /*
+ * If we're not testing then the parse check determines the PMU event type:
+ * if the text isn't a PMU it returns PE_NAME. When testing, the result of
+ * the parse check can't be trusted, so we return PE_PMU_EVENT_FAKE unless
+ * an '!' is present, in which case the text can't be a PMU name.
+ */
switch (perf_pmu__parse_check(text)) {
case PMU_EVENT_SYMBOL_PREFIX:
return PE_PMU_EVENT_PRE;
case PMU_EVENT_SYMBOL_SUFFIX:
return PE_PMU_EVENT_SUF;
case PMU_EVENT_SYMBOL:
- return PE_KERNEL_PMU_EVENT;
+ return parse_state->fake_pmu
+ ? PE_PMU_EVENT_FAKE : PE_KERNEL_PMU_EVENT;
default:
- return PE_NAME;
+ return parse_state->fake_pmu && !strchr(text, '!')
+ ? PE_PMU_EVENT_FAKE : PE_NAME;
}
}
@@ -205,7 +209,7 @@ bpf_source [^,{}]+\.c[a-zA-Z0-9._]*
num_dec [0-9]+
num_hex 0x[a-fA-F0-9]+
num_raw_hex [a-fA-F0-9]+
-name [a-zA-Z_*?\[\]][a-zA-Z0-9_*?.\[\]]*
+name [a-zA-Z_*?\[\]][a-zA-Z0-9_*?.\[\]!]*
name_tag [\'][a-zA-Z_*?\[\]][a-zA-Z0-9_*?\-,\.\[\]:=]*[\']
name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.:]*
drv_cfg_term [a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)?
@@ -295,6 +299,7 @@ no-overwrite { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOOVERWRITE); }
percore { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_PERCORE); }
aux-output { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT); }
aux-sample-size { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE); }
+metric-id { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_METRIC_ID); }
r{num_raw_hex} { return raw(yyscanner); }
r0x{num_raw_hex} { return raw(yyscanner); }
, { return ','; }
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index d94e48e1ff9b..2d60f3cbe42b 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -183,6 +183,11 @@ group_def ':' PE_MODIFIER_EVENT
err = parse_events__modifier_group(list, $3);
free($3);
if (err) {
+ struct parse_events_state *parse_state = _parse_state;
+ struct parse_events_error *error = parse_state->error;
+
+ parse_events__handle_error(error, @3.first_column,
+ strdup("Bad modifier"), NULL);
free_list_evsel(list);
YYABORT;
}
@@ -240,6 +245,11 @@ event_name PE_MODIFIER_EVENT
err = parse_events__modifier_event(list, $2, false);
free($2);
if (err) {
+ struct parse_events_state *parse_state = _parse_state;
+ struct parse_events_error *error = parse_state->error;
+
+ parse_events__handle_error(error, @2.first_column,
+ strdup("Bad modifier"), NULL);
free_list_evsel(list);
YYABORT;
}
@@ -342,7 +352,20 @@ PE_KERNEL_PMU_EVENT sep_dc
struct list_head *list;
int err;
- err = parse_events_multi_pmu_add(_parse_state, $1, &list);
+ err = parse_events_multi_pmu_add(_parse_state, $1, NULL, &list);
+ free($1);
+ if (err < 0)
+ YYABORT;
+ $$ = list;
+}
+|
+PE_KERNEL_PMU_EVENT opt_pmu_config
+{
+ struct list_head *list;
+ int err;
+
+ /* frees $2 */
+ err = parse_events_multi_pmu_add(_parse_state, $1, $2, &list);
free($1);
if (err < 0)
YYABORT;
@@ -357,7 +380,7 @@ PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc
snprintf(pmu_name, sizeof(pmu_name), "%s-%s", $1, $3);
free($1);
free($3);
- if (parse_events_multi_pmu_add(_parse_state, pmu_name, &list) < 0)
+ if (parse_events_multi_pmu_add(_parse_state, pmu_name, NULL, &list) < 0)
YYABORT;
$$ = list;
}
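
For context on the new yacc rule above: a kernel PMU event alias can now also carry a slash-delimited term list, so both of the specs below should reach parse_events_multi_pmu_add(), the second one with a head_config holding the new metric-id term. The event and metric-id names here are purely illustrative.

#include <stdio.h>

int main(void)
{
	/* Matched by the original PE_KERNEL_PMU_EVENT sep_dc rule: bare alias. */
	const char *bare = "inst_retired.any";
	/* Matched by the added PE_KERNEL_PMU_EVENT opt_pmu_config rule: alias plus terms. */
	const char *with_terms = "inst_retired.any/metric-id=my_metric/";

	printf("%s\n%s\n", bare, with_terms);
	return 0;
}
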
diff --git a/tools/perf/util/pfm.c b/tools/perf/util/pfm.c
index 756295dedccc..f0bcfcab1a93 100644
--- a/tools/perf/util/pfm.c
+++ b/tools/perf/util/pfm.c
@@ -87,7 +87,8 @@ int parse_libpfm_events_option(const struct option *opt, const char *str,
pmu = perf_pmu__find_by_type((unsigned int)attr.type);
evsel = parse_events__add_event(evlist->core.nr_entries,
- &attr, q, pmu);
+ &attr, q, /*metric_id=*/NULL,
+ pmu);
if (evsel == NULL)
goto error;
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index bdabd62170d2..f3072c71d132 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -315,7 +315,7 @@ static bool perf_pmu_merge_alias(struct perf_pmu_alias *newalias,
}
static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
- char *desc, char *val, struct pmu_event *pe)
+ char *desc, char *val, const struct pmu_event *pe)
{
struct parse_events_term *term;
struct perf_pmu_alias *alias;
@@ -710,9 +710,9 @@ static char *perf_pmu__getcpuid(struct perf_pmu *pmu)
return cpuid;
}
-struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu)
+const struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu)
{
- struct pmu_events_map *map;
+ const struct pmu_events_map *map;
char *cpuid = perf_pmu__getcpuid(pmu);
int i;
@@ -737,7 +737,7 @@ struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu)
return map;
}
-struct pmu_events_map *__weak pmu_events_map__find(void)
+const struct pmu_events_map *__weak pmu_events_map__find(void)
{
return perf_pmu__find_map(NULL);
}
@@ -824,7 +824,7 @@ out:
* as aliases.
*/
void pmu_add_cpu_aliases_map(struct list_head *head, struct perf_pmu *pmu,
- struct pmu_events_map *map)
+ const struct pmu_events_map *map)
{
int i;
const char *name = pmu->name;
@@ -834,7 +834,7 @@ void pmu_add_cpu_aliases_map(struct list_head *head, struct perf_pmu *pmu,
i = 0;
while (1) {
const char *cpu_name = is_arm_pmu_core(name) ? name : "cpu";
- struct pmu_event *pe = &map->table[i++];
+ const struct pmu_event *pe = &map->table[i++];
const char *pname = pe->pmu ? pe->pmu : cpu_name;
if (!pe->name) {
@@ -859,7 +859,7 @@ new_alias:
static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
{
- struct pmu_events_map *map;
+ const struct pmu_events_map *map;
map = perf_pmu__find_map(pmu);
if (!map)
@@ -873,7 +873,7 @@ void pmu_for_each_sys_event(pmu_sys_event_iter_fn fn, void *data)
int i = 0;
while (1) {
- struct pmu_sys_events *event_table;
+ const struct pmu_sys_events *event_table;
int j = 0;
event_table = &pmu_sys_event_tables[i++];
@@ -882,7 +882,7 @@ void pmu_for_each_sys_event(pmu_sys_event_iter_fn fn, void *data)
break;
while (1) {
- struct pmu_event *pe = &event_table->table[j++];
+ const struct pmu_event *pe = &event_table->table[j++];
int ret;
if (!pe->name && !pe->metric_group && !pe->metric_name)
@@ -900,7 +900,7 @@ struct pmu_sys_event_iter_data {
struct perf_pmu *pmu;
};
-static int pmu_add_sys_aliases_iter_fn(struct pmu_event *pe, void *data)
+static int pmu_add_sys_aliases_iter_fn(const struct pmu_event *pe, void *data)
{
struct pmu_sys_event_iter_data *idata = data;
struct perf_pmu *pmu = idata->pmu;
@@ -1608,6 +1608,7 @@ static int cmp_sevent(const void *a, const void *b)
{
const struct sevent *as = a;
const struct sevent *bs = b;
+ int ret;
/* Put extra events last */
if (!!as->desc != !!bs->desc)
@@ -1623,7 +1624,13 @@ static int cmp_sevent(const void *a, const void *b)
if (as->is_cpu != bs->is_cpu)
return bs->is_cpu - as->is_cpu;
- return strcmp(as->name, bs->name);
+ ret = strcmp(as->name, bs->name);
+ if (!ret) {
+ if (as->pmu && bs->pmu)
+ return strcmp(as->pmu, bs->pmu);
+ }
+
+ return ret;
}
static void wordwrap(char *s, int start, int max, int corr)
@@ -1653,7 +1660,8 @@ bool is_pmu_core(const char *name)
}
void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
- bool long_desc, bool details_flag, bool deprecated)
+ bool long_desc, bool details_flag, bool deprecated,
+ const char *pmu_name)
{
struct perf_pmu *pmu;
struct perf_pmu_alias *alias;
@@ -1679,10 +1687,16 @@ void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
pmu = NULL;
j = 0;
while ((pmu = perf_pmu__scan(pmu)) != NULL) {
+ if (pmu_name && perf_pmu__is_hybrid(pmu->name) &&
+ strcmp(pmu_name, pmu->name)) {
+ continue;
+ }
+
list_for_each_entry(alias, &pmu->aliases, list) {
char *name = alias->desc ? alias->name :
format_alias(buf, sizeof(buf), pmu, alias);
- bool is_cpu = is_pmu_core(pmu->name);
+ bool is_cpu = is_pmu_core(pmu->name) ||
+ perf_pmu__is_hybrid(pmu->name);
if (alias->deprecated && !deprecated)
continue;
@@ -1730,8 +1744,13 @@ void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
for (j = 0; j < len; j++) {
/* Skip duplicates */
- if (j > 0 && !strcmp(aliases[j].name, aliases[j - 1].name))
- continue;
+ if (j > 0 && !strcmp(aliases[j].name, aliases[j - 1].name)) {
+ if (!aliases[j].pmu || !aliases[j - 1].pmu ||
+ !strcmp(aliases[j].pmu, aliases[j - 1].pmu)) {
+ continue;
+ }
+ }
+
if (name_only) {
printf("%s ", aliases[j].name);
continue;
@@ -1906,7 +1925,7 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu)
}
void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
- char *name)
+ const char *name)
{
struct perf_pmu_format *format;
__u64 masks = 0, bits;
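
The alias-listing changes above sort entries by name and then by PMU, and only skip an adjacent entry as a duplicate when its PMU also matches, so each hybrid PMU can list a same-named event. A standalone sketch of that sort-then-conditional-dedup pattern, using a simplified entry type rather than the real sevent struct:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	const char *name;
	const char *pmu;	/* NULL when the event is not tied to a PMU */
};

static int cmp_entry(const void *a, const void *b)
{
	const struct entry *ea = a, *eb = b;
	int ret = strcmp(ea->name, eb->name);

	/* Tie-break equal names by PMU so true duplicates end up adjacent. */
	if (!ret && ea->pmu && eb->pmu)
		return strcmp(ea->pmu, eb->pmu);
	return ret;
}

int main(void)
{
	struct entry entries[] = {
		{ "instructions", "cpu_core" },
		{ "instructions", "cpu_atom" },
		{ "instructions", "cpu_atom" },	/* true duplicate, gets skipped */
		{ "cycles", NULL },
	};
	size_t i, n = sizeof(entries) / sizeof(entries[0]);

	qsort(entries, n, sizeof(entries[0]), cmp_entry);
	for (i = 0; i < n; i++) {
		/* Skip only when both the name and the PMU match the previous entry. */
		if (i > 0 && !strcmp(entries[i].name, entries[i - 1].name) &&
		    (!entries[i].pmu || !entries[i - 1].pmu ||
		     !strcmp(entries[i].pmu, entries[i - 1].pmu)))
			continue;
		printf("%s%s%s\n", entries[i].pmu ? entries[i].pmu : "",
		       entries[i].pmu ? "/" : "", entries[i].name);
	}
	return 0;
}
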
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 394898b07fd9..15bbec3a9959 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -111,7 +111,7 @@ struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu);
bool is_pmu_core(const char *name);
void print_pmu_events(const char *event_glob, bool name_only, bool quiet,
bool long_desc, bool details_flag,
- bool deprecated);
+ bool deprecated, const char *pmu_name);
bool pmu_have_event(const char *pname, const char *name);
int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, ...) __scanf(3, 4);
@@ -120,21 +120,21 @@ int perf_pmu__test(void);
struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu);
void pmu_add_cpu_aliases_map(struct list_head *head, struct perf_pmu *pmu,
- struct pmu_events_map *map);
+ const struct pmu_events_map *map);
-struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu);
-struct pmu_events_map *pmu_events_map__find(void);
+const struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu);
+const struct pmu_events_map *pmu_events_map__find(void);
bool pmu_uncore_alias_match(const char *pmu_name, const char *name);
void perf_pmu_free_alias(struct perf_pmu_alias *alias);
-typedef int (*pmu_sys_event_iter_fn)(struct pmu_event *pe, void *data);
+typedef int (*pmu_sys_event_iter_fn)(const struct pmu_event *pe, void *data);
void pmu_for_each_sys_event(pmu_sys_event_iter_fn fn, void *data);
int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
int perf_pmu__caps_parse(struct perf_pmu *pmu);
void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
- char *name);
+ const char *name);
bool perf_pmu__has_hybrid(void);
int perf_pmu__match(char *pattern, char *name, char *tok);
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index d7c976671e3a..a685d20165f7 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -18,6 +18,7 @@ util/mmap.c
util/namespaces.c
../lib/bitmap.c
../lib/find_bit.c
+../lib/list_sort.c
../lib/hweight.c
../lib/string.c
../lib/vsprintf.c
diff --git a/tools/perf/util/record.h b/tools/perf/util/record.h
index 68f471d9a88b..ef6c2715fdd9 100644
--- a/tools/perf/util/record.h
+++ b/tools/perf/util/record.h
@@ -77,6 +77,7 @@ struct record_opts {
int ctl_fd;
int ctl_fd_ack;
bool ctl_fd_close;
+ int synth;
};
extern const char * const *record_usage;
diff --git a/tools/perf/util/s390-sample-raw.c b/tools/perf/util/s390-sample-raw.c
index 08ec3c3ae0ee..cd3a34840389 100644
--- a/tools/perf/util/s390-sample-raw.c
+++ b/tools/perf/util/s390-sample-raw.c
@@ -135,12 +135,12 @@ static int get_counterset_start(int setnr)
* the name of this counter.
* If no match is found a NULL pointer is returned.
*/
-static const char *get_counter_name(int set, int nr, struct pmu_events_map *map)
+static const char *get_counter_name(int set, int nr, const struct pmu_events_map *map)
{
int rc, event_nr, wanted = get_counterset_start(set) + nr;
if (map) {
- struct pmu_event *evp = map->table;
+ const struct pmu_event *evp = map->table;
for (; evp->name || evp->event || evp->desc; ++evp) {
if (evp->name == NULL || evp->event == NULL)
@@ -159,7 +159,7 @@ static void s390_cpumcfdg_dump(struct perf_sample *sample)
unsigned char *buf = sample->raw_data;
const char *color = PERF_COLOR_BLUE;
struct cf_ctrset_entry *cep, ce;
- struct pmu_events_map *map;
+ const struct pmu_events_map *map;
u64 *p;
map = pmu_events_map__find();
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 352f16076e01..d8857d1b6d7c 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -44,7 +44,7 @@ static int perf_session__process_compressed_event(struct perf_session *session,
size_t decomp_size, src_size;
u64 decomp_last_rem = 0;
size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
- struct decomp *decomp, *decomp_last = session->decomp_last;
+ struct decomp *decomp, *decomp_last = session->active_decomp->decomp_last;
if (decomp_last) {
decomp_last_rem = decomp_last->size - decomp_last->head;
@@ -71,7 +71,7 @@ static int perf_session__process_compressed_event(struct perf_session *session,
src = (void *)event + sizeof(struct perf_record_compressed);
src_size = event->pack.header.size - sizeof(struct perf_record_compressed);
- decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
+ decomp_size = zstd_decompress_stream(session->active_decomp->zstd_decomp, src, src_size,
&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
if (!decomp_size) {
munmap(decomp, mmap_len);
@@ -81,13 +81,12 @@ static int perf_session__process_compressed_event(struct perf_session *session,
decomp->size += decomp_size;
- if (session->decomp == NULL) {
- session->decomp = decomp;
- session->decomp_last = decomp;
- } else {
- session->decomp_last->next = decomp;
- session->decomp_last = decomp;
- }
+ if (session->active_decomp->decomp == NULL)
+ session->active_decomp->decomp = decomp;
+ else
+ session->active_decomp->decomp_last->next = decomp;
+
+ session->active_decomp->decomp_last = decomp;
pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);
@@ -197,6 +196,8 @@ struct perf_session *__perf_session__new(struct perf_data *data,
session->repipe = repipe;
session->tool = tool;
+ session->decomp_data.zstd_decomp = &session->zstd_data;
+ session->active_decomp = &session->decomp_data;
INIT_LIST_HEAD(&session->auxtrace_index);
machines__init(&session->machines);
ordered_events__init(&session->ordered_events,
@@ -276,11 +277,11 @@ static void perf_session__delete_threads(struct perf_session *session)
machine__delete_threads(&session->machines.host);
}
-static void perf_session__release_decomp_events(struct perf_session *session)
+static void perf_decomp__release_events(struct decomp *next)
{
- struct decomp *next, *decomp;
+ struct decomp *decomp;
size_t mmap_len;
- next = session->decomp;
+
do {
decomp = next;
if (decomp == NULL)
@@ -299,7 +300,7 @@ void perf_session__delete(struct perf_session *session)
auxtrace_index__free(&session->auxtrace_index);
perf_session__destroy_kernel_maps(session);
perf_session__delete_threads(session);
- perf_session__release_decomp_events(session);
+ perf_decomp__release_events(session->decomp_data.decomp);
perf_env__exit(&session->header.env);
machines__exit(&session->machines);
if (session->data) {
@@ -509,6 +510,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
tool->bpf = perf_event__process_bpf;
if (tool->text_poke == NULL)
tool->text_poke = perf_event__process_text_poke;
+ if (tool->aux_output_hw_id == NULL)
+ tool->aux_output_hw_id = perf_event__process_aux_output_hw_id;
if (tool->read == NULL)
tool->read = process_event_sample_stub;
if (tool->throttle == NULL)
@@ -1000,6 +1003,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
[PERF_RECORD_CGROUP] = perf_event__cgroup_swap,
[PERF_RECORD_TEXT_POKE] = perf_event__text_poke_swap,
+ [PERF_RECORD_AUX_OUTPUT_HW_ID] = perf_event__all64_swap,
[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
@@ -1556,6 +1560,8 @@ static int machines__deliver_event(struct machines *machines,
return tool->bpf(tool, event, sample, machine);
case PERF_RECORD_TEXT_POKE:
return tool->text_poke(tool, event, sample, machine);
+ case PERF_RECORD_AUX_OUTPUT_HW_ID:
+ return tool->aux_output_hw_id(tool, event, sample, machine);
default:
++evlist->stats.nr_unknown_events;
return -1;
@@ -2117,7 +2123,7 @@ static int __perf_session__process_decomp_events(struct perf_session *session)
{
s64 skip;
u64 size;
- struct decomp *decomp = session->decomp_last;
+ struct decomp *decomp = session->active_decomp->decomp_last;
if (!decomp)
return 0;
@@ -2171,35 +2177,55 @@ struct reader {
u64 data_offset;
reader_cb_t process;
bool in_place_update;
+ char *mmaps[NUM_MMAPS];
+ size_t mmap_size;
+ int mmap_idx;
+ char *mmap_cur;
+ u64 file_pos;
+ u64 file_offset;
+ u64 head;
+ struct zstd_data zstd_data;
+ struct decomp_data decomp_data;
};
static int
-reader__process_events(struct reader *rd, struct perf_session *session,
- struct ui_progress *prog)
+reader__init(struct reader *rd, bool *one_mmap)
{
u64 data_size = rd->data_size;
- u64 head, page_offset, file_offset, file_pos, size;
- int err = 0, mmap_prot, mmap_flags, map_idx = 0;
- size_t mmap_size;
- char *buf, *mmaps[NUM_MMAPS];
- union perf_event *event;
- s64 skip;
-
- page_offset = page_size * (rd->data_offset / page_size);
- file_offset = page_offset;
- head = rd->data_offset - page_offset;
-
- ui_progress__init_size(prog, data_size, "Processing events...");
+ char **mmaps = rd->mmaps;
+ rd->head = rd->data_offset;
data_size += rd->data_offset;
- mmap_size = MMAP_SIZE;
- if (mmap_size > data_size) {
- mmap_size = data_size;
- session->one_mmap = true;
+ rd->mmap_size = MMAP_SIZE;
+ if (rd->mmap_size > data_size) {
+ rd->mmap_size = data_size;
+ if (one_mmap)
+ *one_mmap = true;
}
- memset(mmaps, 0, sizeof(mmaps));
+ memset(mmaps, 0, sizeof(rd->mmaps));
+
+ if (zstd_init(&rd->zstd_data, 0))
+ return -1;
+ rd->decomp_data.zstd_decomp = &rd->zstd_data;
+
+ return 0;
+}
+
+static void
+reader__release_decomp(struct reader *rd)
+{
+ perf_decomp__release_events(rd->decomp_data.decomp);
+ zstd_fini(&rd->zstd_data);
+}
+
+static int
+reader__mmap(struct reader *rd, struct perf_session *session)
+{
+ int mmap_prot, mmap_flags;
+ char *buf, **mmaps = rd->mmaps;
+ u64 page_offset;
mmap_prot = PROT_READ;
mmap_flags = MAP_SHARED;
@@ -2210,47 +2236,63 @@ reader__process_events(struct reader *rd, struct perf_session *session,
mmap_prot |= PROT_WRITE;
mmap_flags = MAP_PRIVATE;
}
-remap:
- buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
- file_offset);
+
+ if (mmaps[rd->mmap_idx]) {
+ munmap(mmaps[rd->mmap_idx], rd->mmap_size);
+ mmaps[rd->mmap_idx] = NULL;
+ }
+
+ page_offset = page_size * (rd->head / page_size);
+ rd->file_offset += page_offset;
+ rd->head -= page_offset;
+
+ buf = mmap(NULL, rd->mmap_size, mmap_prot, mmap_flags, rd->fd,
+ rd->file_offset);
if (buf == MAP_FAILED) {
pr_err("failed to mmap file\n");
- err = -errno;
- goto out;
+ return -errno;
}
- mmaps[map_idx] = buf;
- map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
- file_pos = file_offset + head;
+ mmaps[rd->mmap_idx] = rd->mmap_cur = buf;
+ rd->mmap_idx = (rd->mmap_idx + 1) & (ARRAY_SIZE(rd->mmaps) - 1);
+ rd->file_pos = rd->file_offset + rd->head;
if (session->one_mmap) {
session->one_mmap_addr = buf;
- session->one_mmap_offset = file_offset;
+ session->one_mmap_offset = rd->file_offset;
}
-more:
- event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
+ return 0;
+}
+
+enum {
+ READER_OK,
+ READER_NODATA,
+};
+
+static int
+reader__read_event(struct reader *rd, struct perf_session *session,
+ struct ui_progress *prog)
+{
+ u64 size;
+ int err = READER_OK;
+ union perf_event *event;
+ s64 skip;
+
+ event = fetch_mmaped_event(rd->head, rd->mmap_size, rd->mmap_cur,
+ session->header.needs_swap);
if (IS_ERR(event))
return PTR_ERR(event);
- if (!event) {
- if (mmaps[map_idx]) {
- munmap(mmaps[map_idx], mmap_size);
- mmaps[map_idx] = NULL;
- }
-
- page_offset = page_size * (head / page_size);
- file_offset += page_offset;
- head -= page_offset;
- goto remap;
- }
+ if (!event)
+ return READER_NODATA;
size = event->header.size;
skip = -EINVAL;
if (size < sizeof(struct perf_event_header) ||
- (skip = rd->process(session, event, file_pos)) < 0) {
+ (skip = rd->process(session, event, rd->file_pos)) < 0) {
pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
- file_offset + head, event->header.size,
+ rd->file_offset + rd->head, event->header.size,
event->header.type, strerror(-skip));
err = skip;
goto out;
@@ -2259,8 +2301,8 @@ more:
if (skip)
size += skip;
- head += size;
- file_pos += size;
+ rd->head += size;
+ rd->file_pos += size;
err = __perf_session__process_decomp_events(session);
if (err)
@@ -2268,13 +2310,48 @@ more:
ui_progress__update(prog, size);
+out:
+ return err;
+}
+
+static inline bool
+reader__eof(struct reader *rd)
+{
+ return (rd->file_pos >= rd->data_size + rd->data_offset);
+}
+
+static int
+reader__process_events(struct reader *rd, struct perf_session *session,
+ struct ui_progress *prog)
+{
+ int err;
+
+ err = reader__init(rd, &session->one_mmap);
+ if (err)
+ goto out;
+
+ session->active_decomp = &rd->decomp_data;
+
+remap:
+ err = reader__mmap(rd, session);
+ if (err)
+ goto out;
+
+more:
+ err = reader__read_event(rd, session, prog);
+ if (err < 0)
+ goto out;
+ else if (err == READER_NODATA)
+ goto remap;
+
if (session_done())
goto out;
- if (file_pos < data_size)
+ if (!reader__eof(rd))
goto more;
out:
+ session->active_decomp = &session->decomp_data;
return err;
}
@@ -2327,6 +2404,7 @@ out_err:
*/
ordered_events__reinit(&session->ordered_events);
auxtrace__free_events(session);
+ reader__release_decomp(&rd);
session->one_mmap = false;
return err;
}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 5d8bd14a0a39..46c854292ad6 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -20,6 +20,12 @@ struct thread;
struct auxtrace;
struct itrace_synth_opts;
+struct decomp_data {
+ struct decomp *decomp;
+ struct decomp *decomp_last;
+ struct zstd_data *zstd_decomp;
+};
+
struct perf_session {
struct perf_header header;
struct machines machines;
@@ -39,8 +45,8 @@ struct perf_session {
u64 bytes_transferred;
u64 bytes_compressed;
struct zstd_data zstd_data;
- struct decomp *decomp;
- struct decomp *decomp_last;
+ struct decomp_data decomp_data;
+ struct decomp_data *active_decomp;
};
struct decomp {
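
With the session.h change above, every reader owns a decomp_data, and code that only sees the session reaches the right one through the active_decomp pointer, which falls back to the session's own decomp_data outside of reading. A small self-contained sketch of that default-context-plus-active-pointer pattern, with toy types standing in for the real zstd/decomp state:

#include <stdio.h>

/* Toy stand-in for the real per-reader decompression state. */
struct decomp_state {
	const char *owner;
	int events;
};

struct session {
	struct decomp_state default_state;	/* used outside of any reader */
	struct decomp_state *active;		/* what processing code actually uses */
};

static void deliver_event(struct session *s)
{
	/* Generic code only ever touches the active context. */
	s->active->events++;
	printf("event accounted to %s\n", s->active->owner);
}

int main(void)
{
	struct session s = { .default_state = { "session", 0 } };
	struct decomp_state reader_state = { "reader", 0 };

	s.active = &s.default_state;
	deliver_event(&s);

	/* A reader temporarily installs its own state, then restores the default. */
	s.active = &reader_state;
	deliver_event(&s);
	s.active = &s.default_state;

	printf("session=%d reader=%d\n", s.default_state.events, reader_state.events);
	return 0;
}
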
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index 5b7d6c16d33f..af468e3bb6fa 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
+#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -15,6 +17,7 @@
#include "srcline.h"
#include "string2.h"
#include "symbol.h"
+#include "subcmd/run-command.h"
bool srcline_full_filename;
@@ -119,6 +122,8 @@ static struct symbol *new_inline_sym(struct dso *dso,
return inline_sym;
}
+#define MAX_INLINE_NEST 1024
+
#ifdef HAVE_LIBBFD_SUPPORT
/*
@@ -273,8 +278,6 @@ static void addr2line_cleanup(struct a2l_data *a2l)
free(a2l);
}
-#define MAX_INLINE_NEST 1024
-
static int inline_list__append_dso_a2l(struct dso *dso,
struct inline_node *node,
struct symbol *sym)
@@ -361,26 +364,14 @@ void dso__free_a2l(struct dso *dso)
dso->a2l = NULL;
}
-static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
- struct dso *dso, struct symbol *sym)
-{
- struct inline_node *node;
-
- node = zalloc(sizeof(*node));
- if (node == NULL) {
- perror("not enough memory for the inline node");
- return NULL;
- }
-
- INIT_LIST_HEAD(&node->val);
- node->addr = addr;
-
- addr2line(dso_name, addr, NULL, NULL, dso, true, node, sym);
- return node;
-}
-
#else /* HAVE_LIBBFD_SUPPORT */
+struct a2l_subprocess {
+ struct child_process addr2line;
+ FILE *to_child;
+ FILE *from_child;
+};
+
static int filename_split(char *filename, unsigned int *line_nr)
{
char *sep;
@@ -402,114 +393,285 @@ static int filename_split(char *filename, unsigned int *line_nr)
return 0;
}
-static int addr2line(const char *dso_name, u64 addr,
- char **file, unsigned int *line_nr,
- struct dso *dso __maybe_unused,
- bool unwind_inlines __maybe_unused,
- struct inline_node *node __maybe_unused,
- struct symbol *sym __maybe_unused)
+static void addr2line_subprocess_cleanup(struct a2l_subprocess *a2l)
{
- FILE *fp;
- char cmd[PATH_MAX];
- char *filename = NULL;
- size_t len;
- int ret = 0;
+ if (a2l->addr2line.pid != -1) {
+ kill(a2l->addr2line.pid, SIGKILL);
+ finish_command(&a2l->addr2line); /* ignore result, we don't care */
+ a2l->addr2line.pid = -1;
+ }
- scnprintf(cmd, sizeof(cmd), "addr2line -e %s %016"PRIx64,
- dso_name, addr);
+ if (a2l->to_child != NULL) {
+ fclose(a2l->to_child);
+ a2l->to_child = NULL;
+ }
- fp = popen(cmd, "r");
- if (fp == NULL) {
- pr_warning("popen failed for %s\n", dso_name);
- return 0;
+ if (a2l->from_child != NULL) {
+ fclose(a2l->from_child);
+ a2l->from_child = NULL;
+ }
+
+ free(a2l);
+}
+
+static struct a2l_subprocess *addr2line_subprocess_init(const char *path)
+{
+ const char *argv[] = { "addr2line", "-e", path, "-i", "-f", NULL };
+ struct a2l_subprocess *a2l = zalloc(sizeof(*a2l));
+ int start_command_status = 0;
+
+ if (a2l == NULL)
+ goto out;
+
+ a2l->to_child = NULL;
+ a2l->from_child = NULL;
+
+ a2l->addr2line.pid = -1;
+ a2l->addr2line.in = -1;
+ a2l->addr2line.out = -1;
+ a2l->addr2line.no_stderr = 1;
+
+ a2l->addr2line.argv = argv;
+ start_command_status = start_command(&a2l->addr2line);
+ a2l->addr2line.argv = NULL; /* it's not used after start_command; avoid dangling pointers */
+
+ if (start_command_status != 0) {
+ pr_warning("could not start addr2line for %s: start_command return code %d\n",
+ path,
+ start_command_status);
+ goto out;
}
- if (getline(&filename, &len, fp) < 0 || !len) {
- pr_warning("addr2line has no output for %s\n", dso_name);
+ a2l->to_child = fdopen(a2l->addr2line.in, "w");
+ if (a2l->to_child == NULL) {
+ pr_warning("could not open write-stream to addr2line of %s\n", path);
goto out;
}
- ret = filename_split(filename, line_nr);
- if (ret != 1) {
- free(filename);
+ a2l->from_child = fdopen(a2l->addr2line.out, "r");
+ if (a2l->from_child == NULL) {
+ pr_warning("could not open read-stream from addr2line of %s\n", path);
goto out;
}
- *file = filename;
+ return a2l;
out:
- pclose(fp);
- return ret;
+ if (a2l)
+ addr2line_subprocess_cleanup(a2l);
+
+ return NULL;
}
-void dso__free_a2l(struct dso *dso __maybe_unused)
+static int read_addr2line_record(struct a2l_subprocess *a2l,
+ char **function,
+ char **filename,
+ unsigned int *line_nr)
{
+ /*
+ * Returns:
+ * -1 ==> error
+ * 0 ==> sentinel (or other ill-formed) record read
+ * 1 ==> a genuine record read
+ */
+ char *line = NULL;
+ size_t line_len = 0;
+ unsigned int dummy_line_nr = 0;
+ int ret = -1;
+
+ if (function != NULL)
+ zfree(function);
+
+ if (filename != NULL)
+ zfree(filename);
+
+ if (line_nr != NULL)
+ *line_nr = 0;
+
+ if (getline(&line, &line_len, a2l->from_child) < 0 || !line_len)
+ goto error;
+
+ if (function != NULL)
+ *function = strdup(strim(line));
+
+ zfree(&line);
+ line_len = 0;
+
+ if (getline(&line, &line_len, a2l->from_child) < 0 || !line_len)
+ goto error;
+
+ if (filename_split(line, line_nr == NULL ? &dummy_line_nr : line_nr) == 0) {
+ ret = 0;
+ goto error;
+ }
+
+ if (filename != NULL)
+ *filename = strdup(line);
+
+ zfree(&line);
+ line_len = 0;
+
+ return 1;
+
+error:
+ free(line);
+ if (function != NULL)
+ zfree(function);
+ if (filename != NULL)
+ zfree(filename);
+ return ret;
}
-static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
- struct dso *dso __maybe_unused,
- struct symbol *sym)
+static int inline_list__append_record(struct dso *dso,
+ struct inline_node *node,
+ struct symbol *sym,
+ const char *function,
+ const char *filename,
+ unsigned int line_nr)
{
- FILE *fp;
- char cmd[PATH_MAX];
- struct inline_node *node;
- char *filename = NULL;
- char *funcname = NULL;
- size_t filelen, funclen;
- unsigned int line_nr = 0;
+ struct symbol *inline_sym = new_inline_sym(dso, sym, function);
- scnprintf(cmd, sizeof(cmd), "addr2line -e %s -i -f %016"PRIx64,
- dso_name, addr);
+ return inline_list__append(inline_sym, srcline_from_fileline(filename, line_nr), node);
+}
- fp = popen(cmd, "r");
- if (fp == NULL) {
- pr_err("popen failed for %s\n", dso_name);
- return NULL;
+static int addr2line(const char *dso_name, u64 addr,
+ char **file, unsigned int *line_nr,
+ struct dso *dso,
+ bool unwind_inlines,
+ struct inline_node *node,
+ struct symbol *sym __maybe_unused)
+{
+ struct a2l_subprocess *a2l = dso->a2l;
+ char *record_function = NULL;
+ char *record_filename = NULL;
+ unsigned int record_line_nr = 0;
+ int record_status = -1;
+ int ret = 0;
+ size_t inline_count = 0;
+
+ if (!a2l) {
+ dso->a2l = addr2line_subprocess_init(dso_name);
+ a2l = dso->a2l;
}
- node = zalloc(sizeof(*node));
- if (node == NULL) {
- perror("not enough memory for the inline node");
+ if (a2l == NULL) {
+ if (!symbol_conf.disable_add2line_warn)
+ pr_warning("%s %s: addr2line_subprocess_init failed\n", __func__, dso_name);
goto out;
}
- INIT_LIST_HEAD(&node->val);
- node->addr = addr;
-
- /* addr2line -f generates two lines for each inlined functions */
- while (getline(&funcname, &funclen, fp) != -1) {
- char *srcline;
- struct symbol *inline_sym;
+ /*
+ * Send our request and then *deliberately* send something that can't be interpreted as
+ * a valid address to ask addr2line about (namely, ","). This causes addr2line to first
+ * write out the answer to our request, in an unbounded/unknown number of records, and
+ * then to write out the lines "??" and "??:0", so that we can detect when it has
+ * finished giving us anything useful. We have to be careful about the first record,
+ * though, because it may be genuinely unknown, in which case we'll get two sets of
+ * "??"/"??:0" lines.
+ */
+ if (fprintf(a2l->to_child, "%016"PRIx64"\n,\n", addr) < 0 || fflush(a2l->to_child) != 0) {
+ pr_warning("%s %s: could not send request\n", __func__, dso_name);
+ goto out;
+ }
- strim(funcname);
+ switch (read_addr2line_record(a2l, &record_function, &record_filename, &record_line_nr)) {
+ case -1:
+ pr_warning("%s %s: could not read first record\n", __func__, dso_name);
+ goto out;
+ case 0:
+ /*
+ * The first record was invalid, so return failure, but first read another
+ * record, since we asked a junk question and have to clear the answer out.
+ */
+ switch (read_addr2line_record(a2l, NULL, NULL, NULL)) {
+ case -1:
+ pr_warning("%s %s: could not read delimiter record\n", __func__, dso_name);
+ break;
+ case 0:
+ /* As expected. */
+ break;
+ default:
+ pr_warning("%s %s: unexpected record instead of sentinel\n",
+ __func__, dso_name);
+ break;
+ }
+ goto out;
+ default:
+ break;
+ }
- if (getline(&filename, &filelen, fp) == -1)
- goto out;
+ if (file) {
+ *file = strdup(record_filename);
+ ret = 1;
+ }
+ if (line_nr)
+ *line_nr = record_line_nr;
- if (filename_split(filename, &line_nr) != 1)
+ if (unwind_inlines) {
+ if (node && inline_list__append_record(dso, node, sym,
+ record_function,
+ record_filename,
+ record_line_nr)) {
+ ret = 0;
goto out;
+ }
+ }
- srcline = srcline_from_fileline(filename, line_nr);
- inline_sym = new_inline_sym(dso, sym, funcname);
-
- if (inline_list__append(inline_sym, srcline, node) != 0) {
- free(srcline);
- if (inline_sym && inline_sym->inlined)
- symbol__delete(inline_sym);
- goto out;
+ /* We have to read the records even if we don't care about the inline info. */
+ while ((record_status = read_addr2line_record(a2l,
+ &record_function,
+ &record_filename,
+ &record_line_nr)) == 1) {
+ if (unwind_inlines && node && inline_count++ < MAX_INLINE_NEST) {
+ if (inline_list__append_record(dso, node, sym,
+ record_function,
+ record_filename,
+ record_line_nr)) {
+ ret = 0;
+ goto out;
+ }
+ ret = 1; /* found at least one inline frame */
}
}
out:
- pclose(fp);
- free(filename);
- free(funcname);
+ free(record_function);
+ free(record_filename);
+ return ret;
+}
- return node;
+void dso__free_a2l(struct dso *dso)
+{
+ struct a2l_subprocess *a2l = dso->a2l;
+
+ if (!a2l)
+ return;
+
+ addr2line_subprocess_cleanup(a2l);
+
+ dso->a2l = NULL;
}
#endif /* HAVE_LIBBFD_SUPPORT */
+static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
+ struct dso *dso, struct symbol *sym)
+{
+ struct inline_node *node;
+
+ node = zalloc(sizeof(*node));
+ if (node == NULL) {
+ perror("not enough memory for the inline node");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&node->val);
+ node->addr = addr;
+
+ addr2line(dso_name, addr, NULL, NULL, dso, true, node, sym);
+ return node;
+}
+
/*
* Number of addr2line failures (without success) before disabling it for that
* dso.
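
In the srcline.c rewrite above, one long-lived addr2line -i -f child is driven per DSO over two pipes: each request is the address followed by a deliberately invalid "," line, and replies are read as function/file:line pairs until the ??/??:0 sentinel. A standalone sketch of that exchange using plain pipe/fork/exec instead of perf's start_command helper; the /bin/ls path and the address are only examples, and the sentinel check here is looser than the real filename_split()-based one:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int to_child[2], from_child[2];
	FILE *out, *in;
	char *line = NULL;
	size_t len = 0;
	pid_t pid;

	if (pipe(to_child) || pipe(from_child))
		return 1;
	pid = fork();
	if (pid < 0)
		return 1;
	if (pid == 0) {
		/* Child: wire stdin/stdout to the pipes and exec addr2line. */
		dup2(to_child[0], 0);
		dup2(from_child[1], 1);
		close(to_child[1]);
		close(from_child[0]);
		execlp("addr2line", "addr2line", "-e", "/bin/ls", "-i", "-f", (char *)NULL);
		_exit(127);
	}
	close(to_child[0]);
	close(from_child[1]);
	out = fdopen(to_child[1], "w");
	in = fdopen(from_child[0], "r");

	/* One request: the address, then "," so addr2line appends a ??/??:0 sentinel. */
	fprintf(out, "%016llx\n,\n", 0x1000ULL);
	fflush(out);

	/* Read function and file:line lines until the sentinel shows up. */
	while (getline(&line, &len, in) >= 0) {
		fputs(line, stdout);
		if (!strncmp(line, "??:0", 4))
			break;
	}

	free(line);
	fclose(out);
	fclose(in);
	kill(pid, SIGKILL);
	waitpid(pid, NULL, 0);
	return 0;
}
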
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 34a7f5c1fff7..69f3cf3b4a44 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
+#include <math.h>
#include <stdio.h>
#include "evsel.h"
#include "stat.h"
#include "color.h"
+#include "debug.h"
#include "pmu.h"
#include "rblist.h"
#include "evlist.h"
@@ -370,12 +372,16 @@ void perf_stat__collect_metric_expr(struct evlist *evsel_list)
{
struct evsel *counter, *leader, **metric_events, *oc;
bool found;
- struct expr_parse_ctx ctx;
+ struct expr_parse_ctx *ctx;
struct hashmap_entry *cur;
size_t bkt;
int i;
- expr__ctx_init(&ctx);
+ ctx = expr__ctx_new();
+ if (!ctx) {
+ pr_debug("expr__ctx_new failed");
+ return;
+ }
evlist__for_each_entry(evsel_list, counter) {
bool invalid = false;
@@ -383,25 +389,25 @@ void perf_stat__collect_metric_expr(struct evlist *evsel_list)
if (!counter->metric_expr)
continue;
- expr__ctx_clear(&ctx);
+ expr__ctx_clear(ctx);
metric_events = counter->metric_events;
if (!metric_events) {
- if (expr__find_other(counter->metric_expr,
- counter->name,
- &ctx, 1) < 0)
+ if (expr__find_ids(counter->metric_expr,
+ counter->name,
+ ctx) < 0)
continue;
metric_events = calloc(sizeof(struct evsel *),
- hashmap__size(&ctx.ids) + 1);
+ hashmap__size(ctx->ids) + 1);
if (!metric_events) {
- expr__ctx_clear(&ctx);
+ expr__ctx_free(ctx);
return;
}
counter->metric_events = metric_events;
}
i = 0;
- hashmap__for_each_entry((&ctx.ids), cur, bkt) {
+ hashmap__for_each_entry(ctx->ids, cur, bkt) {
const char *metric_name = (const char *)cur->key;
found = false;
@@ -453,7 +459,7 @@ void perf_stat__collect_metric_expr(struct evlist *evsel_list)
counter->metric_expr = NULL;
}
}
- expr__ctx_clear(&ctx);
+ expr__ctx_free(ctx);
}
static double runtime_stat_avg(struct runtime_stat *st,
@@ -815,10 +821,9 @@ static int prepare_metric(struct evsel **metric_events,
struct runtime_stat *st)
{
double scale;
- char *n, *pn;
+ char *n;
int i, j, ret;
- expr__ctx_init(pctx);
for (i = 0; metric_events[i]; i++) {
struct saved_value *v;
struct stats *stats;
@@ -839,23 +844,11 @@ static int prepare_metric(struct evsel **metric_events,
if (v->metric_other)
metric_total = v->metric_total;
}
-
- n = strdup(metric_events[i]->name);
+ n = strdup(evsel__metric_id(metric_events[i]));
if (!n)
return -ENOMEM;
- /*
- * This display code with --no-merge adds [cpu] postfixes.
- * These are not supported by the parser. Remove everything
- * after the space.
- */
- pn = strchr(n, ' ');
- if (pn)
- *pn = 0;
-
- if (metric_total)
- expr__add_id_val(pctx, n, metric_total);
- else
- expr__add_id_val(pctx, n, avg_stats(stats)*scale);
+
+ expr__add_id_val(pctx, n, metric_total ? : avg_stats(stats) * scale);
}
for (j = 0; metric_refs && metric_refs[j].metric_name; j++) {
@@ -880,17 +873,23 @@ static void generic_metric(struct perf_stat_config *config,
struct runtime_stat *st)
{
print_metric_t print_metric = out->print_metric;
- struct expr_parse_ctx pctx;
+ struct expr_parse_ctx *pctx;
double ratio, scale;
int i;
void *ctxp = out->ctx;
- i = prepare_metric(metric_events, metric_refs, &pctx, cpu, st);
- if (i < 0)
+ pctx = expr__ctx_new();
+ if (!pctx)
return;
+ pctx->runtime = runtime;
+ i = prepare_metric(metric_events, metric_refs, pctx, cpu, st);
+ if (i < 0) {
+ expr__ctx_free(pctx);
+ return;
+ }
if (!metric_events[i]) {
- if (expr__parse(&ratio, &pctx, metric_expr, runtime) == 0) {
+ if (expr__parse(&ratio, pctx, metric_expr) == 0) {
char *unit;
char metric_bf[64];
@@ -926,22 +925,26 @@ static void generic_metric(struct perf_stat_config *config,
(metric_name ? metric_name : name) : "", 0);
}
- expr__ctx_clear(&pctx);
+ expr__ctx_free(pctx);
}
double test_generic_metric(struct metric_expr *mexp, int cpu, struct runtime_stat *st)
{
- struct expr_parse_ctx pctx;
+ struct expr_parse_ctx *pctx;
double ratio = 0.0;
- if (prepare_metric(mexp->metric_events, mexp->metric_refs, &pctx, cpu, st) < 0)
+ pctx = expr__ctx_new();
+ if (!pctx)
+ return NAN;
+
+ if (prepare_metric(mexp->metric_events, mexp->metric_refs, pctx, cpu, st) < 0)
goto out;
- if (expr__parse(&ratio, &pctx, mexp->metric_expr, 1))
+ if (expr__parse(&ratio, pctx, mexp->metric_expr))
ratio = 0.0;
out:
- expr__ctx_clear(&pctx);
+ expr__ctx_free(pctx);
return ratio;
}
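
The stat-shadow.c changes above move the expression context from a stack object to a heap-allocated one with an explicit new/add/parse/free lifecycle, keyed by each event's metric id. The sketch below mirrors only that lifecycle with a toy context, since the real expr__ctx_new()/expr__add_id_val()/expr__parse()/expr__ctx_free() live in perf-internal headers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy analogue of an expression context: heap-allocated table of (id, value) pairs. */
struct toy_ctx {
	struct { char id[32]; double val; } ids[16];
	int nr;
};

static struct toy_ctx *toy_ctx_new(void)
{
	return calloc(1, sizeof(struct toy_ctx));
}

static void toy_ctx_free(struct toy_ctx *ctx)
{
	free(ctx);
}

static int toy_add_id_val(struct toy_ctx *ctx, const char *id, double val)
{
	if (ctx->nr >= 16)
		return -1;
	snprintf(ctx->ids[ctx->nr].id, sizeof(ctx->ids[ctx->nr].id), "%s", id);
	ctx->ids[ctx->nr++].val = val;
	return 0;
}

static double toy_lookup(const struct toy_ctx *ctx, const char *id)
{
	for (int i = 0; i < ctx->nr; i++)
		if (!strcmp(ctx->ids[i].id, id))
			return ctx->ids[i].val;
	return 0.0;
}

int main(void)
{
	struct toy_ctx *ctx = toy_ctx_new();
	double ipc;

	if (!ctx)
		return 1;
	/* Values keyed by metric id, the way prepare_metric() now keys them. */
	toy_add_id_val(ctx, "instructions", 1e9);
	toy_add_id_val(ctx, "cycles", 2e9);
	/* Stand-in for parsing and evaluating "instructions / cycles". */
	ipc = toy_lookup(ctx, "instructions") / toy_lookup(ctx, "cycles");
	printf("ipc = %.2f\n", ipc);
	toy_ctx_free(ctx);
	return 0;
}
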
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index a7e981b2d7de..198982109f0f 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -715,7 +715,8 @@ static int __event__synthesize_thread(union perf_event *comm_event,
union perf_event *fork_event,
union perf_event *namespaces_event,
pid_t pid, int full, perf_event__handler_t process,
- struct perf_tool *tool, struct machine *machine, bool mmap_data)
+ struct perf_tool *tool, struct machine *machine,
+ bool needs_mmap, bool mmap_data)
{
char filename[PATH_MAX];
struct dirent **dirent;
@@ -739,7 +740,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
* send mmap only for thread group leader
* see thread__init_maps()
*/
- if (pid == tgid &&
+ if (pid == tgid && needs_mmap &&
perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
process, machine, mmap_data))
return -1;
@@ -786,7 +787,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
break;
rc = 0;
- if (_pid == pid && !kernel_thread) {
+ if (_pid == pid && !kernel_thread && needs_mmap) {
/* process the parent's maps too */
rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
process, machine, mmap_data);
@@ -806,7 +807,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
struct perf_thread_map *threads,
perf_event__handler_t process,
struct machine *machine,
- bool mmap_data)
+ bool needs_mmap, bool mmap_data)
{
union perf_event *comm_event, *mmap_event, *fork_event;
union perf_event *namespaces_event;
@@ -836,7 +837,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
fork_event, namespaces_event,
perf_thread_map__pid(threads, thread), 0,
process, tool, machine,
- mmap_data)) {
+ needs_mmap, mmap_data)) {
err = -1;
break;
}
@@ -862,7 +863,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
fork_event, namespaces_event,
comm_event->comm.pid, 0,
process, tool, machine,
- mmap_data)) {
+ needs_mmap, mmap_data)) {
err = -1;
break;
}
@@ -882,6 +883,7 @@ out:
static int __perf_event__synthesize_threads(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine,
+ bool needs_mmap,
bool mmap_data,
struct dirent **dirent,
int start,
@@ -926,7 +928,7 @@ static int __perf_event__synthesize_threads(struct perf_tool *tool,
*/
__event__synthesize_thread(comm_event, mmap_event, fork_event,
namespaces_event, pid, 1, process,
- tool, machine, mmap_data);
+ tool, machine, needs_mmap, mmap_data);
}
err = 0;
@@ -945,6 +947,7 @@ struct synthesize_threads_arg {
struct perf_tool *tool;
perf_event__handler_t process;
struct machine *machine;
+ bool needs_mmap;
bool mmap_data;
struct dirent **dirent;
int num;
@@ -956,7 +959,8 @@ static void *synthesize_threads_worker(void *arg)
struct synthesize_threads_arg *args = arg;
__perf_event__synthesize_threads(args->tool, args->process,
- args->machine, args->mmap_data,
+ args->machine,
+ args->needs_mmap, args->mmap_data,
args->dirent,
args->start, args->num);
return NULL;
@@ -965,7 +969,7 @@ static void *synthesize_threads_worker(void *arg)
int perf_event__synthesize_threads(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine,
- bool mmap_data,
+ bool needs_mmap, bool mmap_data,
unsigned int nr_threads_synthesize)
{
struct synthesize_threads_arg *args = NULL;
@@ -994,7 +998,8 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
if (thread_nr <= 1) {
err = __perf_event__synthesize_threads(tool, process,
- machine, mmap_data,
+ machine,
+ needs_mmap, mmap_data,
dirent, base, n);
goto free_dirent;
}
@@ -1015,6 +1020,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
args[i].tool = tool;
args[i].process = process;
args[i].machine = machine;
+ args[i].needs_mmap = needs_mmap;
args[i].mmap_data = mmap_data;
args[i].dirent = dirent;
}
@@ -1775,26 +1781,27 @@ out_err:
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
struct target *target, struct perf_thread_map *threads,
- perf_event__handler_t process, bool data_mmap,
- unsigned int nr_threads_synthesize)
+ perf_event__handler_t process, bool needs_mmap,
+ bool data_mmap, unsigned int nr_threads_synthesize)
{
if (target__has_task(target))
- return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
+ return perf_event__synthesize_thread_map(tool, threads, process, machine,
+ needs_mmap, data_mmap);
else if (target__has_cpu(target))
- return perf_event__synthesize_threads(tool, process,
- machine, data_mmap,
+ return perf_event__synthesize_threads(tool, process, machine,
+ needs_mmap, data_mmap,
nr_threads_synthesize);
/* command specified */
return 0;
}
int machine__synthesize_threads(struct machine *machine, struct target *target,
- struct perf_thread_map *threads, bool data_mmap,
- unsigned int nr_threads_synthesize)
+ struct perf_thread_map *threads, bool needs_mmap,
+ bool data_mmap, unsigned int nr_threads_synthesize)
{
return __machine__synthesize_threads(machine, NULL, target, threads,
- perf_event__process, data_mmap,
- nr_threads_synthesize);
+ perf_event__process, needs_mmap,
+ data_mmap, nr_threads_synthesize);
}
static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
@@ -2230,3 +2237,31 @@ int perf_event__synthesize_for_pipe(struct perf_tool *tool,
return ret;
}
+
+int parse_synth_opt(char *synth)
+{
+ char *p, *q;
+ int ret = 0;
+
+ if (synth == NULL)
+ return -1;
+
+ for (q = synth; (p = strsep(&q, ",")); p = q) {
+ if (!strcasecmp(p, "no") || !strcasecmp(p, "none"))
+ return 0;
+
+ if (!strcasecmp(p, "all"))
+ return PERF_SYNTH_ALL;
+
+ if (!strcasecmp(p, "task"))
+ ret |= PERF_SYNTH_TASK;
+ else if (!strcasecmp(p, "mmap"))
+ ret |= PERF_SYNTH_TASK | PERF_SYNTH_MMAP;
+ else if (!strcasecmp(p, "cgroup"))
+ ret |= PERF_SYNTH_CGROUP;
+ else
+ return -1;
+ }
+
+ return ret;
+}
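
For reference, parse_synth_opt() takes a comma-separated list and returns a bitmask of the PERF_SYNTH_* flags declared in the header hunk below, 0 for "no"/"none", or -1 on an unknown token; "mmap" also sets PERF_SYNTH_TASK, since mmap events are only synthesized alongside task events. A hypothetical call site (the option string and the error message are made up for the example) might look like:

    	/* Illustrative only -- not part of the patch. */
    	char synth[] = "task,mmap";		/* must be writable: strsep() modifies it */
    	int mask = parse_synth_opt(synth);	/* -> PERF_SYNTH_TASK | PERF_SYNTH_MMAP */

    	if (mask < 0)
    		pr_err("Invalid synth option\n");	/* e.g. an unknown token like "foo" */
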
diff --git a/tools/perf/util/synthetic-events.h b/tools/perf/util/synthetic-events.h
index c845e2b9b444..c931433bacbf 100644
--- a/tools/perf/util/synthetic-events.h
+++ b/tools/perf/util/synthetic-events.h
@@ -27,6 +27,18 @@ struct target;
union perf_event;
+enum perf_record_synth {
+ PERF_SYNTH_TASK = 1 << 0,
+ PERF_SYNTH_MMAP = 1 << 1,
+ PERF_SYNTH_CGROUP = 1 << 2,
+
+ /* last element */
+ PERF_SYNTH_MAX = 1 << 3,
+};
+#define PERF_SYNTH_ALL (PERF_SYNTH_MAX - 1)
+
+int parse_synth_opt(char *str);
+
typedef int (*perf_event__handler_t)(struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct machine *machine);
@@ -53,8 +65,8 @@ int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct p
int perf_event__synthesize_stat_round(struct perf_tool *tool, u64 time, u64 type, perf_event__handler_t process, struct machine *machine);
int perf_event__synthesize_stat(struct perf_tool *tool, u32 cpu, u32 thread, u64 id, struct perf_counts_values *count, perf_event__handler_t process, struct machine *machine);
int perf_event__synthesize_thread_map2(struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, struct machine *machine);
-int perf_event__synthesize_thread_map(struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, struct machine *machine, bool mmap_data);
-int perf_event__synthesize_threads(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine, bool mmap_data, unsigned int nr_threads_synthesize);
+int perf_event__synthesize_thread_map(struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, struct machine *machine, bool needs_mmap, bool mmap_data);
+int perf_event__synthesize_threads(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine, bool needs_mmap, bool mmap_data, unsigned int nr_threads_synthesize);
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist, perf_event__handler_t process);
int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc, struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
pid_t perf_event__synthesize_comm(struct perf_tool *tool, union perf_event *event, pid_t pid, perf_event__handler_t process, struct machine *machine);
@@ -65,10 +77,10 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
struct target *target, struct perf_thread_map *threads,
- perf_event__handler_t process, bool data_mmap,
+ perf_event__handler_t process, bool needs_mmap, bool data_mmap,
unsigned int nr_threads_synthesize);
int machine__synthesize_threads(struct machine *machine, struct target *target,
- struct perf_thread_map *threads, bool data_mmap,
+ struct perf_thread_map *threads, bool needs_mmap, bool data_mmap,
unsigned int nr_threads_synthesize);
#ifdef HAVE_AUXTRACE_SUPPORT
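
The net effect of the needs_mmap plumbing above is that callers can synthesize task (comm/fork/namespaces) events without synthesizing any mmap events at all, while data_mmap keeps its existing meaning of additionally including data mmaps. A hedged sketch of how a caller might derive both flags from a parsed synth mask; synth_mask, sample_address and the other locals are assumptions, only the machine__synthesize_threads() signature comes from the header change above.

    	/* Illustrative only -- surrounding variables are hypothetical. */
    	bool needs_mmap = synth_mask & PERF_SYNTH_MMAP;	/* skip mmap synthesis entirely if clear */
    	bool data_mmap = sample_address;		/* still selects data mmaps on top of that */

    	err = machine__synthesize_threads(machine, target, threads,
    					  needs_mmap, data_mmap,
    					  nr_threads_synthesize);
    	if (err < 0)
    		return err;
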
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index bbbc0dcd461f..ef873f2cc38f 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -53,6 +53,7 @@ struct perf_tool {
lost_samples,
aux,
itrace_start,
+ aux_output_hw_id,
context_switch,
throttle,
unthrottle,